1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/* Copyright(c) 2024 Realtek Corporation
3 */
4
5#include "chan.h"
6#include "coex.h"
7#include "debug.h"
8#include "fw.h"
9#include "mac.h"
10#include "phy.h"
11#include "reg.h"
12#include "rtw8852bt.h"
13#include "rtw8852bt_rfk.h"
14#include "rtw8852bt_rfk_table.h"
15#include "rtw8852b_common.h"
16
/* Calibration algorithm versions and chip-wide calibration constants. */
#define RTW8852BT_RXDCK_VER 0x1
#define RTW8852BT_IQK_VER 0x2a
#define RTW8852BT_SS 2
#define RTW8852BT_TSSI_PATH_NR 2
#define RTW8852BT_DPK_VER 0x06
#define DPK_RF_PATH_MAX_8852BT 2

/* TSSI DE field within its configuration registers. */
#define _TSSI_DE_MASK GENMASK(21, 12)
/* DPK TX AGC index limits; INVAL marks "no valid index". */
#define DPK_TXAGC_LOWER 0x2e
#define DPK_TXAGC_UPPER 0x3f
#define DPK_TXAGC_INVAL 0xff
/* RF register field masks (RX BB gain field and mode field). */
#define RFREG_MASKRXBB 0x003e0
#define RFREG_MASKMODE 0xf0000
30
/* RF path operating modes used while sequencing the calibrations. */
enum rf_mode {
	RF_SHUT_DOWN = 0x0,
	RF_STANDBY = 0x1,
	RF_TX = 0x2,
	RF_RX = 0x3,
	RF_TXIQK = 0x4,
	RF_DPK = 0x5,
	RF_RXK1 = 0x6,
	RF_RXK2 = 0x7,
};
41
/* DPK (digital pre-distortion calibration) stage/command identifiers. */
enum rtw8852bt_dpk_id {
	LBK_RXIQK = 0x06,
	SYNC = 0x10,
	MDPK_IDL = 0x11,
	MDPK_MPA = 0x12,
	GAIN_LOSS = 0x13,
	GAIN_CAL = 0x14,
	DPK_RXAGC = 0x15,
	KIP_PRESET = 0x16,
	KIP_RESTORE = 0x17,
	DPK_TXAGC = 0x19,
	D_KIP_PRESET = 0x28,
	D_TXAGC = 0x29,
	D_RXAGC = 0x2a,
	D_SYNC = 0x2b,
	D_GAIN_LOSS = 0x2c,
	D_MDPK_IDL = 0x2d,
	D_GAIN_NORM = 0x2f,
	D_KIP_THERMAL = 0x30,
	D_KIP_RESTORE = 0x31
};
63
/* Steps of the DPK AGC tuning state machine. */
enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_ADJ,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,
	DPK_AGC_STEP_GL_LT_CRITERION,
	DPK_AGC_STEP_SET_TX_GAIN,
};
72
/* IQK (IQ imbalance calibration) sub-step command identifiers. */
enum rtw8852bt_iqk_type {
	ID_TXAGC = 0x0,
	ID_FLOK_COARSE = 0x1,
	ID_FLOK_FINE = 0x2,
	ID_TXK = 0x3,
	ID_RXAGC = 0x4,
	ID_RXK = 0x5,
	ID_NBTXK = 0x6,
	ID_NBRXK = 0x7,
	ID_FLOK_VBUFFER = 0x8,
	ID_A_FLOK_COARSE = 0x9,
	ID_G_FLOK_COARSE = 0xa,
	ID_A_FLOK_FINE = 0xb,
	ID_G_FLOK_FINE = 0xc,
	ID_IQK_RESTORE = 0x10,
};
89
/* Selectable ADC sampling clocks (see _rxck_force()). */
enum adc_ck {
	ADC_NA = 0,
	ADC_480M = 1,
	ADC_960M = 2,
	ADC_1920M = 3,
};
96
/* Selectable DAC clocks (see _txck_force()). */
enum dac_ck {
	DAC_40M = 0,
	DAC_80M = 1,
	DAC_120M = 2,
	DAC_160M = 3,
	DAC_240M = 4,
	DAC_320M = 5,
	DAC_480M = 6,
	DAC_960M = 7,
};
107
108static const u32 _tssi_trigger[RTW8852BT_TSSI_PATH_NR] = {0x5820, 0x7820};
109static const u32 _tssi_cw_rpt_addr[RTW8852BT_TSSI_PATH_NR] = {0x1c18, 0x3c18};
110static const u32 _tssi_cw_default_addr[RTW8852BT_TSSI_PATH_NR][4] = {
111 {0x5634, 0x5630, 0x5630, 0x5630},
112 {0x7634, 0x7630, 0x7630, 0x7630} };
113static const u32 _tssi_cw_default_mask[4] = {
114 0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
115static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852BT] = {0x5858, 0x7858};
116static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852BT] = {0x5860, 0x7860};
117static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852BT] = {0x5838, 0x7838};
118static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852BT] = {0x5840, 0x7840};
119static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852BT] = {0x5848, 0x7848};
120static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852BT] = {0x5850, 0x7850};
121static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852BT] = {0x5828, 0x7828};
122static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852BT] = {0x5830, 0x7830};
123
124static const u32 rtw8852bt_backup_bb_regs[] = {0x2344, 0x5800, 0x7800, 0x0704};
125static const u32 rtw8852bt_backup_rf_regs[] = {
126 0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x5, 0x10005};
127static const u32 rtw8852bt_backup_kip_regs[] = {
128 0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec,
129 0x823c, 0x8224, 0x8220, 0xc1d4, 0xc1d8, 0xc1c4, 0xc1ec};
130
131#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852bt_backup_bb_regs)
132#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852bt_backup_rf_regs)
133#define BACKUP_KIP_REGS_NR ARRAY_SIZE(rtw8852bt_backup_kip_regs)
134
135static void _rfk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
136{
137 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
138
139 rtw89_write_rf(rtwdev, rf_path: path, RR_TM, RR_TM_TRI, data: 0x1);
140 rtw89_write_rf(rtwdev, rf_path: path, RR_TM, RR_TM_TRI, data: 0x0);
141 rtw89_write_rf(rtwdev, rf_path: path, RR_TM, RR_TM_TRI, data: 0x1);
142
143 udelay(usec: 200);
144
145 dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, rf_path: path, RR_TM, RR_TM_VAL);
146
147 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] thermal@DPK = 0x%x\n",
148 dpk->bp[path][kidx].ther_dpk);
149}
150
151static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
152{
153 u32 i;
154
155 for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
156 backup_bb_reg_val[i] =
157 rtw89_phy_read32_mask(rtwdev, addr: rtw8852bt_backup_bb_regs[i], MASKDWORD);
158 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
159 fmt: "[RFK]backup bb reg : %x, value =%x\n",
160 rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]);
161 }
162}
163
164static void _rfk_backup_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
165{
166 u32 i;
167
168 for (i = 0; i < BACKUP_KIP_REGS_NR; i++) {
169 backup_kip_reg_val[i] =
170 rtw89_phy_read32_mask(rtwdev, addr: rtw8852bt_backup_kip_regs[i],
171 MASKDWORD);
172 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] Backup 0x%x = %x\n",
173 rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]);
174 }
175}
176
177static
178void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], u8 rf_path)
179{
180 u32 i;
181
182 for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
183 backup_rf_reg_val[i] =
184 rtw89_read_rf(rtwdev, rf_path, addr: rtw8852bt_backup_rf_regs[i],
185 RFREG_MASK);
186
187 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] Backup RF S%d 0x%x = %x\n",
188 rf_path, rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]);
189 }
190}
191
192static void _rfk_reload_bb_reg(struct rtw89_dev *rtwdev, const u32 backup_bb_reg_val[])
193{
194 u32 i;
195
196 for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
197 rtw89_phy_write32_mask(rtwdev, addr: rtw8852bt_backup_bb_regs[i],
198 MASKDWORD, data: backup_bb_reg_val[i]);
199 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
200 fmt: "[RFK]restore bb reg : %x, value =%x\n",
201 rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]);
202 }
203}
204
205static void _rfk_reload_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
206{
207 u32 i;
208
209 for (i = 0; i < BACKUP_KIP_REGS_NR; i++) {
210 rtw89_phy_write32_mask(rtwdev, addr: rtw8852bt_backup_kip_regs[i],
211 MASKDWORD, data: backup_kip_reg_val[i]);
212
213 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
214 fmt: "[RFK]restore kip reg : %x, value =%x\n",
215 rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]);
216 }
217}
218
219static void _rfk_reload_rf_reg(struct rtw89_dev *rtwdev,
220 const u32 backup_rf_reg_val[], u8 rf_path)
221{
222 u32 i;
223
224 for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
225 rtw89_write_rf(rtwdev, rf_path, addr: rtw8852bt_backup_rf_regs[i],
226 RFREG_MASK, data: backup_rf_reg_val[i]);
227
228 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
229 fmt: "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
230 rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]);
231 }
232}
233
234static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
235{
236 u8 val;
237
238 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RFK]dbcc_en: %x,PHY%d\n",
239 rtwdev->dbcc_en, phy_idx);
240
241 if (!rtwdev->dbcc_en) {
242 val = RF_AB;
243 } else {
244 if (phy_idx == RTW89_PHY_0)
245 val = RF_A;
246 else
247 val = RF_B;
248 }
249 return val;
250}
251
252static
253void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
254 enum dac_ck ck)
255{
256 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, data: 0x0);
257
258 if (!force)
259 return;
260
261 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, data: ck);
262 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, data: 0x1);
263}
264
265static
266void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
267 enum adc_ck ck)
268{
269 u32 bw = 0;
270
271 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, data: 0x0);
272
273 if (!force)
274 return;
275
276 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, data: ck);
277 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, data: 0x1);
278
279 switch (ck) {
280 case ADC_480M:
281 bw = RTW89_CHANNEL_WIDTH_40;
282 break;
283 case ADC_960M:
284 bw = RTW89_CHANNEL_WIDTH_80;
285 break;
286 case ADC_1920M:
287 bw = RTW89_CHANNEL_WIDTH_160;
288 break;
289 default:
290 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "%s==>Invalid ck", __func__);
291 break;
292 }
293
294 rtw8852bx_adc_cfg(rtwdev, bw, path);
295}
296
297static void _rfk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
298 enum rtw89_rf_path path, u8 kpath)
299{
300 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, data: 0x0303);
301 rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, data: 0x1);
302 rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, data: 0x1);
303 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, data: 0x3);
304 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, data: 0x3);
305 rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, data: 0x1ffffff);
306 rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, data: 0x1);
307 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, data: 0x1);
308 rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, data: 0x3ff);
309 rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, data: 0x3);
310 rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, data: 0x1);
311 rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, data: 0x1);
312 rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, data: 0x1);
313 rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, data: 0x1);
314 rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, data: 0x1);
315 rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, data: 0x1);
316
317 _txck_force(rtwdev, path: RF_PATH_A, force: true, ck: DAC_960M);
318 _txck_force(rtwdev, path: RF_PATH_B, force: true, ck: DAC_960M);
319 _rxck_force(rtwdev, path: RF_PATH_A, force: true, ck: ADC_1920M);
320 _rxck_force(rtwdev, path: RF_PATH_B, force: true, ck: ADC_1920M);
321
322 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
323 B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, data: 0x5);
324 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, data: 0x1);
325 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, data: 0x1);
326 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, data: 0x1f);
327 udelay(usec: 1);
328 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, data: 0x13);
329 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, data: 0x0001);
330 udelay(usec: 1);
331 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, data: 0x0041);
332 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, data: 0x1);
333 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, data: 0x3333);
334
335 rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, data: 0x1);
336 rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN, MASKLWORD, data: 0x0000);
337 rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, data: 0x1);
338 rtw89_phy_write32_mask(rtwdev, R_P1_TXAGC_TH, MASKLWORD, data: 0x0000);
339}
340
341static void _rfk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
342 enum rtw89_rf_path path, u8 kpath)
343{
344 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, data: 0x0303);
345 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, data: 0x0);
346 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, data: 0x0);
347 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, data: 0x0);
348 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, data: 0x0);
349 rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, data: 0x0);
350 rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, data: 0x0);
351 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, data: 0x0);
352 rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, data: 0x63);
353 rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, data: 0x00);
354 rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, data: 0x00);
355 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
356 B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, data: 0x0);
357 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, data: 0x0000);
358 rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, data: 0x0);
359 rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, data: 0x0);
360 rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, data: 0x0);
361
362 rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, data: 0x0);
363 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, data: 0x1);
364 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, data: 0x2);
365 rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, data: 0x0);
366 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, data: 0x1);
367 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, data: 0x2);
368}
369
370static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
371 enum rtw89_rf_path path)
372{
373 rtw89_write_rf(rtwdev, rf_path: path, RR_DCK1, RR_DCK1_CLR, data: 0x0);
374 rtw89_write_rf(rtwdev, rf_path: path, RR_DCK, RR_DCK_LV, data: 0x0);
375 rtw89_write_rf(rtwdev, rf_path: path, RR_DCK, RR_DCK_LV, data: 0x1);
376 mdelay(1);
377}
378
379static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
380{
381 u8 path, dck_tune;
382 u32 rf_reg5;
383
384 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
385 fmt: "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
386 RTW8852BT_RXDCK_VER, rtwdev->hal.cv);
387
388 for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
389 rf_reg5 = rtw89_read_rf(rtwdev, rf_path: path, RR_RSV1, RFREG_MASK);
390 dck_tune = rtw89_read_rf(rtwdev, rf_path: path, RR_DCK, RR_DCK_FINE);
391
392 if (rtwdev->is_tssi_mode[path])
393 rtw89_phy_write32_mask(rtwdev,
394 R_P0_TSSI_TRK + (path << 13),
395 B_P0_TSSI_TRK_EN, data: 0x1);
396
397 rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RR_RSV1_RST, data: 0x0);
398 rtw89_write_rf(rtwdev, rf_path: path, RR_DCK, RR_DCK_FINE, data: 0x0);
399 rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
400 _set_rx_dck(rtwdev, phy, path);
401 rtw89_write_rf(rtwdev, rf_path: path, RR_DCK, RR_DCK_FINE, data: dck_tune);
402 rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RFREG_MASK, data: rf_reg5);
403
404 if (rtwdev->is_tssi_mode[path])
405 rtw89_phy_write32_mask(rtwdev,
406 R_P0_TSSI_TRK + (path << 13),
407 B_P0_TSSI_TRK_EN, data: 0x0);
408 }
409}
410
411static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
412{
413 u32 rf_reg5;
414 u32 rck_val;
415 u32 val;
416 int ret;
417
418 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RCK] ====== S%d RCK ======\n", path);
419
420 rf_reg5 = rtw89_read_rf(rtwdev, rf_path: path, RR_RSV1, RFREG_MASK);
421
422 rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RR_RSV1_RST, data: 0x0);
423 rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
424
425 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RCK] RF0x00 = 0x%05x\n",
426 rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK));
427
428 /* RCK trigger */
429 rtw89_write_rf(rtwdev, rf_path: path, RR_RCKC, RFREG_MASK, data: 0x00240);
430
431 ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
432 false, rtwdev, path, RR_RCKS, BIT(3));
433
434 rck_val = rtw89_read_rf(rtwdev, rf_path: path, RR_RCKC, RR_RCKC_CA);
435
436 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RCK] rck_val = 0x%x, ret = %d\n",
437 rck_val, ret);
438
439 rtw89_write_rf(rtwdev, rf_path: path, RR_RCKC, RFREG_MASK, data: rck_val);
440 rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RFREG_MASK, data: rf_reg5);
441
442 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RCK] RF 0x1b = 0x%x\n",
443 rtw89_read_rf(rtwdev, rf_path: path, RR_RCKC, RFREG_MASK));
444}
445
446static void _drck(struct rtw89_dev *rtwdev)
447{
448 u32 rck_d;
449 u32 val;
450 int ret;
451
452 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]Ddie RCK start!!!\n");
453 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, data: 0x1);
454
455 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
456 1, 10000, false,
457 rtwdev, R_DRCK_RES, B_DRCK_POL);
458 if (ret)
459 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]DRCK timeout\n");
460
461 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, data: 0x0);
462 rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, data: 0x1);
463 udelay(usec: 1);
464 rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, data: 0x0);
465
466 rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, mask: 0x7c00);
467 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, data: 0x0);
468 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, data: rck_d);
469
470 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0xc0c4 = 0x%x\n",
471 rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
472}
473
474static void _dack_backup_s0(struct rtw89_dev *rtwdev)
475{
476 struct rtw89_dack_info *dack = &rtwdev->dack;
477 u8 i;
478
479 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, data: 0x1);
480
481 for (i = 0; i < 0x10; i++) {
482 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, data: i);
483 dack->msbk_d[0][0][i] =
484 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);
485
486 rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, data: i);
487 dack->msbk_d[0][1][i] =
488 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
489 }
490
491 dack->biask_d[0][0] =
492 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
493 dack->biask_d[0][1] =
494 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);
495
496 dack->dadck_d[0][0] =
497 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
498 dack->dadck_d[0][1] =
499 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
500}
501
502static void _dack_backup_s1(struct rtw89_dev *rtwdev)
503{
504 struct rtw89_dack_info *dack = &rtwdev->dack;
505 u8 i;
506
507 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, data: 0x1);
508
509 for (i = 0; i < 0x10; i++) {
510 rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, data: i);
511 dack->msbk_d[1][0][i] =
512 rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);
513
514 rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, data: i);
515 dack->msbk_d[1][1][i] =
516 rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
517 }
518
519 dack->biask_d[1][0] =
520 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
521 dack->biask_d[1][1] =
522 rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);
523
524 dack->dadck_d[1][0] =
525 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
526 dack->dadck_d[1][1] =
527 rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
528}
529
530static
531void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
532{
533 if (path == RF_PATH_A) {
534 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, data: 0x0);
535 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, data: 0x1);
536 } else {
537 rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, data: 0x0);
538 rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, data: 0x1);
539 }
540}
541
542static
543void _dack_reload_by_path(struct rtw89_dev *rtwdev, u8 path, u8 index)
544{
545 struct rtw89_dack_info *dack = &rtwdev->dack;
546 u32 tmp, tmp_offset, tmp_reg;
547 u32 idx_offset, path_offset;
548 u8 i;
549
550 if (index == 0)
551 idx_offset = 0;
552 else
553 idx_offset = 0x14;
554
555 if (path == RF_PATH_A)
556 path_offset = 0;
557 else
558 path_offset = 0x28;
559
560 tmp_offset = idx_offset + path_offset;
561
562 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, data: 0x1);
563 rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, data: 0x1);
564 rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_RST, data: 0x1);
565 rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_RST, data: 0x1);
566
567 /* msbk_d: 15/14/13/12 */
568 tmp = 0x0;
569 for (i = 0; i < 4; i++)
570 tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
571 tmp_reg = 0xc200 + tmp_offset;
572 rtw89_phy_write32(rtwdev, addr: tmp_reg, data: tmp);
573 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x=0x%x\n", tmp_reg,
574 rtw89_phy_read32_mask(rtwdev, addr: tmp_reg, MASKDWORD));
575
576 /* msbk_d: 11/10/9/8 */
577 tmp = 0x0;
578 for (i = 0; i < 4; i++)
579 tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
580 tmp_reg = 0xc204 + tmp_offset;
581 rtw89_phy_write32(rtwdev, addr: tmp_reg, data: tmp);
582 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x=0x%x\n", tmp_reg,
583 rtw89_phy_read32_mask(rtwdev, addr: tmp_reg, MASKDWORD));
584
585 /* msbk_d: 7/6/5/4 */
586 tmp = 0x0;
587 for (i = 0; i < 4; i++)
588 tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
589 tmp_reg = 0xc208 + tmp_offset;
590 rtw89_phy_write32(rtwdev, addr: tmp_reg, data: tmp);
591 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x=0x%x\n", tmp_reg,
592 rtw89_phy_read32_mask(rtwdev, addr: tmp_reg, MASKDWORD));
593
594 /* msbk_d: 3/2/1/0 */
595 tmp = 0x0;
596 for (i = 0; i < 4; i++)
597 tmp |= dack->msbk_d[path][index][i] << (i * 8);
598 tmp_reg = 0xc20c + tmp_offset;
599 rtw89_phy_write32(rtwdev, addr: tmp_reg, data: tmp);
600 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x=0x%x\n", tmp_reg,
601 rtw89_phy_read32_mask(rtwdev, addr: tmp_reg, MASKDWORD));
602
603 /* dadak_d/biask_d */
604 tmp = (dack->biask_d[path][index] << 22) |
605 (dack->dadck_d[path][index] << 14);
606 tmp_reg = 0xc210 + tmp_offset;
607 rtw89_phy_write32(rtwdev, addr: tmp_reg, data: tmp);
608 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x=0x%x\n", tmp_reg,
609 rtw89_phy_read32_mask(rtwdev, addr: tmp_reg, MASKDWORD));
610
611 /* enable DACK result from reg */
612 rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + tmp_offset, B_DACKN0_EN, data: 0x1);
613}
614
615static
616void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
617{
618 u8 i;
619
620 for (i = 0; i < 2; i++)
621 _dack_reload_by_path(rtwdev, path, index: i);
622}
623
624static bool _dack_s0_poll(struct rtw89_dev *rtwdev)
625{
626 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
627 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
628 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
629 rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
630 return false;
631
632 return true;
633}
634
635static void _dack_s0(struct rtw89_dev *rtwdev)
636{
637 struct rtw89_dack_info *dack = &rtwdev->dack;
638 bool done;
639 int ret;
640
641 _txck_force(rtwdev, path: RF_PATH_A, force: true, ck: DAC_160M);
642
643 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, data: 0x1);
644 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), data: 0x1);
645 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, data: 0x0);
646 udelay(usec: 100);
647 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_VAL, data: 0x30);
648 rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_VAL, data: 0x30);
649
650 _dack_reset(rtwdev, path: RF_PATH_A);
651
652 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, data: 0x1);
653 udelay(usec: 1);
654
655 dack->msbk_timeout[0] = false;
656
657 ret = read_poll_timeout_atomic(_dack_s0_poll, done, done,
658 1, 20000, false, rtwdev);
659 if (ret) {
660 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 DACK timeout\n");
661 dack->msbk_timeout[0] = true;
662 }
663
664 rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, data: 0x0);
665
666 _txck_force(rtwdev, path: RF_PATH_A, force: false, ck: DAC_960M);
667 _dack_backup_s0(rtwdev);
668 _dack_reload(rtwdev, path: RF_PATH_A);
669
670 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, data: 0x0);
671}
672
673static bool _dack_s1_poll(struct rtw89_dev *rtwdev)
674{
675 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
676 rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
677 rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
678 rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
679 return false;
680
681 return true;
682}
683
684static void _dack_s1(struct rtw89_dev *rtwdev)
685{
686 struct rtw89_dack_info *dack = &rtwdev->dack;
687 bool done;
688 int ret;
689
690 _txck_force(rtwdev, path: RF_PATH_B, force: true, ck: DAC_160M);
691
692 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, data: 0x1);
693 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), data: 0x1);
694 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, data: 0x0);
695 udelay(usec: 100);
696 rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_VAL, data: 0x30);
697 rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_VAL, data: 0x30);
698
699 _dack_reset(rtwdev, path: RF_PATH_B);
700
701 rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, data: 0x1);
702 udelay(usec: 1);
703
704 dack->msbk_timeout[1] = false;
705
706 ret = read_poll_timeout_atomic(_dack_s1_poll, done, done,
707 1, 10000, false, rtwdev);
708 if (ret) {
709 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S1 DACK timeout\n");
710 dack->msbk_timeout[1] = true;
711 }
712
713 rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, data: 0x0);
714
715 _txck_force(rtwdev, path: RF_PATH_B, force: false, ck: DAC_960M);
716 _dack_backup_s1(rtwdev);
717 _dack_reload(rtwdev, path: RF_PATH_B);
718
719 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, data: 0x0);
720}
721
/* Run DAC calibration on both paths. */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}
727
728static void _dack_dump(struct rtw89_dev *rtwdev)
729{
730 struct rtw89_dack_info *dack = &rtwdev->dack;
731 u8 i;
732 u8 t;
733
734 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
735 fmt: "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
736 dack->addck_d[0][0], dack->addck_d[0][1]);
737 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
738 fmt: "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
739 dack->addck_d[1][0], dack->addck_d[1][1]);
740 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
741 fmt: "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
742 dack->dadck_d[0][0], dack->dadck_d[0][1]);
743 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
744 fmt: "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
745 dack->dadck_d[1][0], dack->dadck_d[1][1]);
746 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
747 fmt: "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
748 dack->biask_d[0][0], dack->biask_d[0][1]);
749 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
750 fmt: "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
751 dack->biask_d[1][0], dack->biask_d[1][1]);
752
753 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 MSBK ic:\n");
754 for (i = 0; i < 0x10; i++) {
755 t = dack->msbk_d[0][0][i];
756 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x\n", t);
757 }
758
759 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 MSBK qc:\n");
760 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
761 t = dack->msbk_d[0][1][i];
762 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x\n", t);
763 }
764
765 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S1 MSBK ic:\n");
766 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
767 t = dack->msbk_d[1][0][i];
768 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x\n", t);
769 }
770
771 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S1 MSBK qc:\n");
772 for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
773 t = dack->msbk_d[1][1][i];
774 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x\n", t);
775 }
776}
777
778static void _addck_ori(struct rtw89_dev *rtwdev)
779{
780 struct rtw89_dack_info *dack = &rtwdev->dack;
781 u32 val;
782 int ret;
783
784 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, data: 0x0);
785 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, data: 0x0);
786 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, data: 0x1);
787 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, data: 0x0);
788 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, data: 0x0);
789 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, data: 0x1);
790
791 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, data: 0xf);
792 udelay(usec: 100);
793
794 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, data: 0x0);
795 rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), data: 0x1);
796 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, data: 0x3);
797 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, data: 0x1);
798 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, data: 0x0);
799 udelay(usec: 1);
800
801 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, data: 0x1);
802 dack->addck_timeout[0] = false;
803
804 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
805 1, 10000, false,
806 rtwdev, R_ADDCKR0, BIT(0));
807 if (ret) {
808 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 ADDCK timeout\n");
809 dack->addck_timeout[0] = true;
810 }
811
812 rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), data: 0x0);
813 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, data: 0x1);
814 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, data: 0xc);
815 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, data: 0x1);
816
817 rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, data: 0x0);
818 dack->addck_d[0][0] =
819 rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
820 dack->addck_d[0][1] =
821 rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);
822 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, data: 0x0);
823
824 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, data: 0x1);
825 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, data: 0x0);
826 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, data: 0x0);
827 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, data: 0x1);
828
829 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, data: 0xf);
830 udelay(usec: 100);
831
832 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, data: 0x0);
833 rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), data: 0x1);
834 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, data: 0x3);
835 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, data: 0x1);
836 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, data: 0x0);
837 udelay(usec: 1);
838
839 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, data: 0x1);
840 dack->addck_timeout[1] = false;
841
842 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
843 1, 10000, false,
844 rtwdev, R_ADDCKR1, BIT(0));
845 if (ret) {
846 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S1 ADDCK timeout\n");
847 dack->addck_timeout[1] = true;
848 }
849
850 rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), data: 0x0);
851 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, data: 0x1);
852 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, data: 0xc);
853 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, data: 0x1);
854
855 rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, data: 0x0);
856 dack->addck_d[1][0] =
857 rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
858 dack->addck_d[1][1] =
859 rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);
860
861 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, data: 0x0);
862}
863
864static void _addck_reload(struct rtw89_dev *rtwdev)
865{
866 struct rtw89_dack_info *dack = &rtwdev->dack;
867
868 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, data: dack->addck_d[0][0]);
869 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, data: dack->addck_d[0][1]);
870
871 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, data: 0x3);
872
873 rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1, data: dack->addck_d[1][0]);
874 rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0, data: dack->addck_d[1][1]);
875
876 rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, data: 0x3);
877}
878
879static void _dack_manual_off(struct rtw89_dev *rtwdev)
880{
881 rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, data: 0x0);
882 rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, data: 0x0);
883
884 rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_EN, data: 0x0);
885 rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_ON, data: 0x0);
886 rtw89_phy_write32_mask(rtwdev, R_DACKN2_CTL, B_DACKN2_ON, data: 0x0);
887 rtw89_phy_write32_mask(rtwdev, R_DACKN3_CTL, B_DACKN3_ON, data: 0x0);
888}
889
890static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
891{
892 struct rtw89_dack_info *dack = &rtwdev->dack;
893
894 dack->dack_done = false;
895 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]DACK start!!!\n");
896
897 _drck(rtwdev);
898 _dack_manual_off(rtwdev);
899 rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV1, RFREG_MASK, data: 0x0);
900 rtw89_write_rf(rtwdev, rf_path: RF_PATH_B, RR_RSV1, RFREG_MASK, data: 0x0);
901 rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MOD, RFREG_MASK, data: 0x337e1);
902 rtw89_write_rf(rtwdev, rf_path: RF_PATH_B, RR_MOD, RFREG_MASK, data: 0x337e1);
903 _rxck_force(rtwdev, path: RF_PATH_A, force: true, ck: ADC_960M);
904 _rxck_force(rtwdev, path: RF_PATH_B, force: true, ck: ADC_960M);
905 _addck_ori(rtwdev);
906
907 _rxck_force(rtwdev, path: RF_PATH_A, force: false, ck: ADC_960M);
908 _rxck_force(rtwdev, path: RF_PATH_B, force: false, ck: ADC_960M);
909 _addck_reload(rtwdev);
910
911 rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MODOPT, RFREG_MASK, data: 0x0);
912 rtw89_write_rf(rtwdev, rf_path: RF_PATH_B, RR_MODOPT, RFREG_MASK, data: 0x0);
913
914 _dack(rtwdev);
915 _dack_dump(rtwdev);
916 dack->dack_done = true;
917 rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV1, RFREG_MASK, data: 0x1);
918 rtw89_write_rf(rtwdev, rf_path: RF_PATH_B, RR_RSV1, RFREG_MASK, data: 0x1);
919
920 dack->dack_cnt++;
921 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]DACK finish!!!\n");
922}
923
924static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
925{
926 bool notready = false;
927 u32 val;
928 int ret;
929
930 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
931 10, 8200, false,
932 rtwdev, R_RFK_ST, MASKBYTE0);
933 if (ret)
934 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]NCTL1 IQK timeout!!!\n");
935
936 udelay(usec: 10);
937
938 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
939 10, 400, false,
940 rtwdev, R_RPT_COM, B_RPT_COM_RDY);
941 if (ret) {
942 notready = true;
943 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]NCTL2 IQK timeout!!!\n");
944 }
945
946 udelay(usec: 10);
947 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, data: 0x0);
948
949 return notready;
950}
951
952static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
953 u8 path, u8 ktype)
954{
955 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
956 u32 iqk_cmd;
957 bool fail;
958
959 switch (ktype) {
960 case ID_TXAGC:
961 iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
962 break;
963 case ID_FLOK_COARSE:
964 iqk_cmd = 0x108 | (1 << (4 + path));
965 break;
966 case ID_FLOK_FINE:
967 iqk_cmd = 0x208 | (1 << (4 + path));
968 break;
969 case ID_FLOK_VBUFFER:
970 iqk_cmd = 0x308 | (1 << (4 + path));
971 break;
972 case ID_TXK:
973 iqk_cmd = 0x008 | (1 << (path + 4)) |
974 (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
975 break;
976 case ID_RXAGC:
977 iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
978 break;
979 case ID_RXK:
980 iqk_cmd = 0x008 | (1 << (path + 4)) |
981 (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
982 break;
983 case ID_NBTXK:
984 iqk_cmd = 0x408 | (1 << (4 + path));
985 break;
986 case ID_NBRXK:
987 iqk_cmd = 0x608 | (1 << (4 + path));
988 break;
989 default:
990 return false;
991 }
992
993 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s, iqk_cmd = %x\n",
994 __func__, iqk_cmd + 1);
995
996 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: iqk_cmd + 1);
997 fail = _iqk_check_cal(rtwdev, path, ktype);
998
999 return fail;
1000}
1001
1002static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
1003{
1004 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1005
1006 switch (iqk_info->iqk_band[path]) {
1007 case RTW89_BAND_2G:
1008 rtw89_write_rf(rtwdev, rf_path: path, RR_TXG1, RR_TXG1_ATT2, data: 0x0);
1009 rtw89_write_rf(rtwdev, rf_path: path, RR_TXG1, RR_TXG1_ATT1, data: 0x0);
1010 rtw89_write_rf(rtwdev, rf_path: path, RR_TXG2, RR_TXG2_ATT0, data: 0x1);
1011 rtw89_write_rf(rtwdev, rf_path: path, RR_TXGA, RR_TXGA_LOK_EXT, data: 0x0);
1012 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWE, RR_LUTWE_LOK, data: 0x1);
1013 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWA, RR_LUTWA_M1, data: 0x00);
1014 rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_IQK, data: 0x403e);
1015 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR0, data: 0x0);
1016 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR1, data: 0x5);
1017 udelay(usec: 1);
1018 break;
1019 case RTW89_BAND_5G:
1020 rtw89_write_rf(rtwdev, rf_path: path, RR_BIASA, RR_BIASA_A, data: 0x1);
1021 rtw89_write_rf(rtwdev, rf_path: path, RR_TXGA, RR_TXGA_LOK_EXT, data: 0x0);
1022 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWE, RR_LUTWE_LOK, data: 0x1);
1023 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWA, RR_LUTWA_M1, data: 0x80);
1024 rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_IQK, data: 0x403e);
1025 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR0, data: 0x0);
1026 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR1, data: 0x4);
1027 udelay(usec: 1);
1028 break;
1029 default:
1030 break;
1031 }
1032}
1033
1034static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1035{
1036 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n", __func__);
1037
1038 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: 0x0);
1039 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1040 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, data: 0x09);
1041 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, data: 0x021);
1042 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: 0x00000119 + (path << 4));
1043
1044 _iqk_check_cal(rtwdev, path, ktype: ID_FLOK_COARSE);
1045 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1046 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1047
1048 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: 0x12);
1049 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1050 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, data: 0x24);
1051 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: 0x00000319 + (path << 4));
1052
1053 _iqk_check_cal(rtwdev, path, ktype: ID_FLOK_VBUFFER);
1054 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1055 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1056
1057 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: 0x0);
1058 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1059 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, data: 0x09);
1060 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: 0x00000219 + (path << 4));
1061
1062 _iqk_check_cal(rtwdev, path, ktype: ID_FLOK_COARSE);
1063 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1064 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1065
1066 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: 0x12);
1067 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1068 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, data: 0x24);
1069 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: 0x00000319 + (path << 4));
1070
1071 _iqk_check_cal(rtwdev, path, ktype: ID_FLOK_VBUFFER);
1072
1073 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1074 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1075
1076 return false;
1077}
1078
1079static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1080{
1081 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n", __func__);
1082
1083 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: 0x0);
1084 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1085 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, data: 0x09);
1086 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, data: 0x021);
1087 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: 0x00000119 + (path << 4));
1088
1089 _iqk_check_cal(rtwdev, path, ktype: ID_FLOK_COARSE);
1090 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1091 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1092
1093 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: 0x12);
1094 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1095 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, data: 0x24);
1096 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: 0x00000319 + (path << 4));
1097
1098 _iqk_check_cal(rtwdev, path, ktype: ID_FLOK_VBUFFER);
1099 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1100 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1101
1102 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: 0x0);
1103 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1104 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, data: 0x09);
1105 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: 0x00000219 + (path << 4));
1106
1107 _iqk_check_cal(rtwdev, path, ktype: ID_FLOK_COARSE);
1108 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1109 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1110
1111 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: 0x12);
1112 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1113 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, data: 0x24);
1114 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: 0x00000319 + (path << 4));
1115
1116 _iqk_check_cal(rtwdev, path, ktype: ID_FLOK_VBUFFER);
1117 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1118 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1119
1120 return false;
1121}
1122
1123static bool _iqk_2g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1124{
1125 static const u32 g_power_range[4] = {0x0, 0x0, 0x0, 0x0};
1126 static const u32 g_track_range[4] = {0x4, 0x4, 0x6, 0x6};
1127 static const u32 g_gain_bb[4] = {0x08, 0x0e, 0x08, 0x0e};
1128 static const u32 g_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
1129 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1130 bool notready = false;
1131 bool kfail = false;
1132 u8 gp;
1133
1134 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n", __func__);
1135
1136 for (gp = 0x0; gp < 0x4; gp++) {
1137 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR0,
1138 data: g_power_range[gp]);
1139 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR1,
1140 data: g_track_range[gp]);
1141 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG,
1142 data: g_gain_bb[gp]);
1143 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1144 mask: 0x00000100, data: 0x1);
1145 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1146 mask: 0x00000010, data: 0x1);
1147 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1148 mask: 0x00000004, data: 0x0);
1149 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1150 mask: 0x00000003, data: gp);
1151 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
1152 data: 0x009);
1153 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1154 B_KIP_IQP_IQSW, data: g_itqt[gp]);
1155 notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBTXK);
1156 iqk_info->nb_txcfir[path] =
1157 rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
1158
1159 if (iqk_info->is_nbiqk)
1160 break;
1161
1162 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1163 B_KIP_IQP_IQSW, data: g_itqt[gp]);
1164 notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_TXK);
1165 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1166
1167 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
1168 fmt: "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
1169 path, gp, 1 << path, iqk_info->nb_txcfir[path]);
1170 }
1171
1172 if (!notready)
1173 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
1174
1175 if (kfail) {
1176 iqk_info->nb_txcfir[path] = 0x40000002;
1177 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
1178 B_IQK_RES_TXCFIR, data: 0x0);
1179 }
1180
1181 return kfail;
1182}
1183
1184static bool _iqk_5g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1185{
1186 static const u32 a_power_range[4] = {0x0, 0x0, 0x0, 0x0};
1187 static const u32 a_track_range[4] = {0x3, 0x3, 0x6, 0x6};
1188 static const u32 a_gain_bb[4] = {0x08, 0x10, 0x08, 0x0e};
1189 static const u32 a_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
1190 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1191 bool notready = false;
1192 bool kfail = false;
1193 u8 gp;
1194
1195 for (gp = 0x0; gp < 0x4; gp++) {
1196 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR0, data: a_power_range[gp]);
1197 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR1, data: a_track_range[gp]);
1198 rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: a_gain_bb[gp]);
1199
1200 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1201 MASKDWORD, data: a_itqt[gp]);
1202 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1203 mask: 0x00000100, data: 0x1);
1204 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1205 mask: 0x00000010, data: 0x1);
1206 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1207 mask: 0x00000004, data: 0x0);
1208 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1209 mask: 0x00000003, data: gp);
1210 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
1211 data: 0x009);
1212 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1213 B_KIP_IQP_IQSW, data: a_itqt[gp]);
1214
1215 notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBTXK);
1216 iqk_info->nb_txcfir[path] =
1217 rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
1218
1219 if (iqk_info->is_nbiqk)
1220 break;
1221
1222 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1223 B_KIP_IQP_IQSW, data: a_itqt[gp]);
1224 notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_TXK);
1225 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1226
1227 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
1228 fmt: "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
1229 path, gp, 1 << path, iqk_info->nb_txcfir[path]);
1230 }
1231
1232 if (!notready)
1233 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
1234
1235 if (kfail) {
1236 iqk_info->nb_txcfir[path] = 0x40000002;
1237 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
1238 B_IQK_RES_TXCFIR, data: 0x0);
1239 }
1240
1241 return kfail;
1242}
1243
1244static void _iqk_adc_fifo_rst(struct rtw89_dev *rtwdev,
1245 enum rtw89_phy_idx phy_idx, u8 path)
1246{
1247 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, data: 0x0303);
1248 udelay(usec: 10);
1249 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, data: 0x3333);
1250}
1251
1252static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
1253{
1254 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1255
1256 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n", __func__);
1257 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, data: 0x0303);
1258
1259 if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
1260 _rxck_force(rtwdev, path: RF_PATH_A, force: true, ck: ADC_960M);
1261 _rxck_force(rtwdev, path: RF_PATH_B, force: true, ck: ADC_960M);
1262 udelay(usec: 1);
1263
1264 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
1265 B_UPD_CLK_ADC_ON, data: 0x1);
1266 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
1267 B_UPD_CLK_ADC_VAL, data: 0x1);
1268 rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
1269 B_PATH0_SAMPL_DLY_T_MSK_V1, data: 0x2);
1270 rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
1271 B_PATH1_SAMPL_DLY_T_MSK_V1, data: 0x2);
1272 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, data: 0x8);
1273 rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
1274 B_PATH1_BW_SEL_MSK_V1, data: 0x8);
1275 } else {
1276 _rxck_force(rtwdev, path: RF_PATH_A, force: true, ck: ADC_480M);
1277 _rxck_force(rtwdev, path: RF_PATH_B, force: true, ck: ADC_480M);
1278 udelay(usec: 1);
1279
1280 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
1281 B_UPD_CLK_ADC_ON, data: 0x1);
1282 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
1283 B_UPD_CLK_ADC_VAL, data: 0x0);
1284 rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
1285 B_PATH0_SAMPL_DLY_T_MSK_V1, data: 0x3);
1286 rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
1287 B_PATH1_SAMPL_DLY_T_MSK_V1, data: 0x3);
1288 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, data: 0xf);
1289 rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
1290 B_PATH1_BW_SEL_MSK_V1, data: 0xf);
1291 }
1292
1293 rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, mask: 0x00000780, data: 0x8);
1294 rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, mask: 0x00000780, data: 0x8);
1295 rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, mask: 0x00007800, data: 0x2);
1296 rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, mask: 0x00007800, data: 0x2);
1297 rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_MUL, data: 0x0);
1298 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, data: 0x1);
1299 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, data: 0x1);
1300 udelay(usec: 1);
1301 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, data: 0x0f);
1302 udelay(usec: 1);
1303 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, data: 0x03);
1304 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, data: 0xa001);
1305 udelay(usec: 1);
1306 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, data: 0xa041);
1307 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, data: 0x3333);
1308}
1309
1310static bool _iqk_2g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1311{
1312 static const u32 g_idxrxgain[2] = {0x212, 0x310};
1313 static const u32 g_idxattc2[2] = {0x00, 0x20};
1314 static const u32 g_idxattc1[2] = {0x3, 0x2};
1315 static const u32 g_idxrxagc[2] = {0x0, 0x2};
1316 static const u32 g_idx[2] = {0x0, 0x2};
1317 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1318 bool notready = false;
1319 bool kfail = false;
1320 u32 rf_18, tmp;
1321 u8 gp;
1322
1323 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n", __func__);
1324
1325 rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_MASK, data: 0xc);
1326 rtw89_write_rf(rtwdev, rf_path: path, RR_RXK, RR_RXK_SEL2G, data: 0x1);
1327 rf_18 = rtw89_read_rf(rtwdev, rf_path: path, RR_CFGCH, RFREG_MASK);
1328 rtw89_write_rf(rtwdev, rf_path: path, RR_RSV4, RFREG_MASK, data: rf_18);
1329
1330 for (gp = 0x0; gp < 0x2; gp++) {
1331 rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_RGM, data: g_idxrxgain[gp]);
1332 rtw89_write_rf(rtwdev, rf_path: path, RR_RXBB, RR_RXBB_C2G, data: g_idxattc2[gp]);
1333 rtw89_write_rf(rtwdev, rf_path: path, RR_RXBB, RR_RXBB_C1G, data: g_idxattc1[gp]);
1334
1335 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1336 mask: 0x00000100, data: 0x1);
1337 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1338 mask: 0x00000010, data: 0x0);
1339 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1340 mask: 0x00000007, data: g_idx[gp]);
1341 rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RFREG_MASK, data: 0x80013);
1342 udelay(usec: 100);
1343 udelay(usec: 100);
1344
1345 tmp = rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK);
1346 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, data: tmp);
1347 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, data: g_idxrxagc[gp]);
1348 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1349 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x11);
1350
1351 notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_RXAGC);
1352 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1353 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, rf rxbb = %x\n", path,
1354 rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, mask: 0x003c0));
1355
1356 rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RFREG_MASK, data: 0x80013);
1357 udelay(usec: 100);
1358 udelay(usec: 100);
1359 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1360 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x011);
1361
1362 notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBRXK);
1363 iqk_info->nb_rxcfir[path] =
1364 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
1365 MASKDWORD) | 0x2;
1366 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
1367 fmt: "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n", path,
1368 g_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
1369
1370 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1371 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1372
1373 if (iqk_info->is_nbiqk)
1374 break;
1375
1376 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1377 notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_RXK);
1378 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1379 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1380 }
1381
1382 if (!notready)
1383 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
1384
1385 if (kfail) {
1386 iqk_info->nb_txcfir[path] = 0x40000002;
1387 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
1388 B_IQK_RES_RXCFIR, data: 0x0);
1389 }
1390 rtw89_write_rf(rtwdev, rf_path: path, RR_RXK, RR_RXK_SEL2G, data: 0x0);
1391
1392 return kfail;
1393}
1394
1395static bool _iqk_5g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1396{
1397 static const u32 a_idxrxgain[2] = {0x110, 0x290};
1398 static const u32 a_idxattc2[2] = {0x0f, 0x0f};
1399 static const u32 a_idxattc1[2] = {0x2, 0x2};
1400 static const u32 a_idxrxagc[2] = {0x4, 0x6};
1401 static const u32 a_idx[2] = {0x0, 0x2};
1402 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1403 bool notready = false;
1404 bool kfail = false;
1405 u32 rf_18, tmp;
1406 u8 gp;
1407
1408 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n", __func__);
1409
1410 rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_MASK, data: 0xc);
1411 rtw89_write_rf(rtwdev, rf_path: path, RR_RXK, RR_RXK_SEL5G, data: 0x1);
1412 rf_18 = rtw89_read_rf(rtwdev, rf_path: path, RR_CFGCH, RFREG_MASK);
1413 rtw89_write_rf(rtwdev, rf_path: path, RR_RSV4, RFREG_MASK, data: rf_18);
1414
1415 for (gp = 0x0; gp < 0x2; gp++) {
1416 rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_RGM, data: a_idxrxgain[gp]);
1417 rtw89_write_rf(rtwdev, rf_path: path, RR_RXA2, RR_RXA2_HATT, data: a_idxattc2[gp]);
1418 rtw89_write_rf(rtwdev, rf_path: path, RR_RXA2, RR_RXA2_CC2, data: a_idxattc1[gp]);
1419
1420 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1421 mask: 0x00000100, data: 0x1);
1422 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1423 mask: 0x00000010, data: 0x0);
1424 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1425 mask: 0x00000007, data: a_idx[gp]);
1426 rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RFREG_MASK, data: 0x80013);
1427 udelay(usec: 100);
1428 udelay(usec: 100);
1429
1430 tmp = rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK);
1431 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, data: tmp);
1432 rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, data: a_idxrxagc[gp]);
1433 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1434 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x11);
1435
1436 notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_RXAGC);
1437 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1438 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, rf rxbb = %x\n", path,
1439 rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, mask: 0x003c0));
1440
1441 rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RFREG_MASK, data: 0x80013);
1442 udelay(usec: 200);
1443 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1444 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x011);
1445 notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBRXK);
1446 iqk_info->nb_rxcfir[path] =
1447 rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
1448 MASKDWORD) | 0x2;
1449 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
1450 fmt: "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n",
1451 path, a_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
1452 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1453 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1454
1455 if (iqk_info->is_nbiqk)
1456 break;
1457
1458 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x1);
1459 notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_RXK);
1460 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1461 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, data: 0x0);
1462 }
1463
1464 if (!notready)
1465 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
1466
1467 if (kfail) {
1468 iqk_info->nb_txcfir[path] = 0x40000002;
1469 rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
1470 B_IQK_RES_RXCFIR, data: 0x0);
1471 }
1472 rtw89_write_rf(rtwdev, rf_path: path, RR_RXK, RR_RXK_SEL2G, data: 0x0);
1473
1474 return kfail;
1475}
1476
1477static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1478{
1479 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1480 bool lok_result = false;
1481 bool txk_result = false;
1482 bool rxk_result = false;
1483 u8 i;
1484
1485 for (i = 0; i < 3; i++) {
1486 _iqk_txk_setting(rtwdev, path);
1487 if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
1488 lok_result = _iqk_2g_lok(rtwdev, phy_idx, path);
1489 else
1490 lok_result = _iqk_5g_lok(rtwdev, phy_idx, path);
1491
1492 if (!lok_result)
1493 break;
1494 }
1495
1496 if (lok_result) {
1497 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
1498 fmt: "[IQK]!!!!!!!!!!LOK by Pass !!!!!!!!!!!\n");
1499 rtw89_write_rf(rtwdev, rf_path: path, RR_DTXLOK, RFREG_MASK, data: 0x80200);
1500 rtw89_write_rf(rtwdev, rf_path: path, RR_RSV2, RFREG_MASK, data: 0x80200);
1501 rtw89_write_rf(rtwdev, rf_path: path, RR_LOKVB, RFREG_MASK, data: 0x80200);
1502 }
1503
1504 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]RF_0x08[00:19] = 0x%x\n",
1505 rtw89_read_rf(rtwdev, rf_path: path, RR_DTXLOK, RFREG_MASK));
1506 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]RF_0x09[00:19] = 0x%x\n",
1507 rtw89_read_rf(rtwdev, rf_path: path, RR_RSV2, RFREG_MASK));
1508 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]RF_0x0a[00:19] = 0x%x\n",
1509 rtw89_read_rf(rtwdev, rf_path: path, RR_LOKVB, RFREG_MASK));
1510
1511 if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
1512 txk_result = _iqk_2g_tx(rtwdev, phy_idx, path);
1513 else
1514 txk_result = _iqk_5g_tx(rtwdev, phy_idx, path);
1515
1516 _iqk_rxclk_setting(rtwdev, path);
1517 _iqk_adc_fifo_rst(rtwdev, phy_idx, path);
1518
1519 if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
1520 rxk_result = _iqk_2g_rx(rtwdev, phy_idx, path);
1521 else
1522 rxk_result = _iqk_5g_rx(rtwdev, phy_idx, path);
1523
1524 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
1525 fmt: "[IQK]result : lok_= %x, txk_= %x, rxk_= %x\n",
1526 lok_result, txk_result, rxk_result);
1527}
1528
1529static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
1530 enum rtw89_chanctx_idx chanctx_idx)
1531{
1532 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: chanctx_idx);
1533 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
1534 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1535 u8 idx = rfk_mcc->table_idx;
1536 u32 reg_rf18;
1537 u32 reg_35c;
1538
1539 reg_rf18 = rtw89_read_rf(rtwdev, rf_path: path, RR_CFGCH, RFREG_MASK);
1540 reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);
1541
1542 iqk_info->iqk_band[path] = chan->band_type;
1543 iqk_info->iqk_bw[path] = chan->band_width;
1544 iqk_info->iqk_ch[path] = chan->channel;
1545 iqk_info->iqk_mcc_ch[idx][path] = chan->channel;
1546 iqk_info->iqk_table_idx[path] = idx;
1547
1548 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, 0x18= 0x%x, idx = %x\n",
1549 path, reg_rf18, idx);
1550 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, 0x18= 0x%x\n",
1551 path, reg_rf18);
1552 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, 0x35c= 0x%x\n",
1553 path, reg_35c);
1554 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]times = 0x%x, ch =%x\n",
1555 iqk_info->iqk_times, idx);
1556 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
1557 idx, path, iqk_info->iqk_mcc_ch[idx][path]);
1558}
1559
1560static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1561{
1562 _iqk_by_path(rtwdev, phy_idx, path);
1563}
1564
1565static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
1566{
1567 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1568
1569 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "===> %s\n", __func__);
1570
1571 if (iqk_info->is_nbiqk) {
1572 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
1573 MASKDWORD, data: iqk_info->nb_txcfir[path]);
1574 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
1575 MASKDWORD, data: iqk_info->nb_rxcfir[path]);
1576 } else {
1577 rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
1578 MASKDWORD, data: 0x40000000);
1579 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
1580 MASKDWORD, data: 0x40000000);
1581 }
1582 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
1583 data: 0x00000e19 + (path << 4));
1584
1585 _iqk_check_cal(rtwdev, path, ktype: 0x0);
1586
1587 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
1588 rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, data: 0x00000000);
1589 rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, data: 0x80000000);
1590
1591 rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, data: 0x0);
1592 rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, data: 0x0);
1593 rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), BIT(28), data: 0x0);
1594
1595 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWE, RR_LUTWE_LOK, data: 0x0);
1596 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWE, RR_LUTWE_LOK, data: 0x0);
1597 rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_MASK, data: 0x3);
1598 rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RR_RSV1_RST, data: 0x1);
1599 rtw89_write_rf(rtwdev, rf_path: path, RR_BBDC, RR_BBDC_SEL, data: 0x1);
1600}
1601
/* Restore AFE/BB state after IQK: release the debug and clock-force
 * overrides set up by _iqk_macbb_setting() and put the ADC FIFOs, CCA and
 * PD logic back into normal operation for both paths.
 */
static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);
	/* Hold the RXK ADC FIFOs while the clock overrides are reverted. */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0000000);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x0000001f, 0x03);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x000003e0, 0x03);
	/* Drop the TX/RX clocks that were forced on for calibration. */
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
			       B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0);
	/* Re-enable PD hit and CCA, then release and power down the FIFOs. */
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0000);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0);
}
1626
/* Preset KIP/CFIR state for IQK on one path: point the coefficient/LUT
 * selectors at the current rfk_mcc table entry, reset the TX/RX CFIR
 * coefficients to 0x40000000 (presumably unity gain — confirm against the
 * CFIR format), release the RF reset and hand control to the KIP block.
 */
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
	u8 idx = rfk_mcc->table_idx;

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 0x00000001, idx);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 0x00000008, idx);
	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000000);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
}
1642
1643static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
1644 enum rtw89_phy_idx phy_idx, u8 path)
1645{
1646 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, data: 0x0303);
1647 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_GOT_TXRX, data: 0x3);
1648 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_GOT_TXRX, data: 0x3);
1649 rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, data: 0x1);
1650 rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, data: 0x1);
1651 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, data: 0x3);
1652 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P0_CLKG_FORCE, data: 0x3);
1653 rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, data: 0x1ffffff);
1654 rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, data: 0x1);
1655 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, data: 0x1);
1656 rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, data: 0x3ff);
1657 rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, data: 0x3);
1658 rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, data: 0x1);
1659 rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, data: 0x1);
1660 rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, data: 0x1);
1661 rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, data: 0x1);
1662 rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, data: 0x1);
1663 rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, data: 0x1);
1664
1665 _txck_force(rtwdev, path: RF_PATH_A, force: true, ck: DAC_960M);
1666 _txck_force(rtwdev, path: RF_PATH_B, force: true, ck: DAC_960M);
1667 _rxck_force(rtwdev, path: RF_PATH_A, force: true, ck: ADC_1920M);
1668 _rxck_force(rtwdev, path: RF_PATH_B, force: true, ck: ADC_1920M);
1669
1670 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, data: 0x1);
1671 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, data: 0x2);
1672
1673 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, data: 0x1);
1674 rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, data: 0x1);
1675 udelay(usec: 10);
1676 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, data: 0x1f);
1677 udelay(usec: 10);
1678 rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, data: 0x13);
1679 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, data: 0x0001);
1680 udelay(usec: 10);
1681 rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, data: 0x0041);
1682 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, data: 0x1);
1683 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, data: 0x3333);
1684}
1685
1686static void _iqk_init(struct rtw89_dev *rtwdev)
1687{
1688 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1689 u8 idx, path;
1690
1691 rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, data: 0x0);
1692
1693 if (iqk_info->is_iqk_init)
1694 return;
1695
1696 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n", __func__);
1697 iqk_info->is_iqk_init = true;
1698 iqk_info->is_nbiqk = false;
1699 iqk_info->iqk_fft_en = false;
1700 iqk_info->iqk_sram_en = false;
1701 iqk_info->iqk_cfir_en = false;
1702 iqk_info->iqk_xym_en = false;
1703 iqk_info->iqk_times = 0x0;
1704
1705 for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1706 iqk_info->iqk_channel[idx] = 0x0;
1707 for (path = 0; path < RTW8852BT_SS; path++) {
1708 iqk_info->lok_cor_fail[idx][path] = false;
1709 iqk_info->lok_fin_fail[idx][path] = false;
1710 iqk_info->iqk_tx_fail[idx][path] = false;
1711 iqk_info->iqk_rx_fail[idx][path] = false;
1712 iqk_info->iqk_mcc_ch[idx][path] = 0x0;
1713 iqk_info->iqk_table_idx[path] = 0x0;
1714 }
1715 }
1716}
1717
1718static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
1719{
1720 u32 rf_mode;
1721 u8 path;
1722 int ret;
1723
1724 for (path = 0; path < RF_PATH_MAX; path++) {
1725 if (!(kpath & BIT(path)))
1726 continue;
1727
1728 ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
1729 rf_mode != 2, 2, 5000, false,
1730 rtwdev, path, RR_MOD, RR_MOD_MASK);
1731 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
1732 fmt: "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
1733 }
1734}
1735
1736static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
1737 bool is_pause)
1738{
1739 if (!is_pause)
1740 return;
1741
1742 _wait_rx_mode(rtwdev, kpath: _kpath(rtwdev, phy_idx: band_idx));
1743}
1744
/* Run the complete IQK flow for one RF path: notify BT, back up the BB/RF
 * registers the calibration clobbers, switch MAC/BB into calibration mode,
 * execute the IQK, then restore everything and notify BT again.
 * NOTE(review): the @force parameter is currently unused in this body.
 */
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);

	/* Tell the BT coexistence core a one-shot WLAN RFK window starts. */
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852BT_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);

	/* Backup -> calibrate -> restore, strictly in this order. */
	_rfk_backup_bb_reg(rtwdev, backup_bb_val);
	_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_reload_bb_reg(rtwdev, backup_bb_val);
	_rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
1776
1777static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
1778 enum rtw89_chanctx_idx chanctx_idx)
1779{
1780 u8 kpath = _kpath(rtwdev, phy_idx);
1781
1782 switch (kpath) {
1783 case RF_A:
1784 _doiqk(rtwdev, force, phy_idx, path: RF_PATH_A, chanctx_idx);
1785 break;
1786 case RF_B:
1787 _doiqk(rtwdev, force, phy_idx, path: RF_PATH_B, chanctx_idx);
1788 break;
1789 case RF_AB:
1790 _doiqk(rtwdev, force, phy_idx, path: RF_PATH_A, chanctx_idx);
1791 _doiqk(rtwdev, force, phy_idx, path: RF_PATH_B, chanctx_idx);
1792 break;
1793 default:
1794 break;
1795 }
1796}
1797
1798static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
1799{
1800 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
1801 u8 val, kidx = dpk->cur_idx[path];
1802
1803 val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
1804
1805 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
1806 BIT(24), data: val);
1807
1808 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d[%d] DPK %s !!!\n", path,
1809 kidx, str_enable_disable(v: dpk->is_dpk_enable && !off));
1810}
1811
1812static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1813 enum rtw89_rf_path path, enum rtw8852bt_dpk_id id)
1814{
1815 u16 dpk_cmd;
1816 u32 val;
1817 int ret;
1818
1819 dpk_cmd = (id << 8) | (0x19 + (path << 4));
1820 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: dpk_cmd);
1821
1822 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
1823 1, 30000, false,
1824 rtwdev, R_RFK_ST, MASKBYTE0);
1825 if (ret)
1826 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] one-shot 1 over 30ms!!!!\n");
1827
1828 udelay(usec: 1);
1829 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, data: 0x00030000);
1830
1831 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
1832 1, 2000, false,
1833 rtwdev, R_RPT_COM, MASKLWORD);
1834 if (ret)
1835 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] one-shot 2 over 2ms!!!!\n");
1836
1837 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, data: 0x0);
1838 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
1839 fmt: "[DPK] one-shot for %s = 0x%04x\n",
1840 id == 0x06 ? "LBK_RXIQK" :
1841 id == 0x10 ? "SYNC" :
1842 id == 0x11 ? "MDPK_IDL" :
1843 id == 0x12 ? "MDPK_MPA" :
1844 id == 0x13 ? "GAIN_LOSS" :
1845 id == 0x14 ? "PWR_CAL" :
1846 id == 0x15 ? "DPK_RXAGC" :
1847 id == 0x16 ? "KIP_PRESET" :
1848 id == 0x17 ? "KIP_RESTORE" :
1849 "DPK_TXAGC", dpk_cmd);
1850}
1851
/* Re-run RX DC calibration on @path by toggling the DCK trigger level
 * 0 -> 1, then wait 600us for it to complete (vendor wait — TODO confirm).
 * NOTE(review): @phy is unused in this body.
 */
static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);

	udelay(600);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d RXDCK\n", path);
}
1862
/* Record the current channel parameters (band/channel/bandwidth) into the
 * DPK slot for @path so later DPK stages can key their behavior off them,
 * and log the calibration context.
 */
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    str_on_off(rtwdev->is_tssi_mode[path]),
		    str_on_off(rtwdev->dbcc_en),
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}
1886
1887static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
1888 enum rtw89_rf_path path, bool is_pause)
1889{
1890 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
1891 B_P0_TSSI_TRK_EN, data: is_pause);
1892
1893 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d TSSI %s\n", path,
1894 is_pause ? "pause" : "resume");
1895}
1896
/* Return the KIP block to normal operation after DPK: clear the one-shot
 * report selector and restore the KIP system configuration.  The extra
 * B_DPD_COM_OF write is only applied on silicon newer than CAV.
 */
static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);

	if (rtwdev->hal.cv > CHIP_CAV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8),
				       B_DPD_COM_OF, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}
1909
/* Loopback RX IQK used inside DPK: restore the channel config word
 * (@rf_18, saved by the caller from RR_CFGCH), switch RF into the
 * loopback mode, pick a TX attenuation from the current RXBB gain
 * (@cur_rxbb — higher gain gets more attenuation), run the LBK_RXIQK
 * one-shot, then undo the overrides and return RF to DPK mode (0x5).
 */
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path, u8 cur_rxbb, u32 rf_18)
{
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);

	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);

	/* Attenuation tier selected by RXBB gain. */
	if (cur_rxbb >= 0x11)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
	else if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
	else
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);

	rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);

	/* Settling time before triggering the one-shot (TODO confirm). */
	udelay(100);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* Tear down the loopback configuration. */
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
}
1946
/* Configure the RF front-end for DPK according to the band recorded for
 * this slot: 2 GHz uses the FATT attenuator, other bands use the SWATT
 * path plus extra LNA/IQ-generator setup.  Finally program the TX BB
 * bandwidth from the stored channel bandwidth.
 * NOTE(review): @gain is unused in this body.
 */
static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
		rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
	}

	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	/* TX BB bandwidth: stored bw enum + 1 (20M->1, 40M->2, 80M->3). */
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);
}
1971
1972static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
1973 enum rtw89_rf_path path, bool is_bypass)
1974{
1975 if (is_bypass) {
1976 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
1977 B_RXIQC_BYPASS2, data: 0x1);
1978 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
1979 B_RXIQC_BYPASS, data: 0x1);
1980 } else {
1981 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
1982 B_RXIQC_BYPASS2, data: 0x0);
1983 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
1984 B_RXIQC_BYPASS, data: 0x0);
1985 }
1986}
1987
1988static
1989void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
1990{
1991 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
1992
1993 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
1994 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, data: 0x0);
1995 else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
1996 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, data: 0x2);
1997 else
1998 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, data: 0x1);
1999
2000 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] TPG_Select for %s\n",
2001 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
2002 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
2003}
2004
2005static void _dpk_table_select(struct rtw89_dev *rtwdev,
2006 enum rtw89_rf_path path, u8 kidx, u8 gain)
2007{
2008 u8 val;
2009
2010 val = 0x80 + kidx * 0x20 + gain * 0x10;
2011 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, data: val);
2012 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
2013 fmt: "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
2014 gain, val);
2015}
2016
2017static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2018{
2019#define DPK_SYNC_TH_DC_I 200
2020#define DPK_SYNC_TH_DC_Q 200
2021#define DPK_SYNC_TH_CORR 170
2022 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2023 u8 corr_val, corr_idx;
2024 u16 dc_i, dc_q;
2025 u32 corr, dc;
2026
2027 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, data: 0x0);
2028
2029 corr = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
2030 corr_idx = u32_get_bits(v: corr, B_PRT_COM_CORI);
2031 corr_val = u32_get_bits(v: corr, B_PRT_COM_CORV);
2032
2033 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
2034 fmt: "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
2035 path, corr_idx, corr_val);
2036
2037 dpk->corr_idx[path][kidx] = corr_idx;
2038 dpk->corr_val[path][kidx] = corr_val;
2039
2040 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, data: 0x9);
2041
2042 dc = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
2043 dc_i = u32_get_bits(v: dc, B_PRT_COM_DCI);
2044 dc_q = u32_get_bits(v: dc, B_PRT_COM_DCQ);
2045
2046 dc_i = abs(sign_extend32(dc_i, 11));
2047 dc_q = abs(sign_extend32(dc_q, 11));
2048
2049 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d DC I/Q, = %d / %d\n",
2050 path, dc_i, dc_q);
2051
2052 dpk->dc_i[path][kidx] = dc_i;
2053 dpk->dc_q[path][kidx] = dc_q;
2054
2055 if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
2056 corr_val < DPK_SYNC_TH_CORR)
2057 return true;
2058 else
2059 return false;
2060}
2061
/* Kick the SYNC one-shot stage for @path.
 * NOTE(review): @kidx is unused here; kept for symmetry with the other
 * DPK stage helpers.
 */
static void _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, SYNC);
}
2067
/* Read the digital gain from report page 0 after SYNC.
 * NOTE(review): the value is extracted with the B_PRT_COM_DCI field mask,
 * the same field _dpk_sync_check() reads as DC-I on page 9 — presumably
 * the field layout is shared between pages; confirm against the report
 * register map.
 */
static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);

	return dgain;
}
2080
2081static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
2082{
2083 static const u16 bnd[15] = {
2084 0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
2085 0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
2086 };
2087 s8 offset;
2088
2089 if (dgain >= bnd[0])
2090 offset = 0x6;
2091 else if (bnd[0] > dgain && dgain >= bnd[1])
2092 offset = 0x6;
2093 else if (bnd[1] > dgain && dgain >= bnd[2])
2094 offset = 0x5;
2095 else if (bnd[2] > dgain && dgain >= bnd[3])
2096 offset = 0x4;
2097 else if (bnd[3] > dgain && dgain >= bnd[4])
2098 offset = 0x3;
2099 else if (bnd[4] > dgain && dgain >= bnd[5])
2100 offset = 0x2;
2101 else if (bnd[5] > dgain && dgain >= bnd[6])
2102 offset = 0x1;
2103 else if (bnd[6] > dgain && dgain >= bnd[7])
2104 offset = 0x0;
2105 else if (bnd[7] > dgain && dgain >= bnd[8])
2106 offset = 0xff;
2107 else if (bnd[8] > dgain && dgain >= bnd[9])
2108 offset = 0xfe;
2109 else if (bnd[9] > dgain && dgain >= bnd[10])
2110 offset = 0xfd;
2111 else if (bnd[10] > dgain && dgain >= bnd[11])
2112 offset = 0xfc;
2113 else if (bnd[11] > dgain && dgain >= bnd[12])
2114 offset = 0xfb;
2115 else if (bnd[12] > dgain && dgain >= bnd[13])
2116 offset = 0xfa;
2117 else if (bnd[13] > dgain && dgain >= bnd[14])
2118 offset = 0xf9;
2119 else if (bnd[14] > dgain)
2120 offset = 0xf8;
2121 else
2122 offset = 0x0;
2123
2124 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] DGain offset = %d\n", offset);
2125
2126 return offset;
2127}
2128
/* Select and latch the gain-loss report page, then return the gain-loss
 * index from the common report register.
 */
static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);

	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}
2136
/* Run the GAIN_LOSS one-shot for @path, then preselect the gain-loss
 * report page so subsequent report reads (e.g. _dpk_gainloss_read() or
 * _dpk_pas_read()) see the result.
 * NOTE(review): @kidx is unused in this body.
 */
static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
}
2145
/* Select the tone pattern for the current bandwidth, then preset the KIP
 * block for @path/@kidx via the KIP_PRESET one-shot.
 */
static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
}
2152
/* Power up the KIP block and its clocks and program the per-path CFIR
 * system register before any DPK one-shots are issued.
 */
static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
				enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
}
2162
2163static
2164u8 _dpk_txagc_check_8852bt(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 txagc)
2165{
2166 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2167
2168 if (txagc >= dpk->max_dpk_txagc[path])
2169 txagc = dpk->max_dpk_txagc[path];
2170
2171 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] Set TxAGC = 0x%x\n", txagc);
2172
2173 return txagc;
2174}
2175
/* Write the (clamped) TX AGC code to the RF register and run the
 * DPK_TXAGC one-shot, bracketed by the RFC test-mode enable, so the KIP
 * engine picks the new gain up.
 */
static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 txagc)
{
	u8 val;

	/* Clamp against the per-path maximum before programming RF. */
	val = _dpk_txagc_check_8852bt(rtwdev, path, txagc);
	rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, val);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
}
2189
/* Mirror the RF mode word (0x50220, as programmed by _dpk_rf_setting())
 * into the KIP block and run the DPK_RXAGC one-shot, bracketed by the
 * RFC test-mode enable.
 */
static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 0x50220);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
}
2198
2199static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2200 enum rtw89_rf_path path, u8 txagc, s8 gain_offset)
2201{
2202 txagc = rtw89_read_rf(rtwdev, rf_path: path, RR_TXAGC, RFREG_MASK);
2203
2204 if ((txagc - gain_offset) < DPK_TXAGC_LOWER)
2205 txagc = DPK_TXAGC_LOWER;
2206 else if ((txagc - gain_offset) > DPK_TXAGC_UPPER)
2207 txagc = DPK_TXAGC_UPPER;
2208 else
2209 txagc = txagc - gain_offset;
2210
2211 _dpk_kip_set_txagc(rtwdev, phy, path, txagc);
2212
2213 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
2214 gain_offset, txagc);
2215 return txagc;
2216}
2217
/* Read back the PA scan (PAS) report.
 *
 * With @is_check set: compare the power (I^2 + Q^2) at scan position 0x00
 * against position 0x1f and return true when the ratio reaches 8/5 —
 * used by the AGC loop as an over-criterion indicator.
 *
 * Without @is_check: dump all 32 scan entries for debugging.  val1/val2
 * then stay zero, so the final comparison is 0 >= 0 and the function
 * returns true; callers in that mode ignore the return value.
 */
static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			  u8 is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	/* Select the PAS report and stop the latch before reading. */
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);

	if (is_check) {
		/* Sample at scan index 0: I in the high word, Q in the low
		 * word, both 12-bit signed. */
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		/* Sample at scan index 0x1f. */
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	/* Power ratio criterion: |s0|^2 >= (8/5) * |s31|^2. */
	if (val1_i * val1_i + val1_q * val1_q >=
	    (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
		return true;

	return false;
}
2259
/* DPK AGC state machine: iterate the sync / RXBB-gain-adjust / gain-loss
 * stages until a usable TX AGC code is found for @path/@kidx.  Returns
 * the final TX AGC, or 0xff when the sync check fails.  The loop is
 * bounded by agc_cnt < 6 and a hard cap of 200 transitions.
 */
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
		   bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0;
	u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0;
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	int limit = 200;
	s8 offset = 0;
	u16 dgain = 0;
	u32 rf_18;

	tmp_txagc = init_txagc;

	/* Snapshot RXBB gain and the channel-config word for the loopback
	 * IQK helper. */
	tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);
	rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			/* Run SYNC; on the first pass prepare the RX CFIR
			 * (bypass below 80M, loopback RX IQK at 80M). */
			_dpk_sync(rtwdev, phy, path, kidx);
			if (agc_cnt == 0) {
				if (chan->band_width < 2)
					_dpk_bypass_rxcfir(rtwdev, path, true);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path,
						       tmp_rxbb, rf_18);
			}

			/* A failed sync aborts the whole AGC loop. */
			if (_dpk_sync_check(rtwdev, path, kidx) == true) {
				tmp_txagc = 0xff;
				goout = 1;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);
			offset = _dpk_dgain_mapping(rtwdev, dgain);

			/* Skip gain adjustment when only loss is wanted,
			 * RXBB already clamped, or no adjustment needed. */
			if (loss_only == 1 || limited_rxbb == 1 || offset == 0)
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			else
				step = DPK_AGC_STEP_GAIN_ADJ;
			break;
		case DPK_AGC_STEP_GAIN_ADJ:
			/* Move RXBB gain by the mapped offset, clamped to
			 * [0, 0x1f]; once clamped, stop adjusting. */
			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);

			if (tmp_rxbb + offset > 0x1f) {
				tmp_rxbb = 0x1f;
				limited_rxbb = 1;
			} else if (tmp_rxbb + offset < 0) {
				tmp_rxbb = 0;
				limited_rxbb = 1;
			} else {
				tmp_rxbb = tmp_rxbb + offset;
			}

			rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB, tmp_rxbb);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);

			/* 80M redoes the loopback IQK after RXBB changes. */
			if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
				_dpk_lbk_rxiqk(rtwdev, phy, path, tmp_rxbb, rf_18);
			if (dgain > 1922 || dgain < 342)
				step = DPK_AGC_STEP_SYNC_DGAIN;
			else
				step = DPK_AGC_STEP_GAIN_LOSS_IDX;

			agc_cnt++;
			break;
		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			_dpk_gainloss(rtwdev, phy, path, kidx);

			tmp_gl_idx = _dpk_gainloss_read(rtwdev);

			/* Classify the result: above criterion (index >= 7,
			 * or index 0 with PAS over-ratio), below criterion
			 * (index 0 otherwise), or usable as TX offset. */
			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, path, true)) ||
			    tmp_gl_idx >= 7)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;

			gl_cnt++;
			break;
		case DPK_AGC_STEP_GL_GT_CRITERION:
			/* Too much gain: drop TX AGC by 3, unless already at
			 * the lower bound (0x2e) or the per-path maximum. */
			if (tmp_txagc == 0x2e ||
			    tmp_txagc == dpk->max_dpk_txagc[path]) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@lower bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
							    tmp_txagc, 0x3);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			/* Too little gain: raise TX AGC by 2 (0xfe is -2 as
			 * s8), unless at the upper bound (0x3f) or maximum. */
			if (tmp_txagc == 0x3f || tmp_txagc == dpk->max_dpk_txagc[path]) {
				goout = 1;
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[DPK] Txagc@upper bound!!\n");
			} else {
				tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
							    tmp_txagc, 0xfe);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			/* Apply the gain-loss index as the final offset. */
			tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_txagc,
						    tmp_gl_idx);
			goout = 1;
			agc_cnt++;
			break;

		default:
			goout = 1;
			break;
		}
	} while (!goout && agc_cnt < 6 && limit-- > 0);

	/* Dump the full PA scan buffer when gain-loss kept failing. */
	if (gl_cnt >= 6)
		_dpk_pas_read(rtwdev, path, false);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc, tmp_rxbb);

	return tmp_txagc;
}
2394
/* Program the MDPD polynomial order for the IDL stage and remember the
 * hardware encoding in dpk->dpk_order[] so _dpk_fill_result() can write
 * it into the DPD configuration later.
 * order 0 = (5,3,1), 1 = (5,3,0), 2 = (5,0,0); anything else only logs.
 */
static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, u8 order)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	switch (order) {
	case 0: /* (5,3,1) */
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
		dpk->dpk_order[path] = 0x3;
		break;
	case 1: /* (5,3,0) */
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0);
		dpk->dpk_order[path] = 0x1;
		break;
	case 2: /* (5,0,0) */
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
		rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0);
		dpk->dpk_order[path] = 0x0;
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
		break;
	}

	/* NOTE(review): an invalid order also prints "(5,0,0)" here, right
	 * after the error message above. */
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
		    order == 0x0 ? "(5,3,1)" :
		    order == 0x1 ? "(5,3,0)" : "(5,0,0)");

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set MDPD order to 0x%x for IDL\n", order);
}
2432
2433static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2434 enum rtw89_rf_path path, u8 kidx, u8 gain)
2435{
2436 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2437
2438 if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
2439 dpk->bp[path][kidx].band == RTW89_BAND_5G)
2440 _dpk_set_mdpd_para(rtwdev, path, order: 0x2);
2441 else
2442 _dpk_set_mdpd_para(rtwdev, path, order: 0x0);
2443
2444 _dpk_one_shot(rtwdev, phy, path, id: MDPK_IDL);
2445}
2446
2447static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2448 enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
2449{
2450 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2451 u8 gs = dpk->dpk_gs[phy];
2452 u16 pwsf = 0x78;
2453
2454 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), BIT(8), data: kidx);
2455
2456 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
2457 fmt: "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n",
2458 txagc, pwsf, gs);
2459
2460 dpk->bp[path][kidx].txagc_dpk = txagc;
2461 rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
2462 mask: 0x3F << ((gain << 3) + (kidx << 4)), data: txagc);
2463
2464 dpk->bp[path][kidx].pwsf = pwsf;
2465 rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
2466 mask: 0x1FF << (gain << 4), data: pwsf);
2467
2468 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, data: 0x1);
2469 rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, data: 0x0);
2470
2471 dpk->bp[path][kidx].gs = gs;
2472 if (dpk->dpk_gs[phy] == 0x7f)
2473 rtw89_phy_write32_mask(rtwdev,
2474 R_DPD_CH0A + (path << 8) + (kidx << 2),
2475 MASKDWORD, data: 0x007f7f7f);
2476 else
2477 rtw89_phy_write32_mask(rtwdev,
2478 R_DPD_CH0A + (path << 8) + (kidx << 2),
2479 MASKDWORD, data: 0x005b5b5b);
2480
2481 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2482 B_DPD_ORDER_V1, data: dpk->dpk_order[path]);
2483
2484 rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, data: 0x0);
2485 rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, data: 0x0);
2486}
2487
2488static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2489 enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
2490{
2491 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: chanctx_idx);
2492 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2493 u8 idx, cur_band, cur_ch;
2494 bool is_reload = false;
2495
2496 cur_band = chan->band_type;
2497 cur_ch = chan->channel;
2498
2499 for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
2500 if (cur_band != dpk->bp[path][idx].band ||
2501 cur_ch != dpk->bp[path][idx].ch)
2502 continue;
2503
2504 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
2505 B_COEF_SEL_MDPD, data: idx);
2506 dpk->cur_idx[path] = idx;
2507 is_reload = true;
2508 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
2509 fmt: "[DPK] reload S%d[%d] success\n", path, idx);
2510 }
2511
2512 return is_reload;
2513}
2514
2515static
2516void _rf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
2517{
2518 if (is_bybb)
2519 rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RR_RSV1_RST, data: 0x1);
2520 else
2521 rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RR_RSV1_RST, data: 0x0);
2522}
2523
2524static
2525void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
2526{
2527 if (is_bybb)
2528 rtw89_write_rf(rtwdev, rf_path: path, RR_BBDC, RR_BBDC_SEL, data: 0x1);
2529 else
2530 rtw89_write_rf(rtwdev, rf_path: path, RR_BBDC, RR_BBDC_SEL, data: 0x0);
2531}
2532
/* Run the full DPK sequence for one RF path and store the result in the
 * currently selected backup slot (kidx). Returns true on failure.
 */
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 gain,
		      enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 txagc = 0x38, kidx = dpk->cur_idx[path];	/* 0x38: initial TXAGC trial value */
	bool is_fail = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);

	/* Take RF control away from BB for the duration of the calibration */
	_rf_direct_cntrl(rtwdev, path, false);
	_drf_direct_cntrl(rtwdev, path, false);

	_dpk_kip_pwr_clk_on(rtwdev, path);
	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);
	_dpk_rf_setting(rtwdev, gain, path, kidx);
	_dpk_rx_dck(rtwdev, phy, path);
	_dpk_kip_preset(rtwdev, phy, path, kidx);
	_dpk_kip_set_rxagc(rtwdev, phy, path);
	_dpk_table_select(rtwdev, path, kidx, gain);

	/* AGC search for a usable TX gain; 0xff signals failure */
	txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);

	_rfk_get_thermal(rtwdev, kidx, path);

	if (txagc == 0xff) {
		is_fail = true;
		goto _error;
	}

	_dpk_idl_mpa(rtwdev, phy, path, kidx, gain);

	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, RF_RX);
	_dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);

_error:
	if (!is_fail)
		dpk->bp[path][kidx].path_ok = 1;
	else
		dpk->bp[path][kidx].path_ok = 0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
		    is_fail ? "Check" : "Success");

	_dpk_onoff(rtwdev, path, is_fail);

	/* NOTE(review): repeats the debug line above verbatim — presumably to
	 * bracket _dpk_onoff() in the log; confirm it is intentional.
	 */
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
		    is_fail ? "Check" : "Success");

	return is_fail;
}
2585
/* Prepare both RF paths, run _dpk_main() on each, then restore BB/KIP/RF
 * state. All touched register groups are backed up first and reloaded after
 * so normal operation is unaffected by the calibration.
 */
static void _dpk_cal_select(struct rtw89_dev *rtwdev,
			    enum rtw89_phy_idx phy, u8 kpath,
			    enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u32 backup_kip_val[BACKUP_KIP_REGS_NR];
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
	bool reloaded[2] = {false};
	u8 path;

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		reloaded[path] = _dpk_reload_check(rtwdev, phy, path, chanctx_idx);
		if (!reloaded[path] && dpk->bp[path][0].ch != 0)
			/* no match but slots in use: flip to the spare slot */
			dpk->cur_idx[path] = !dpk->cur_idx[path];
		else
			_dpk_onoff(rtwdev, path, false);
	}

	_rfk_backup_bb_reg(rtwdev, backup_bb_val);
	_rfk_backup_kip_reg(rtwdev, backup_kip_val);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
		_dpk_information(rtwdev, phy, path, chanctx_idx);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
	}

	/* NOTE(review): 'path' equals DPK_RF_PATH_MAX_8852BT here (the loop
	 * above has finished); the AFE setting/restore and KIP restore helpers
	 * appear to key off kpath — confirm 'path' is a don't-care for them.
	 */
	_rfk_bb_afe_setting(rtwdev, phy, path, kpath);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++)
		_dpk_main(rtwdev, phy, path, 1, chanctx_idx);

	_rfk_bb_afe_restore(rtwdev, phy, path, kpath);

	_dpk_kip_restore(rtwdev, path);
	_rfk_reload_bb_reg(rtwdev, backup_bb_val);
	_rfk_reload_kip_reg(rtwdev, backup_kip_val);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		_rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}
2632
2633static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2634 enum rtw89_chanctx_idx chanctx_idx)
2635{
2636 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: chanctx_idx);
2637 struct rtw89_fem_info *fem = &rtwdev->fem;
2638
2639 if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
2640 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
2641 fmt: "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2642 return true;
2643 } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
2644 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
2645 fmt: "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2646 return true;
2647 } else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
2648 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
2649 fmt: "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
2650 return true;
2651 }
2652
2653 return false;
2654}
2655
2656static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2657{
2658 u8 path, kpath;
2659
2660 kpath = _kpath(rtwdev, phy_idx: phy);
2661
2662 for (path = 0; path < RTW8852BT_SS; path++) {
2663 if (kpath & BIT(path))
2664 _dpk_onoff(rtwdev, path, off: true);
2665 }
2666}
2667
/* Periodic DPK tracking: adjust the stored power scaling factor (pwsf) for
 * thermal drift since the calibration was taken, per RF path.
 */
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
	s8 delta_ther[2] = {};
	u8 trk_idx, txagc_rf;
	u8 path, kidx;
	u16 pwsf[2];
	u8 cur_ther;
	u32 tmp;

	for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		/* delta vs. thermal captured at DPK time; only meaningful when
		 * the slot was actually calibrated and a reading is available
		 */
		if (dpk->bp[path][kidx].ch && cur_ther)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		/* band-dependent scaling: x1.5 on 2G, x2.5 otherwise */
		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						 B_TXAGC_RF);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE2);
			txagc_bb_tp =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
						      B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);
			tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						    B_DPD_COM_OF);
			/* hardware applies the txagc offset itself: ignore it here */
			if (tmp == 0x1) {
				txagc_ofst = 0;
				rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
					    "[DPK_TRK] HW txagc offset mode\n");
			}

			if (txagc_rf && cur_ther)
				ini_diff = txagc_ofst + (delta_ther[path]);

			tmp = rtw89_phy_read32_mask(rtwdev,
						    R_P0_TXDPD + (path << 13),
						    B_P0_TXDPD);
			if (tmp == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
			}
		} else {
			/* non-TSSI mode: thermal delta alone, kept to 9 bits */
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		/* only write back when tracking is enabled and TX gain is valid */
		tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
		if (!tmp && txagc_rf) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_0, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_1, pwsf[1]);
		}
	}
}
2770
2771static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2772{
2773 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2774 u8 tx_scale, ofdm_bkof, path, kpath;
2775
2776 kpath = _kpath(rtwdev, phy_idx: phy);
2777
2778 ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
2779 tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);
2780
2781 if (ofdm_bkof + tx_scale >= 44) {
2782 /* move dpd backoff to bb, and set dpd backoff to 0 */
2783 dpk->dpk_gs[phy] = 0x7f;
2784 for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
2785 if (!(kpath & BIT(path)))
2786 continue;
2787
2788 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
2789 B_DPD_CFG, data: 0x7f7f7f);
2790 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
2791 fmt: "[RFK] Set S%d DPD backoff to 0dB\n", path);
2792 }
2793 } else {
2794 dpk->dpk_gs[phy] = 0x5b;
2795 }
2796}
2797
/* Clear bit 24 of both DPD coefficient-page registers while TSSI is set up.
 * NOTE(review): bit 24 presumably gates DPD on these pages — confirm against
 * the register map.
 */
static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A, BIT(24), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0B, BIT(24), 0x0);
}
2803
2804static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2805 enum rtw89_rf_path path, const struct rtw89_chan *chan)
2806{
2807 enum rtw89_band band = chan->band_type;
2808
2809 if (band == RTW89_BAND_2G)
2810 rtw89_write_rf(rtwdev, rf_path: path, RR_TXPOW, RR_TXPOW_TXG, data: 0x1);
2811 else
2812 rtw89_write_rf(rtwdev, rf_path: path, RR_TXPOW, RR_TXPOW_TXA, data: 0x1);
2813}
2814
2815static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2816 enum rtw89_rf_path path, const struct rtw89_chan *chan)
2817{
2818 enum rtw89_band band = chan->band_type;
2819
2820 rtw89_rfk_parser(rtwdev, tbl: &rtw8852bt_tssi_sys_defs_tbl);
2821
2822 if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
2823 rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, data: 0x1);
2824 else
2825 rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, data: 0x0);
2826
2827 if (path == RF_PATH_A)
2828 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2829 &rtw8852bt_tssi_sys_a_defs_2g_tbl,
2830 &rtw8852bt_tssi_sys_a_defs_5g_tbl);
2831 else
2832 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2833 &rtw8852bt_tssi_sys_b_defs_2g_tbl,
2834 &rtw8852bt_tssi_sys_b_defs_5g_tbl);
2835}
2836
2837static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
2838 enum rtw89_phy_idx phy,
2839 enum rtw89_rf_path path)
2840{
2841 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2842 &rtw8852bt_tssi_init_txpwr_defs_a_tbl,
2843 &rtw8852bt_tssi_init_txpwr_defs_b_tbl);
2844}
2845
2846static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
2847 enum rtw89_phy_idx phy,
2848 enum rtw89_rf_path path)
2849{
2850 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2851 &rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl,
2852 &rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl);
2853}
2854
2855static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2856 enum rtw89_rf_path path)
2857{
2858 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2859 &rtw8852bt_tssi_dck_defs_a_tbl,
2860 &rtw8852bt_tssi_dck_defs_b_tbl);
2861}
2862
/* Program the 64-entry thermal-offset LUT for one path. The lower half of
 * the table holds negated "down" deltas, the upper half holds "up" deltas,
 * anchored at the efuse thermal value; 0xff means no efuse thermal is
 * available and a flat (zero) table around code 32 is used instead.
 */
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
/* Pack four consecutive s8 thermal offsets into one little-endian u32 */
#define RTW8852BT_TSSI_GET_VAL(ptr, idx)			\
({								\
	s8 *__ptr = (ptr);					\
	u8 __idx = (idx), __i, __v;				\
	u32 __val = 0;						\
	for (__i = 0; __i < 4; __i++) {				\
		__v = (__ptr[__idx + __i]);			\
		__val |= (__v << (8 * __i));			\
	}							\
	__val;							\
})
	struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	/* pick the firmware power-tracking delta tables for this subband */
	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			/* no efuse thermal: anchor at mid-code 32, zero table */
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    R_P0_TSSI_BASE + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER,
					       thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			/* entries 0..31: negated "down" deltas, clamped at the
			 * last table element when the index runs out
			 */
			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			/* entries 63..32: "up" deltas, filled backwards */
			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}
		/* pulse the RFCTM-ready strobe to latch the table */
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);

	} else {
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER,
					       thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef RTW8852BT_TSSI_GET_VAL
}
3022
3023static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3024 enum rtw89_rf_path path)
3025{
3026 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3027 &rtw8852bt_tssi_dac_gain_defs_a_tbl,
3028 &rtw8852bt_tssi_dac_gain_defs_b_tbl);
3029}
3030
3031static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3032 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3033{
3034 enum rtw89_band band = chan->band_type;
3035
3036 if (path == RF_PATH_A)
3037 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
3038 &rtw8852bt_tssi_slope_a_defs_2g_tbl,
3039 &rtw8852bt_tssi_slope_a_defs_5g_tbl);
3040 else
3041 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
3042 &rtw8852bt_tssi_slope_b_defs_2g_tbl,
3043 &rtw8852bt_tssi_slope_b_defs_5g_tbl);
3044}
3045
/* Load the default TSSI alignment table for the path's band/channel range.
 * NOTE(review): path A keys the 2G case off band_type while path B uses the
 * 1-14 channel range — presumably equivalent, but worth confirming. The
 * 'all' parameter is unused here.
 */
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path, bool all,
				    const struct rtw89_chan *chan)
{
	enum rtw89_band band = chan->band_type;
	const struct rtw89_rfk_tbl *tbl = NULL;
	u8 ch = chan->channel;

	if (path == RF_PATH_A) {
		if (band == RTW89_BAND_2G)
			tbl = &rtw8852bt_tssi_align_a_2g_all_defs_tbl;
		else if (ch >= 36 && ch <= 64)
			tbl = &rtw8852bt_tssi_align_a_5g1_all_defs_tbl;
		else if (ch >= 100 && ch <= 144)
			tbl = &rtw8852bt_tssi_align_a_5g2_all_defs_tbl;
		else if (ch >= 149 && ch <= 177)
			tbl = &rtw8852bt_tssi_align_a_5g3_all_defs_tbl;
	} else {
		if (ch >= 1 && ch <= 14)
			tbl = &rtw8852bt_tssi_align_b_2g_all_defs_tbl;
		else if (ch >= 36 && ch <= 64)
			tbl = &rtw8852bt_tssi_align_b_5g1_all_defs_tbl;
		else if (ch >= 100 && ch <= 144)
			tbl = &rtw8852bt_tssi_align_b_5g2_all_defs_tbl;
		else if (ch >= 149 && ch <= 177)
			tbl = &rtw8852bt_tssi_align_b_5g3_all_defs_tbl;
	}

	/* channels outside every range load nothing */
	if (tbl)
		rtw89_rfk_parser(rtwdev, tbl);
}
3077
3078static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3079 enum rtw89_rf_path path)
3080{
3081 rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3082 &rtw8852bt_tssi_slope_defs_a_tbl,
3083 &rtw8852bt_tssi_slope_defs_b_tbl);
3084}
3085
3086static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3087 enum rtw89_rf_path path)
3088{
3089 if (path == RF_PATH_A)
3090 rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, data: 0x0);
3091 else
3092 rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, data: 0x0);
3093}
3094
/* Set the TXAGC-offset moving-average mix value for one path.
 * NOTE(review): the B-path write uses the B_P1_RFCTM_DEL mask where the
 * A-path uses B_P0_TSSI_MV_MIX — assumed intentional; confirm against the
 * register map.
 */
static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s path=%d\n", __func__,
		    path);

	if (path == RF_PATH_A)
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
				       B_P0_TSSI_MV_MIX, 0x010);
	else
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
				       B_P1_RFCTM_DEL, 0x010);
}
3109
/* Enable TSSI power tracking on both RF paths: per-path tracking and
 * moving-average setup, then a disable/enable pulse of the TSSI engine and
 * the offset logic. Sets rtwdev->is_tssi_mode[] accordingly.
 */
static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852BT; i++) {
		_tssi_set_tssi_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		if (i == RF_PATH_A) {
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
					       B_P0_TSSI_MV_CLR, 0x0);
			/* toggle the TSSI engine off then on */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_RFC, 0x3);

			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT, 0xc0);
			/* toggle offset logic off then on to latch 0xc0 */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_A] = true;
		} else {
			/* same sequence on the path-B register set */
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
					       B_P1_TSSI_MV_CLR, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_RFC, 0x3);

			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT, 0xc0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_B] = true;
		}
	}
}
3161
/* Disable TSSI power tracking on both paths and clear the moving average;
 * mirrors the enable sequence in _tssi_enable().
 */
static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);

	rtwdev->is_tssi_mode[RF_PATH_A] = false;
	rtwdev->is_tssi_mode[RF_PATH_B] = false;
}
3174
3175static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3176{
3177 switch (ch) {
3178 case 1 ... 2:
3179 return 0;
3180 case 3 ... 5:
3181 return 1;
3182 case 6 ... 8:
3183 return 2;
3184 case 9 ... 11:
3185 return 3;
3186 case 12 ... 13:
3187 return 4;
3188 case 14:
3189 return 5;
3190 }
3191
3192 return 0;
3193}
3194
/* Some channels sit between two OFDM TSSI groups; their DE value is the
 * average of the two neighbouring groups. Such "extra" groups are tagged
 * with bit 31 and carry the index of the first neighbour; IDX2 is always
 * IDX1 + 1.
 */
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3200
/* Map a channel to its OFDM TSSI DE group. Channels falling between two
 * groups return TSSI_EXTRA_GROUP(n), meaning "average groups n and n+1".
 * Unknown channels fall back to group 0.
 */
static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
3268
3269static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3270{
3271 switch (ch) {
3272 case 1 ... 8:
3273 return 0;
3274 case 9 ... 14:
3275 return 1;
3276 case 36 ... 48:
3277 return 2;
3278 case 52 ... 64:
3279 return 3;
3280 case 100 ... 112:
3281 return 4;
3282 case 116 ... 128:
3283 return 5;
3284 case 132 ... 144:
3285 return 6;
3286 case 149 ... 177:
3287 return 7;
3288 }
3289
3290 return 0;
3291}
3292
3293static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3294 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3295{
3296 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3297 u8 ch = chan->channel;
3298 u32 gidx, gidx_1st, gidx_2nd;
3299 s8 de_1st;
3300 s8 de_2nd;
3301 s8 val;
3302
3303 gidx = _tssi_get_ofdm_group(rtwdev, ch);
3304
3305 rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI,
3306 fmt: "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);
3307
3308 if (IS_TSSI_EXTRA_GROUP(gidx)) {
3309 gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3310 gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3311 de_1st = tssi_info->tssi_mcs[path][gidx_1st];
3312 de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
3313 val = (de_1st + de_2nd) / 2;
3314
3315 rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI,
3316 fmt: "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3317 path, val, de_1st, de_2nd);
3318 } else {
3319 val = tssi_info->tssi_mcs[path][gidx];
3320
3321 rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI,
3322 fmt: "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3323 }
3324
3325 return val;
3326}
3327
3328static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3329 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3330{
3331 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3332 u8 ch = chan->channel;
3333 u32 tgidx, tgidx_1st, tgidx_2nd;
3334 s8 tde_1st;
3335 s8 tde_2nd;
3336 s8 val;
3337
3338 tgidx = _tssi_get_trim_group(rtwdev, ch);
3339
3340 rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI,
3341 fmt: "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3342 path, tgidx);
3343
3344 if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3345 tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3346 tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3347 tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3348 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3349 val = (tde_1st + tde_2nd) / 2;
3350
3351 rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI,
3352 fmt: "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3353 path, val, tde_1st, tde_2nd);
3354 } else {
3355 val = tssi_info->tssi_trim[path][tgidx];
3356
3357 rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI,
3358 fmt: "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3359 path, val);
3360 }
3361
3362 return val;
3363}
3364
/* Program the per-path TSSI DE (offset) registers from efuse data: the CCK
 * long/short entries and every OFDM bandwidth variant. Each programmed
 * value is the efuse group DE plus the per-channel trim.
 */
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				  const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u8 gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
		/* CCK: group DE + trim, written to long and short preambles */
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
						  _TSSI_DE_MASK));

		/* OFDM/MCS: same value goes to all bandwidth variants */
		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i],
				       _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
						  _TSSI_DE_MASK));
	}
}
3420
/* Dump the eight TSSI PA-K alignment result registers for @path in one
 * debug message. Path B registers sit at a 0x2000 byte stride from path
 * A, hence the (path << 13) address offset on every register.
 */
static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
		    "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
		    R_TSSI_PA_K1 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K1 + (path << 13)),
		    R_TSSI_PA_K2 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K2 + (path << 13)),
		    R_P0_TSSI_ALIM1 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM1 + (path << 13)),
		    R_P0_TSSI_ALIM3 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM3 + (path << 13)),
		    R_TSSI_PA_K5 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K5 + (path << 13)),
		    R_P0_TSSI_ALIM2 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM2 + (path << 13)),
		    R_P0_TSSI_ALIM4 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM4 + (path << 13)),
		    R_TSSI_PA_K8 + (path << 13),
		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K8 + (path << 13)));
}
3443
/* Re-apply previously calibrated TSSI alignment values for @path.
 *
 * Maps the current channel onto one of the four ALIMK band buckets
 * (channels outside the known ranges fall back to the 2G bucket), and if
 * a prior _tssi_alimentk() run stored values for that bucket, writes the
 * four cached ALIM registers back to hardware. Always dumps the final
 * register state for debugging.
 */
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy, enum rtw89_rf_path path,
				const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 channel = chan->channel;
	u8 band;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "======>%s phy=%d path=%d\n", __func__, phy, path);

	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	if (tssi_info->alignment_done[path][band]) {
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
				       data: tssi_info->alignment_value[path][band][0]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
				       data: tssi_info->alignment_value[path][band][1]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
				       data: tssi_info->alignment_value[path][band][2]);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
				       data: tssi_info->alignment_value[path][band][3]);
	}

	_tssi_alimentk_dump_result(rtwdev, path);
}
3479
3480static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3481 enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
3482 u8 enable, const struct rtw89_chan *chan)
3483{
3484 enum rtw89_rf_path_bit rx_path;
3485
3486 if (path == RF_PATH_A)
3487 rx_path = RF_A;
3488 else if (path == RF_PATH_B)
3489 rx_path = RF_B;
3490 else if (path == RF_PATH_AB)
3491 rx_path = RF_AB;
3492 else
3493 rx_path = RF_ABCD; /* don't change path, but still set others */
3494
3495 if (enable) {
3496 rtw8852bx_bb_set_plcp_tx(rtwdev);
3497 rtw8852bx_bb_cfg_tx_path(rtwdev, tx_path: path);
3498 rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
3499 rtw8852bx_bb_set_power(rtwdev, pwr_dbm, idx: phy);
3500 }
3501
3502 rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt: cnt, period, tx_time: 20, idx: phy, chan);
3503}
3504
3505static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
3506 enum rtw89_phy_idx phy, const u32 reg[],
3507 u32 reg_backup[], u32 reg_num)
3508{
3509 u32 i;
3510
3511 for (i = 0; i < reg_num; i++) {
3512 reg_backup[i] = rtw89_phy_read32_mask(rtwdev, addr: reg[i], MASKDWORD);
3513
3514 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
3515 fmt: "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i],
3516 reg_backup[i]);
3517 }
3518}
3519
3520static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
3521 enum rtw89_phy_idx phy, const u32 reg[],
3522 u32 reg_backup[], u32 reg_num)
3523
3524{
3525 u32 i;
3526
3527 for (i = 0; i < reg_num; i++) {
3528 rtw89_phy_write32_mask(rtwdev, addr: reg[i], MASKDWORD, data: reg_backup[i]);
3529
3530 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
3531 fmt: "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i],
3532 reg_backup[i]);
3533 }
3534}
3535
/* Map a channel number to its per-channel TSSI alignment backup slot.
 *
 * 2 GHz channels 1-14 occupy slots 0-13; the three 5 GHz sub-bands follow
 * as contiguous groups of every-other-channel slots. Channels outside the
 * known ranges map to slot 0.
 */
static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
{
	if (channel >= 1 && channel <= 14)
		return channel - 1;
	if (channel >= 36 && channel <= 64)
		return (channel - 36) / 2 + 14;
	if (channel >= 100 && channel <= 144)
		return (channel - 100) / 2 + 15 + 14;
	if (channel >= 149 && channel <= 177)
		return (channel - 149) / 2 + 38 + 14;

	return 0;
}
3553
/* Collect the TSSI codeword reports used by alignment calibration.
 *
 * For each of the RTW8852BT_TSSI_PATH_NR measurement rounds: pulse the
 * TSSI trigger, start PMAC HW TX at power[j], poll for the
 * codeword-report-ready bit (up to 100 * 30us), read the codeword into
 * tssi_cw_rpt[j], then stop HW TX. Returns false if the ready bit never
 * asserts (HW TX is stopped before bailing out); true on success.
 */
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, const s16 *power,
				u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
	u32 tx_counter, tx_counter_tmp;
	const int retry = 100;
	u32 tmp;
	int j, k;

	for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
		/* Pulse the TSSI enable bit to restart measurement */
		rtw89_phy_write32_mask(rtwdev, addr: _tssi_trigger[path], B_P0_TSSI_EN, data: 0x0);
		rtw89_phy_write32_mask(rtwdev, addr: _tssi_trigger[path], B_P0_TSSI_EN, data: 0x1);

		tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);

		tmp = rtw89_phy_read32_mask(rtwdev, addr: _tssi_trigger[path], MASKDWORD);
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[TSSI PA K] 0x%x = 0x%08x path=%d\n",
			    _tssi_trigger[path], tmp, path);

		/* Rounds after the first pass RF_PATH_ABCD so the TX/RX path
		 * configuration made on round 0 is left untouched by
		 * _tssi_hw_tx().
		 */
		if (j == 0)
			_tssi_hw_tx(rtwdev, phy, path, cnt: 100, period: 5000, pwr_dbm: power[j], enable: true,
				    chan);
		else
			_tssi_hw_tx(rtwdev, phy, path: RF_PATH_ABCD, cnt: 100, period: 5000, pwr_dbm: power[j], enable: true,
				    chan);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[TSSI PA K] First HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);

		/* Poll for the codeword-report-ready flag */
		for (k = 0; k < retry; k++) {
			tmp = rtw89_phy_read32_mask(rtwdev, addr: _tssi_cw_rpt_addr[path],
						    B_TSSI_CWRPT_RDY);
			if (tmp)
				break;

			udelay(usec: 30);

			tx_counter_tmp =
				rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
			tx_counter_tmp -= tx_counter;

			rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
				    fmt: "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
				    k, tx_counter_tmp, path);
		}

		if (k >= retry) {
			rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
				    fmt: "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
				    k, path);

			/* Stop HW TX before reporting failure */
			_tssi_hw_tx(rtwdev, phy, path, cnt: 100, period: 5000, pwr_dbm: power[j], enable: false, chan);
			return false;
		}

		tssi_cw_rpt[j] =
			rtw89_phy_read32_mask(rtwdev, addr: _tssi_cw_rpt_addr[path],
					      B_TSSI_CWRPT);

		_tssi_hw_tx(rtwdev, phy, path, cnt: 100, period: 5000, pwr_dbm: power[j], enable: false, chan);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);
	}

	return true;
}
3630
/* TSSI alignment calibration (PA K) for one RF path.
 *
 * Backs up the affected BB registers, runs HW TX at two power levels to
 * collect TSSI codeword reports, derives three alignment offsets relative
 * to the chip's default codewords, programs them into the ALIM registers,
 * and caches the resulting register contents per band and per channel so
 * _tssi_alimentk_done() can restore them later. On any failure the BB
 * state is restored and the function returns without storing results.
 */
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
	static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
				      0x78e4, 0x49c0, 0x0d18, 0x0d80};
	static const s16 power_2g[4] = {48, 20, 4, -8};
	static const s16 power_5g[4] = {48, 20, 4, 4};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
	u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {};
	u8 channel = chan->channel;
	u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
	struct rtw8852bx_bb_tssi_bak tssi_bak;
	s32 aliment_diff, tssi_cw_default;
	u32 bb_reg_backup[8] = {};
	ktime_t start_time;
	const s16 *power;
	s64 this_time;
	u8 band;
	bool ok;
	u32 tmp;
	u8 j;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "======> %s channel=%d path=%d\n", __func__, channel,
		    path);

	start_time = ktime_get();

	if (chan->band_type == RTW89_BAND_2G)
		power = power_2g;
	else
		power = power_5g;

	/* Channel -> ALIMK band bucket (unknown channels fall back to 2G) */
	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	rtw8852bx_bb_backup_tssi(rtwdev, idx: phy, bak: &tssi_bak);
	_tssi_backup_bb_registers(rtwdev, phy, reg: bb_reg, reg_backup: bb_reg_backup,
				  ARRAY_SIZE(bb_reg_backup));

	/* Configure TSSI averaging windows for the measurement */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, data: 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, data: 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, data: 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, data: 0x2);

	ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
	if (!ok)
		goto out;

	for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[TSSI PA K] power[%d]=%d tssi_cw_rpt[%d]=%d\n", j,
			    power[j], j, tssi_cw_rpt[j]);
	}

	/* Offset 1: measured codeword delta vs the expected delta implied
	 * by the power difference (2 codewords per dBm), relative to the
	 * chip default; offsets 2 and 3 apply the same diff to their own
	 * default codewords (all defaults are 9-bit signed).
	 */
	tmp = rtw89_phy_read32_mask(rtwdev, addr: _tssi_cw_default_addr[path][1],
				    mask: _tssi_cw_default_mask[1]);
	tssi_cw_default = sign_extend32(value: tmp, index: 8);
	tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
			     tssi_cw_rpt[1] + tssi_cw_default;
	aliment_diff = tssi_alim_offset_1 - tssi_cw_default;

	tmp = rtw89_phy_read32_mask(rtwdev, addr: _tssi_cw_default_addr[path][2],
				    mask: _tssi_cw_default_mask[2]);
	tssi_cw_default = sign_extend32(value: tmp, index: 8);
	tssi_alim_offset_2 = tssi_cw_default + aliment_diff;

	tmp = rtw89_phy_read32_mask(rtwdev, addr: _tssi_cw_default_addr[path][3],
				    mask: _tssi_cw_default_mask[3]);
	tssi_cw_default = sign_extend32(value: tmp, index: 8);
	tssi_alim_offset_3 = tssi_cw_default + aliment_diff;

	if (path == RF_PATH_A) {
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, data: tmp);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, data: tmp);

		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
	} else {
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, data: tmp);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, data: tmp);

		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
	}

	/* Cache results per band (for scan restore) ... */
	tssi_info->alignment_done[path][band] = true;
	tssi_info->alignment_value[path][band][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	/* ... and per channel */
	tssi_info->check_backup_aligmk[path][ch_idx] = true;
	tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM1 + (path << 13),
		    tssi_info->alignment_value[path][band][0]);
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM3 + (path << 13),
		    tssi_info->alignment_value[path][band][1]);
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM2 + (path << 13),
		    tssi_info->alignment_value[path][band][2]);
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM4 + (path << 13),
		    tssi_info->alignment_value[path][band][3]);

out:
	/* Restore BB state whether or not calibration succeeded */
	_tssi_reload_bb_registers(rtwdev, phy, reg: bb_reg, reg_backup: bb_reg_backup,
				  ARRAY_SIZE(bb_reg_backup));
	rtw8852bx_bb_restore_tssi(rtwdev, idx: phy, bak: &tssi_bak);
	rtw8852bx_bb_tx_mode_switch(rtwdev, idx: phy, mode: 0);

	this_time = ktime_us_delta(later: ktime_get(), earlier: start_time);
	tssi_info->tssi_alimk_time += this_time;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[TSSI PA K] %s processing time = %lld us (acc = %llu us)\n",
		    __func__, this_time, tssi_info->tssi_alimk_time);
}
3792
3793void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev)
3794{
3795 struct rtw89_dpk_info *dpk = &rtwdev->dpk;
3796
3797 u8 path;
3798
3799 for (path = 0; path < 2; path++) {
3800 dpk->cur_idx[path] = 0;
3801 dpk->max_dpk_txagc[path] = 0x3F;
3802 }
3803
3804 dpk->is_dpk_enable = true;
3805 dpk->is_dpk_reload_en = false;
3806 _set_dpd_backoff(rtwdev, phy: RTW89_PHY_0);
3807}
3808
3809void rtw8852bt_rck(struct rtw89_dev *rtwdev)
3810{
3811 u8 path;
3812
3813 for (path = 0; path < RF_PATH_NUM_8852BT; path++)
3814 _rck(rtwdev, path);
3815}
3816
/* Run DAC calibration (DACK) on PHY 0, notifying BT coexistence so the
 * coex arbiter can yield the antenna around the calibration window.
 */
void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx: RTW89_PHY_0, paths: 0, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_DACK, state: BTC_WRFK_START);
	_dac_cal(rtwdev, force: false);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_DACK, state: BTC_WRFK_STOP);
}
3825
/* Run IQ calibration (IQK) on @phy_idx.
 *
 * Scheduled TX is stopped and RX mode is awaited before calibrating; TX
 * is resumed and BT coexistence is notified afterwards.
 */
void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, paths: 0, chanctx_idx);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_IQK, state: BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, mac_idx: phy_idx, tx_en: &tx_en, sel: RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, kpath: _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	_iqk(rtwdev, phy_idx, force: false, chanctx_idx);

	rtw89_chip_resume_sch_tx(rtwdev, mac_idx: phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_IQK, state: BTC_WRFK_STOP);
}
3842
/* Run RX DC offset calibration (RX DCK) on @phy_idx, with scheduled TX
 * paused and BT coexistence notified around the calibration window.
 */
void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		      enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, paths: 0, chanctx_idx);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_RXDCK, state: BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, mac_idx: phy_idx, tx_en: &tx_en, sel: RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, kpath: _kpath(rtwdev, phy_idx));

	_rx_dck(rtwdev, phy: phy_idx);

	rtw89_chip_resume_sch_tx(rtwdev, mac_idx: phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_RXDCK, state: BTC_WRFK_STOP);
}
3858
/* Run digital pre-distortion calibration (DPK) on both paths, unless the
 * current configuration requires bypassing it entirely.
 */
void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER);

	if (_dpk_bypass_check(rtwdev, phy: phy_idx, chanctx_idx))
		_dpk_force_bypass(rtwdev, phy: phy_idx);
	else
		_dpk_cal_select(rtwdev, phy: phy_idx, kpath: RF_AB, chanctx_idx);
}
3870
/* Periodic DPK tracking entry point; thin wrapper around _dpk_track(). */
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
3875
/* Full TSSI (TX power tracking) initialization and calibration for @phy.
 *
 * With TSSI and DPK disabled, each path gets its RF/BB/tmeter/DAC/slope
 * configuration; when @hwtx_en is set, the alignment calibration
 * (_tssi_alimentk) is also run with scheduled TX paused. Finally TSSI is
 * re-enabled and the efuse DE values are programmed. The two R_DPD_CH0x
 * registers are preserved across the whole sequence.
 */
void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		    bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: chanctx_idx);
	static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B};
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx: phy, paths: RF_AB, chanctx_idx);
	u32 reg_backup[2] = {};
	u32 tx_en;
	u8 i;

	_tssi_backup_bb_registers(rtwdev, phy, reg, reg_backup, reg_num: 2);
	rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, fmt: "[TSSI] %s: phy=%d\n", __func__, phy);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_IQK, state: BTC_WRFK_ONESHOT_START);

	_tssi_dpk_off(rtwdev, phy);
	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
		_tssi_rf_setting(rtwdev, phy, path: i, chan);
		_tssi_set_sys(rtwdev, phy, path: i, chan);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, path: i);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, path: i);
		_tssi_set_dck(rtwdev, phy, path: i);
		_tssi_set_tmeter_tbl(rtwdev, phy, path: i, chan);
		_tssi_set_dac_gain_tbl(rtwdev, phy, path: i);
		_tssi_slope_cal_org(rtwdev, phy, path: i, chan);
		_tssi_alignment_default(rtwdev, phy, path: i, all: true, chan);
		_tssi_set_tssi_slope(rtwdev, phy, path: i);

		/* Alignment K requires quiet air: pause scheduled TX and
		 * TMAC around it.
		 */
		rtw89_chip_stop_sch_tx(rtwdev, mac_idx: phy, tx_en: &tx_en, sel: RTW89_SCH_TX_SEL_ALL);
		_tmac_tx_pause(rtwdev, band_idx: phy, is_pause: true);
		if (hwtx_en)
			_tssi_alimentk(rtwdev, phy, path: i, chan);
		_tmac_tx_pause(rtwdev, band_idx: phy, is_pause: false);
		rtw89_chip_resume_sch_tx(rtwdev, mac_idx: phy, tx_en);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy, chan);

	_tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, reg_num: 2);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_IQK, state: BTC_WRFK_ONESHOT_STOP);
}
3920
/* Lightweight TSSI reconfiguration used during scan channel switches.
 *
 * Skips the expensive calibration: only the RF/system/tmeter settings are
 * reprogrammed, and alignment values are restored from the cache when a
 * prior calibration exists for this band, falling back to defaults
 * otherwise.
 */
void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 channel = chan->channel;
	u8 band;
	u32 i;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "======>%s phy=%d channel=%d\n", __func__, phy, channel);

	/* Channel -> ALIMK band bucket (unknown channels fall back to 2G) */
	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) {
		_tssi_rf_setting(rtwdev, phy, path: i, chan);
		_tssi_set_sys(rtwdev, phy, path: i, chan);
		_tssi_set_tmeter_tbl(rtwdev, phy, path: i, chan);

		if (tssi_info->alignment_done[i][band])
			_tssi_alimentk_done(rtwdev, phy, path: i, chan);
		else
			_tssi_alignment_default(rtwdev, phy, path: i, all: true, chan);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy, chan);
}
3959
/* Restore the default TX AGC offset after a scan ends.
 *
 * When @enable is true (scan start) nothing needs to be done and the
 * function returns early. On scan end it resets the per-path TSSI offset
 * to 0xc0, pulses the offset-enable bits to latch the value, and replays
 * the cached alignment values for both paths.
 */
static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
					 enum rtw89_phy_idx phy, bool enable,
					 enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: chanctx_idx);
	u8 channel = chan->channel;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "======> %s ch=%d\n",
		    __func__, channel);

	if (enable)
		return;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, data: 0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, data: 0xc0);
	/* Toggle the offset-enable bits so the new offset takes effect */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, data: 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, data: 0x1);

	_tssi_alimentk_done(rtwdev, phy, path: RF_PATH_A, chan);
	_tssi_alimentk_done(rtwdev, phy, path: RF_PATH_B, chan);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "======> %s SCAN_END\n", __func__);
}
3998
3999void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
4000 enum rtw89_phy_idx phy_idx,
4001 enum rtw89_chanctx_idx chanctx_idx)
4002{
4003 if (scan_start)
4004 rtw8852bt_tssi_default_txagc(rtwdev, phy: phy_idx, enable: true, chanctx_idx);
4005 else
4006 rtw8852bt_tssi_default_txagc(rtwdev, phy: phy_idx, enable: false, chanctx_idx);
4007}
4008
/* Program the RF bandwidth field of RF reg 0x18 for one path.
 *
 * @dav selects which copy of the channel config register is written
 * (RR_CFGCH vs RR_CFGCH_V1). Bails out if the register reads back as
 * invalid. Unknown bandwidths leave the BW field cleared (only a debug
 * message is emitted — no early return, the write below still happens).
 */
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			enum rtw89_bandwidth bw, bool dav)
{
	u32 rf_reg18;
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK);
	if (rf_reg18 == INV_RF_DATA) {
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[RFK]Invalid RF_0x18 for Path-%d\n", path);
		return;
	}
	rf_reg18 &= ~RR_CFGCH_BW;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_5:
	case RTW89_CHANNEL_WIDTH_10:
	case RTW89_CHANNEL_WIDTH_20:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
		break;
	default:
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RFK]Fail to set CH\n");
	}

	/* Precedence note: ~(...) & RFREG_MASK is evaluated first, so this
	 * keeps only register bits that are in RFREG_MASK and outside the
	 * cleared set; BW2 is then forced back on.
	 */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;
	rtw89_write_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK, data: rf_reg18);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RFK] set %x at path%d, %x =0x%x\n",
		    bw, path, reg18_addr,
		    rtw89_read_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK));
}
4050
4051static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4052 enum rtw89_bandwidth bw)
4053{
4054 _bw_setting(rtwdev, path: RF_PATH_A, bw, dav: true);
4055 _bw_setting(rtwdev, path: RF_PATH_B, bw, dav: true);
4056 _bw_setting(rtwdev, path: RF_PATH_A, bw, dav: false);
4057 _bw_setting(rtwdev, path: RF_PATH_B, bw, dav: false);
4058}
4059
/* Write the channel config word to S0 RF reg 0x18 with the LCK trigger
 * armed, then poll the synthesizer-busy flag (up to 1 ms).
 *
 * Returns true on timeout (lock NOT confirmed), false on success — note
 * the inverted sense; callers use it to decide whether to run
 * _lck_check().
 */
static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
{
	u32 tmp;
	int ret;

	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, data: 0x1);
	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_CFGCH, RFREG_MASK, data: val);

	ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
				       false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
	if (ret)
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]LCK timeout\n");

	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, data: 0x0);
	return !!ret;
}
4076
/* Escalating recovery when the synthesizer fails to lock.
 *
 * Re-checks the lock flag between stages: (1) reset the SYN MMD, (2)
 * rewrite RF 0x18 through _set_s0_arfc18(), (3) power-cycle the SYN
 * block and rewrite 0x18 again, dumping VCO/SYNFB state for debugging.
 */
static void _lck_check(struct rtw89_dev *rtwdev)
{
	u32 tmp;

	if (rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]SYN MMD reset\n");

		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MMD, RR_MMD_RST_EN, data: 0x1);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, data: 0x0);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, data: 0x1);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MMD, RR_MMD_RST_EN, data: 0x0);
	}

	udelay(usec: 10);

	if (rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]re-set RF 0x18\n");

		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, data: 0x1);
		tmp = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, val: tmp);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, data: 0x0);
	}

	if (rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]SYN off/on\n");

		/* Read-back-then-rewrite refreshes the POW/SX registers */
		tmp = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_POW, RFREG_MASK);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_POW, RFREG_MASK, data: tmp);
		tmp = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_SX, RFREG_MASK);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_SX, RFREG_MASK, data: tmp);

		/* Power-cycle the synthesizer via the LUT-mod window */
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, data: 0x1);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_POW, RR_POW_SYN, data: 0x0);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_POW, RR_POW_SYN, data: 0x3);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, data: 0x0);

		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, data: 0x1);
		tmp = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, val: tmp);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, data: 0x0);

		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]0xb2=%x, 0xc5=%x\n",
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_VCO, RFREG_MASK),
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNFB, RFREG_MASK));
	}
}
4124
/* Set S0 channel word with the LDO select raised for the write, then run
 * the lock-check recovery if _set_s0_arfc18() did NOT time out.
 * NOTE(review): _set_s0_arfc18() returns true on timeout, so
 * `if (!timeout)` runs _lck_check() on the success path — confirm this
 * inverted-looking condition is intended.
 */
static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
{
	bool timeout;
	u32 bak;

	bak = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_LDO, RFREG_MASK);
	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LDO, RR_LDO_SEL, data: 0x1);
	timeout = _set_s0_arfc18(rtwdev, val);
	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LDO, RFREG_MASK, data: bak);
	if (!timeout)
		_lck_check(rtwdev);
}
4137
/* Program the channel number (and band bits for 5G) into RF reg 0x18.
 *
 * The S0/DAV copy goes through _set_ch() so the synthesizer lock is
 * supervised; all other copies are written directly. The lock-state
 * latch (RR_LCKST) is pulsed afterwards.
 */
static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			u8 central_ch, bool dav)
{
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
	bool is_2g_ch = central_ch <= 14;
	u32 rf_reg18;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK);
	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
		      RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);

	if (!is_2g_ch)
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
			    FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);

	/* ~(...) & RFREG_MASK is evaluated first (precedence), keeping only
	 * in-mask bits outside the cleared set; BW2 is then forced on.
	 */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;

	if (path == RF_PATH_A && dav)
		_set_ch(rtwdev, val: rf_reg18);
	else
		rtw89_write_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK, data: rf_reg18);

	rtw89_write_rf(rtwdev, rf_path: path, RR_LCKST, RR_LCKST_BIN, data: 0);
	rtw89_write_rf(rtwdev, rf_path: path, RR_LCKST, RR_LCKST_BIN, data: 1);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
		    central_ch, path, reg18_addr,
		    rtw89_read_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK));
}
4173
4174static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
4175{
4176 _ch_setting(rtwdev, path: RF_PATH_A, central_ch, dav: true);
4177 _ch_setting(rtwdev, path: RF_PATH_B, central_ch, dav: true);
4178 _ch_setting(rtwdev, path: RF_PATH_A, central_ch, dav: false);
4179 _ch_setting(rtwdev, path: RF_PATH_B, central_ch, dav: false);
4180}
4181
4182static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
4183 enum rtw89_rf_path path)
4184{
4185 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWE2, RR_LUTWE2_RTXBW, data: 0x1);
4186 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWA, RR_LUTWA_M2, data: 0x12);
4187
4188 if (bw == RTW89_CHANNEL_WIDTH_20)
4189 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWD0, RR_LUTWD0_LB, data: 0x1b);
4190 else if (bw == RTW89_CHANNEL_WIDTH_40)
4191 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWD0, RR_LUTWD0_LB, data: 0x13);
4192 else if (bw == RTW89_CHANNEL_WIDTH_80)
4193 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWD0, RR_LUTWD0_LB, data: 0xb);
4194 else
4195 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWD0, RR_LUTWD0_LB, data: 0x3);
4196
4197 rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RFK] set S%d RXBB BW 0x3F = 0x%x\n",
4198 path, rtw89_read_rf(rtwdev, rf_path: path, RR_LUTWD0, RR_LUTWD0_LB));
4199
4200 rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWE2, RR_LUTWE2_RTXBW, data: 0x0);
4201}
4202
4203static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4204 enum rtw89_bandwidth bw)
4205{
4206 u8 kpath, path;
4207
4208 kpath = _kpath(rtwdev, phy_idx: phy);
4209
4210 for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
4211 if (!(kpath & BIT(path)))
4212 continue;
4213
4214 _set_rxbb_bw(rtwdev, bw, path);
4215 }
4216}
4217
/* Combined channel + bandwidth RF programming: channel first, then RF BW,
 * then the RX baseband filter. @band is currently unused here (the band
 * bits are derived from the channel number in _ch_setting()).
 */
static void rtw8852bt_ctrl_bw_ch(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy, u8 central_ch,
				 enum rtw89_band band, enum rtw89_bandwidth bw)
{
	_ctrl_ch(rtwdev, central_ch);
	_ctrl_bw(rtwdev, phy, bw);
	_rxbb_bw(rtwdev, phy, bw);
}
4226
/* Chip-ops entry point: program the RF front end for @chan on @phy_idx. */
void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev,
			      const struct rtw89_chan *chan,
			      enum rtw89_phy_idx phy_idx)
{
	rtw8852bt_ctrl_bw_ch(rtwdev, phy: phy_idx, central_ch: chan->channel, band: chan->band_type,
			     bw: chan->band_width);
}
4234
/* Pick (or reuse) an RFK MCC table slot for the current managed channel.
 *
 * Builds channel descriptors from the cached MCC entries, asks the RFK
 * channel lookup which slot matches (or should be evicted), then records
 * the current channel/band in that slot and publishes it as table_idx.
 */
void rtw8852bt_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, 0);
	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
	struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V0] = {};
	u8 idx;

	for (idx = 0; idx < ARRAY_SIZE(desc); idx++) {
		struct rtw89_rfk_chan_desc *p = &desc[idx];

		p->ch = rfk_mcc->ch[idx];

		p->has_band = true;
		p->band = rfk_mcc->band[idx];
	}

	idx = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), target_chan: chan);

	rfk_mcc->ch[idx] = chan->channel;
	rfk_mcc->band[idx] = chan->band_type;
	rfk_mcc->table_idx = idx;
}
4257
/* Channel-context state callback: disable DPK while multi-channel
 * concurrency (MCC) is active and re-run it when MCC stops.
 * NOTE(review): both branches pass `false` to _dpk_onoff(); on MCC_STOP
 * the following rtw8852bt_dpk() performs the actual re-enable.
 */
void rtw8852bt_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
			      enum rtw89_chanctx_state state)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 path;

	switch (state) {
	case RTW89_CHANCTX_STATE_MCC_START:
		dpk->is_dpk_enable = false;
		for (path = 0; path < RTW8852BT_SS; path++)
			_dpk_onoff(rtwdev, path, off: false);
		break;
	case RTW89_CHANCTX_STATE_MCC_STOP:
		dpk->is_dpk_enable = true;
		for (path = 0; path < RTW8852BT_SS; path++)
			_dpk_onoff(rtwdev, path, off: false);
		rtw8852bt_dpk(rtwdev, phy_idx: RTW89_PHY_0, chanctx_idx: RTW89_CHANCTX_0);
		break;
	default:
		break;
	}
}
4280

source code of linux/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c