1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef __MLX5_CORE_H__
34#define __MLX5_CORE_H__
35
36#include <linux/types.h>
37#include <linux/kernel.h>
38#include <linux/sched.h>
39#include <linux/if_link.h>
40#include <linux/firmware.h>
41#include <linux/mlx5/cq.h>
42#include <linux/mlx5/fs.h>
43#include <linux/mlx5/driver.h>
44#include "lib/devcom.h"
45
46extern uint mlx5_core_debug_mask;
47
/* Logging helpers: each wraps the corresponding dev_*() printk with a
 * "<function>:<line>:(pid <pid>): " prefix, logging against the mlx5 core
 * device's struct device.  mlx5_core_info() is the one exception: it emits
 * the caller's message with no prefix.
 */
#define mlx5_core_dbg(__dev, format, ...) \
	dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \
		__func__, __LINE__, current->pid, \
		##__VA_ARGS__)

/* Like mlx5_core_dbg() but prints at most once per boot */
#define mlx5_core_dbg_once(__dev, format, ...)		\
	dev_dbg_once((__dev)->device,		\
		     "%s:%d:(pid %d): " format,		\
		     __func__, __LINE__, current->pid,	\
		     ##__VA_ARGS__)

/* Debug print gated on a bit in the module-wide mlx5_core_debug_mask */
#define mlx5_core_dbg_mask(__dev, mask, format, ...)		\
do {								\
	if ((mask) & mlx5_core_debug_mask)			\
		mlx5_core_dbg(__dev, format, ##__VA_ARGS__);	\
} while (0)

#define mlx5_core_err(__dev, format, ...) \
	dev_err((__dev)->device, "%s:%d:(pid %d): " format, \
		__func__, __LINE__, current->pid, \
		##__VA_ARGS__)

/* Rate-limited variant of mlx5_core_err() */
#define mlx5_core_err_rl(__dev, format, ...) \
	dev_err_ratelimited((__dev)->device, \
			    "%s:%d:(pid %d): " format, \
			    __func__, __LINE__, current->pid, \
			    ##__VA_ARGS__)

#define mlx5_core_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
		 __func__, __LINE__, current->pid, \
		 ##__VA_ARGS__)

/* Like mlx5_core_warn() but prints at most once per boot */
#define mlx5_core_warn_once(__dev, format, ...) \
	dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format, \
		      __func__, __LINE__, current->pid, \
		      ##__VA_ARGS__)

/* Rate-limited variant of mlx5_core_warn() */
#define mlx5_core_warn_rl(__dev, format, ...) \
	dev_warn_ratelimited((__dev)->device, \
			     "%s:%d:(pid %d): " format, \
			     __func__, __LINE__, current->pid, \
			     ##__VA_ARGS__)

/* Note: intentionally no func/line/pid prefix, unlike the other helpers */
#define mlx5_core_info(__dev, format, ...) \
	dev_info((__dev)->device, format, ##__VA_ARGS__)

/* Rate-limited info print; this one does carry the standard prefix */
#define mlx5_core_info_rl(__dev, format, ...) \
	dev_info_ratelimited((__dev)->device, \
			     "%s:%d:(pid %d): " format, \
			     __func__, __LINE__, current->pid, \
			     ##__VA_ARGS__)
100
/* Length in bytes of the cross-vhca object access key */
#define ACCESS_KEY_LEN 32
/* Bit offset of the flow-table type field within a flow-table ID */
#define FT_ID_FT_TYPE_OFFSET 24

/* Parameters for the ALLOW_OTHER_VHCA_ACCESS command
 * (see mlx5_cmd_allow_other_vhca_access())
 */
struct mlx5_cmd_allow_other_vhca_access_attr {
	u16 obj_type;	/* type of the object being shared */
	u32 obj_id;	/* id of the object being shared */
	u8 access_key[ACCESS_KEY_LEN];	/* key that grants access */
};

/* Parameters for creating an alias object that refers to an object owned
 * by another vhca (see mlx5_cmd_alias_obj_create())
 */
struct mlx5_cmd_alias_obj_create_attr {
	u32 obj_id;	/* id of the object on the owning vhca */
	u16 vhca_id;	/* id of the owning vhca */
	u16 obj_type;	/* type of the aliased object */
	u8 access_key[ACCESS_KEY_LEN];	/* must match the owner's access key */
};
116
/* Ethernet protocol masks queried from the port (see
 * mlx5_port_query_eth_proto())
 */
struct mlx5_port_eth_proto {
	u32 cap;	/* capability mask: protocols the port supports */
	u32 admin;	/* administratively enabled protocol mask */
	u32 oper;	/* currently operational protocol mask */
};

/* Addressing parameters for a module EEPROM read
 * (see mlx5_query_module_eeprom_by_page())
 */
struct mlx5_module_eeprom_query_params {
	u16 size;		/* number of bytes to read */
	u16 offset;		/* byte offset within the page */
	u16 i2c_address;
	u32 page;
	u32 bank;
	u32 module_number;
};

/* Link mode description returned by mlx5_port_ptys2info().
 * NOTE(review): speed units are defined by the PTYS translation tables in
 * port.c — confirm against those callers before relying on them here.
 */
struct mlx5_link_info {
	u32 speed;
	u32 lanes;
};
136
137static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
138{
139 struct device *device = dev->device;
140 struct va_format vaf;
141 va_list args;
142
143 if (WARN_ONCE(level < LOGLEVEL_EMERG || level > LOGLEVEL_DEBUG,
144 "Level %d is out of range, set to default level\n", level))
145 level = LOGLEVEL_DEFAULT;
146
147 va_start(args, format);
148 vaf.fmt = format;
149 vaf.va = &args;
150
151 dev_printk_emit(level, dev: device, fmt: "%s %s: %pV", dev_driver_string(dev: device), dev_name(dev: device),
152 &vaf);
153 va_end(args);
154}
155
/* Log at a caller-chosen level with the standard func/line/pid prefix */
#define mlx5_log(__dev, level, format, ...)			\
	mlx5_printk(__dev, level, "%s:%d:(pid %d): " format,	\
		    __func__, __LINE__, current->pid,		\
		    ##__VA_ARGS__)
160
161static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
162{
163 return &dev->pdev->dev;
164}
165
/* Bits usable in mlx5_core_debug_mask (see mlx5_core_dbg_mask()) */
enum {
	MLX5_CMD_DATA, /* print command payload only */
	MLX5_CMD_TIME, /* print command execution time */
};

/* Driver-originated status/syndrome values (as opposed to FW-returned ones) */
enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

/* Addresses within the device semaphore space */
enum mlx5_semaphore_space_address {
	MLX5_SEMAPHORE_SPACE_DOMAIN = 0xA,
	MLX5_SEMAPHORE_SW_RESET = 0x20,
};

#define MLX5_DEFAULT_PROF 2	/* default resource-profile index */
#define MLX5_SF_PROF 3		/* resource-profile index used for SFs */
#define MLX5_NUM_FW_CMD_THREADS 8
#define MLX5_DEV_MAX_WQS MLX5_NUM_FW_CMD_THREADS
185
186static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
187 size_t item_size, size_t num_items,
188 const char *func, int line)
189{
190 int inlen;
191
192 if (fixed > INT_MAX || item_size > INT_MAX || num_items > INT_MAX) {
193 mlx5_core_err(dev, "%s: %s:%d: input values too big: %zu + %zu * %zu\n",
194 __func__, func, line, fixed, item_size, num_items);
195 return -ENOMEM;
196 }
197
198 if (check_mul_overflow((int)item_size, (int)num_items, &inlen)) {
199 mlx5_core_err(dev, "%s: %s:%d: multiplication overflow: %zu + %zu * %zu\n",
200 __func__, func, line, fixed, item_size, num_items);
201 return -ENOMEM;
202 }
203
204 if (check_add_overflow((int)fixed, inlen, &inlen)) {
205 mlx5_core_err(dev, "%s: %s:%d: addition overflow: %zu + %zu * %zu\n",
206 __func__, func, line, fixed, item_size, num_items);
207 return -ENOMEM;
208 }
209
210 return inlen;
211}
212
/* Convenience wrapper that records the call site for overflow reports */
#define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \
	mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__)
215
216int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
217int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
218 enum mlx5_cap_mode cap_mode);
219int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
220int mlx5_query_board_id(struct mlx5_core_dev *dev);
221int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num);
222int mlx5_cmd_init(struct mlx5_core_dev *dev);
223void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
224int mlx5_cmd_enable(struct mlx5_core_dev *dev);
225void mlx5_cmd_disable(struct mlx5_core_dev *dev);
226void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
227 enum mlx5_cmdif_state cmdif_state);
228int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id);
229int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
230int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
231int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
232void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
233void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
234u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev);
235int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev);
236void mlx5_disable_device(struct mlx5_core_dev *dev);
237int mlx5_recover_device(struct mlx5_core_dev *dev);
238int mlx5_sriov_init(struct mlx5_core_dev *dev);
239void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
240int mlx5_sriov_attach(struct mlx5_core_dev *dev);
241void mlx5_sriov_detach(struct mlx5_core_dev *dev);
242int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
243void mlx5_sriov_disable(struct pci_dev *pdev, bool num_vf_change);
244int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
245int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
246int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
247bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy);
248bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy);
249int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
250 void *context, u32 *element_id);
251int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
252 void *context, u32 element_id,
253 u32 modify_bitmask);
254int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
255 u32 element_id);
256int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
257
258void mlx5_cmd_flush(struct mlx5_core_dev *dev);
259void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
260void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
261void mlx5_vhca_debugfs_init(struct mlx5_core_dev *dev);
262
263int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
264 u8 access_reg_group);
265int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
266 u8 access_reg_group);
267int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
268 u8 feature_group, u8 access_reg_group);
269int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir);
270
271void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
272void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
273void mlx5_lag_add_mdev(struct mlx5_core_dev *dev);
274void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev);
275void mlx5_lag_disable_change(struct mlx5_core_dev *dev);
276void mlx5_lag_enable_change(struct mlx5_core_dev *dev);
277
278int mlx5_events_init(struct mlx5_core_dev *dev);
279void mlx5_events_cleanup(struct mlx5_core_dev *dev);
280void mlx5_events_start(struct mlx5_core_dev *dev);
281void mlx5_events_stop(struct mlx5_core_dev *dev);
282
283int mlx5_adev_idx_alloc(void);
284void mlx5_adev_idx_free(int idx);
285void mlx5_adev_cleanup(struct mlx5_core_dev *dev);
286int mlx5_adev_init(struct mlx5_core_dev *dev);
287
288int mlx5_attach_device(struct mlx5_core_dev *dev);
289void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend);
290int mlx5_register_device(struct mlx5_core_dev *dev);
291void mlx5_unregister_device(struct mlx5_core_dev *dev);
292void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev);
293bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev);
294void mlx5_core_reps_aux_devs_remove(struct mlx5_core_dev *dev);
295
296void mlx5_fw_reporters_create(struct mlx5_core_dev *dev);
297int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
298int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
299int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
300int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
301
302struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
303void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
304
305#ifdef CONFIG_PCIE_TPH
306struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev);
307void mlx5_st_destroy(struct mlx5_core_dev *dev);
308#else
309static inline struct mlx5_st *
310mlx5_st_create(struct mlx5_core_dev *dev) { return NULL; }
311static inline void mlx5_st_destroy(struct mlx5_core_dev *dev) { return; }
312#endif
313
314void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
315int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
316 enum mlx5_port_status status);
317int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
318 enum mlx5_port_status *status);
319int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
320
321int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
322int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
323int mlx5_query_port_pause(struct mlx5_core_dev *dev,
324 u32 *rx_pause, u32 *tx_pause);
325
326int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
327int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
328 u8 *pfc_en_rx);
329
330int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
331 u16 stall_critical_watermark,
332 u16 stall_minor_watermark);
333int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
334 u16 *stall_critical_watermark,
335 u16 *stall_minor_watermark);
336
337int mlx5_max_tc(struct mlx5_core_dev *mdev);
338int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
339int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
340 u8 prio, u8 *tc);
341int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
342int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
343 u8 tc, u8 *tc_group);
344int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
345int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
346 u8 tc, u8 *bw_pct);
347int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
348 u8 *max_bw_value,
349 u8 *max_bw_unit);
350int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
351 u8 *max_bw_value,
352 u8 *max_bw_unit);
353int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
354int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
355
356int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
357int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
358int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
359void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
360 bool *enabled);
361int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
362 u16 offset, u16 size, u8 *data, u8 *status);
363int
364mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
365 struct mlx5_module_eeprom_query_params *params,
366 u8 *data, u8 *status);
367
368int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
369int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
370int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
371int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
372int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
373 u8 *buffer_ownership);
374int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
375int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
376
377int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
378 struct mlx5_port_eth_proto *eproto);
379bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
380const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
381 u32 eth_proto_oper,
382 bool force_legacy);
383u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
384 struct mlx5_link_info *info,
385 bool force_legacy);
386int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
387
/* True when the device advertises full PPS (pulse-per-second) support */
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) &&		\
			    MLX5_CAP_GEN((mdev), pps_modify) &&		\
			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) &&	\
			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
392
393int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
394 struct netlink_ext_ack *extack);
395int mlx5_fw_version_query(struct mlx5_core_dev *dev,
396 u32 *running_ver, u32 *stored_ver);
397
#ifdef CONFIG_MLX5_CORE_EN
int mlx5e_init(void);
void mlx5e_cleanup(void);
#else
/* Stubs when Ethernet support is compiled out, so callers stay unconditional */
static inline int mlx5e_init(void){ return 0; }
static inline void mlx5e_cleanup(void){}
#endif
405
406static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
407{
408 return pci_num_vf(dev: dev->pdev) ? true : false;
409}
410
411int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev);
412static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
413{
414 int ret;
415
416 mlx5_devcom_comp_lock(devcom: dev->priv.hca_devcom_comp);
417 ret = mlx5_rescan_drivers_locked(dev);
418 mlx5_devcom_comp_unlock(devcom: dev->priv.hca_devcom_comp);
419 return ret;
420}
421
422u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
423void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
424
/* True when this core device represents a sub-function (SF) */
static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_SF;
}
429
/* Recover the auxiliary device embedding this core device's struct device.
 * NOTE(review): container_of() is unchecked — presumably only meaningful
 * when mlx5_core_is_sf(mdev) is true; confirm against callers.
 */
static inline struct auxiliary_device *
mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev)
{
	return container_of(mdev->device, struct auxiliary_device, dev);
}
435
436int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
437void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
438int mlx5_init_one(struct mlx5_core_dev *dev);
439int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev);
440void mlx5_uninit_one(struct mlx5_core_dev *dev);
441void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
442void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
443int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
444int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
445int mlx5_init_one_light(struct mlx5_core_dev *dev);
446void mlx5_uninit_one_light(struct mlx5_core_dev *dev);
447void mlx5_unload_one_light(struct mlx5_core_dev *dev);
448
449void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf,
450 u8 *len);
451int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport,
452 u16 opmod);
453#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
454 mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
455
/* Total number of dynamic MSI-X vectors available to distribute among VFs,
 * read from the device's maximum general capabilities.
 */
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
}
462
463bool mlx5_eth_supported(struct mlx5_core_dev *dev);
464bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
465bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
466bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
467int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
468 struct mlx5_cmd_allow_other_vhca_access_attr *attr);
469int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
470 struct mlx5_cmd_alias_obj_create_attr *alias_attr,
471 u32 *obj_id);
472int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, u16 obj_type);
473
/* First vport number assigned to embedded-CPU (EC) VFs; zero when the
 * capability is not exposed (used as an "EC SR-IOV supported" signal below).
 */
static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN_2(dev, ec_vf_vport_base);
}
478
/* Nonzero when this device is an ECPF that exposes an EC VF vport base,
 * i.e. embedded-CPU SR-IOV is available. (Returns a boolean value as u16.)
 */
static inline u16 mlx5_core_ec_sriov_enabled(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf(dev) && mlx5_core_ec_vf_vport_base(dev);
}
483
484static inline bool mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev *dev, u16 vport_num)
485{
486 int base_vport = mlx5_core_ec_vf_vport_base(dev);
487 int max_vport = base_vport + mlx5_core_max_ec_vfs(dev);
488
489 if (!mlx5_core_ec_sriov_enabled(dev))
490 return false;
491
492 return (vport_num >= base_vport && vport_num < max_vport);
493}
494
495static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vport, bool ec_vf_func)
496{
497 return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev) + 1
498 : vport;
499}
500
501static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev)
502{
503 if (MLX5_CAP_GEN_2(dev, max_num_eqs_24b))
504 return MLX5_CAP_GEN_2(dev, max_num_eqs_24b);
505
506 if (MLX5_CAP_GEN(dev, max_num_eqs))
507 return MLX5_CAP_GEN(dev, max_num_eqs);
508
509 return 1 << MLX5_CAP_GEN(dev, log_max_eq);
510}
511
512static inline bool mlx5_pcie_cong_event_supported(struct mlx5_core_dev *dev)
513{
514 u64 features = MLX5_CAP_GEN_2_64(dev, general_obj_types_127_64);
515
516 if (!(features & MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT))
517 return false;
518
519 if (dev->sd)
520 return false;
521
522 return true;
523}
524#endif /* __MLX5_CORE_H__ */
525

/* Source: linux/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h */