| 1 | /******************************************************************* |
| 2 | * This file is part of the Emulex Linux Device Driver for * |
| 3 | * Fibre Channel Host Bus Adapters. * |
| 4 | * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * |
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
| 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
| 7 | * EMULEX and SLI are trademarks of Emulex. * |
| 8 | * www.broadcom.com * |
| 9 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
| 10 | * * |
| 11 | * This program is free software; you can redistribute it and/or * |
| 12 | * modify it under the terms of version 2 of the GNU General * |
| 13 | * Public License as published by the Free Software Foundation. * |
| 14 | * This program is distributed in the hope that it will be useful. * |
| 15 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * |
| 16 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * |
| 17 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * |
| 18 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * |
| 19 | * TO BE LEGALLY INVALID. See the GNU General Public License for * |
| 20 | * more details, a copy of which can be found in the file COPYING * |
| 21 | * included with this package. * |
| 22 | *******************************************************************/ |
| 23 | |
| 24 | #include <linux/blkdev.h> |
| 25 | #include <linux/pci.h> |
| 26 | #include <linux/interrupt.h> |
| 27 | #include <linux/delay.h> |
| 28 | #include <linux/slab.h> |
| 29 | #include <linux/lockdep.h> |
| 30 | #include <linux/dmi.h> |
| 31 | #include <linux/of.h> |
| 32 | |
| 33 | #include <scsi/scsi.h> |
| 34 | #include <scsi/scsi_cmnd.h> |
| 35 | #include <scsi/scsi_device.h> |
| 36 | #include <scsi/scsi_host.h> |
| 37 | #include <scsi/scsi_transport_fc.h> |
| 38 | #include <scsi/fc/fc_fs.h> |
| 39 | #include <linux/crash_dump.h> |
| 40 | #ifdef CONFIG_X86 |
| 41 | #include <asm/set_memory.h> |
| 42 | #endif |
| 43 | |
| 44 | #include "lpfc_hw4.h" |
| 45 | #include "lpfc_hw.h" |
| 46 | #include "lpfc_sli.h" |
| 47 | #include "lpfc_sli4.h" |
| 48 | #include "lpfc_nl.h" |
| 49 | #include "lpfc_disc.h" |
| 50 | #include "lpfc.h" |
| 51 | #include "lpfc_scsi.h" |
| 52 | #include "lpfc_nvme.h" |
| 53 | #include "lpfc_crtn.h" |
| 54 | #include "lpfc_logmsg.h" |
| 55 | #include "lpfc_compat.h" |
| 56 | #include "lpfc_debugfs.h" |
| 57 | #include "lpfc_vport.h" |
| 58 | #include "lpfc_version.h" |
| 59 | |
| 60 | /* There are only four IOCB completion types. */ |
| 61 | typedef enum _lpfc_iocb_type { |
| 62 | LPFC_UNKNOWN_IOCB, |
| 63 | LPFC_UNSOL_IOCB, |
| 64 | LPFC_SOL_IOCB, |
| 65 | LPFC_ABORT_IOCB |
| 66 | } lpfc_iocb_type; |
| 67 | |
| 68 | |
| 69 | /* Provide function prototypes local to this module. */ |
| 70 | static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, |
| 71 | uint32_t); |
| 72 | static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, |
| 73 | uint8_t *, uint32_t *); |
| 74 | static struct lpfc_iocbq * |
| 75 | lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, |
| 76 | struct lpfc_iocbq *rspiocbq); |
| 77 | static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, |
| 78 | struct hbq_dmabuf *); |
| 79 | static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, |
| 80 | struct hbq_dmabuf *dmabuf); |
| 81 | static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, |
| 82 | struct lpfc_queue *cq, struct lpfc_cqe *cqe); |
| 83 | static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, |
| 84 | int); |
| 85 | static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, |
| 86 | struct lpfc_queue *eq, |
| 87 | struct lpfc_eqe *eqe, |
| 88 | enum lpfc_poll_mode poll_mode); |
| 89 | static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); |
| 90 | static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); |
| 91 | static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q); |
| 92 | static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, |
| 93 | struct lpfc_queue *cq, |
| 94 | struct lpfc_cqe *cqe); |
| 95 | static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, |
| 96 | struct lpfc_iocbq *pwqeq, |
| 97 | struct lpfc_sglq *sglq); |
| 98 | |
| 99 | union lpfc_wqe128 lpfc_iread_cmd_template; |
| 100 | union lpfc_wqe128 lpfc_iwrite_cmd_template; |
| 101 | union lpfc_wqe128 lpfc_icmnd_cmd_template; |
| 102 | |
/*
 * Setup WQE templates for IOs.
 *
 * lpfc_wqe_cmd_template - pre-build the static FCP IREAD/IWRITE/ICMND
 * 128-byte Work Queue Entry templates.  Per-I/O submission code copies
 * one of these templates and then fills in the fields marked "variable"
 * below (BDEs, tags, transfer lengths).  Words not explicitly set remain
 * zero from the memset.  Called once; no locking required.
 */
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - command, class 3, CT field set to RPI routing */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - command, class 3, CT field set to RPI routing */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - command, class 3, CT field set to RPI routing */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable; no data transfer, so QOSD set */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
| 224 | |
| 225 | #if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN) |
| 226 | /** |
| 227 | * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function |
| 228 | * @srcp: Source memory pointer. |
| 229 | * @destp: Destination memory pointer. |
| 230 | * @cnt: Number of words required to be copied. |
| 231 | * Must be a multiple of sizeof(uint64_t) |
| 232 | * |
| 233 | * This function is used for copying data between driver memory |
| 234 | * and the SLI WQ. This function also changes the endianness |
| 235 | * of each word if native endianness is different from SLI |
| 236 | * endianness. This function can be called with or without |
| 237 | * lock. |
| 238 | **/ |
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int remaining = (int)cnt;

	/*
	 * Copy 64 bits at a time; on 64-bit little-endian hosts no
	 * byte swapping is needed, so this is a straight word copy.
	 * @cnt is a byte count and must be a multiple of 8.
	 */
	while (remaining > 0) {
		*dest++ = *src++;
		remaining -= (int)sizeof(uint64_t);
	}
}
| 249 | #else |
| 250 | #define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c) |
| 251 | #endif |
| 252 | |
| 253 | /** |
| 254 | * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue |
| 255 | * @q: The Work Queue to operate on. |
| 256 | * @wqe: The work Queue Entry to put on the Work queue. |
| 257 | * |
| 258 | * This routine will copy the contents of @wqe to the next available entry on |
| 259 | * the @q. This function will then ring the Work Queue Doorbell to signal the |
| 260 | * HBA to start processing the Work Queue Entry. This function returns 0 if |
 * successful. If no entries are available on @q then this function will return
 * -EBUSY; if @q is NULL it returns -ENOMEM.
| 263 | * The caller is expected to hold the hbalock when calling this routine. |
| 264 | **/ |
| 265 | static int |
| 266 | lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) |
| 267 | { |
| 268 | union lpfc_wqe *temp_wqe; |
| 269 | struct lpfc_register doorbell; |
| 270 | uint32_t host_index; |
| 271 | uint32_t idx; |
| 272 | uint32_t i = 0; |
| 273 | uint8_t *tmp; |
| 274 | u32 if_type; |
| 275 | |
| 276 | /* sanity check on queue memory */ |
| 277 | if (unlikely(!q)) |
| 278 | return -ENOMEM; |
| 279 | |
| 280 | temp_wqe = lpfc_sli4_qe(q, idx: q->host_index); |
| 281 | |
| 282 | /* If the host has not yet processed the next entry then we are done */ |
| 283 | idx = ((q->host_index + 1) % q->entry_count); |
| 284 | if (idx == q->hba_index) { |
| 285 | q->WQ_overflow++; |
| 286 | return -EBUSY; |
| 287 | } |
| 288 | q->WQ_posted++; |
| 289 | /* set consumption flag every once in a while */ |
| 290 | if (!((q->host_index + 1) % q->notify_interval)) |
| 291 | bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); |
| 292 | else |
| 293 | bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); |
| 294 | if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) |
| 295 | bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); |
| 296 | lpfc_sli4_pcimem_bcopy(srcp: wqe, destp: temp_wqe, cnt: q->entry_size); |
| 297 | if (q->dpp_enable && q->phba->cfg_enable_dpp) { |
| 298 | /* write to DPP aperture taking advatage of Combined Writes */ |
| 299 | tmp = (uint8_t *)temp_wqe; |
| 300 | #ifdef __raw_writeq |
| 301 | for (i = 0; i < q->entry_size; i += sizeof(uint64_t)) |
| 302 | __raw_writeq(val: *((uint64_t *)(tmp + i)), |
| 303 | addr: q->dpp_regaddr + i); |
| 304 | #else |
| 305 | for (i = 0; i < q->entry_size; i += sizeof(uint32_t)) |
| 306 | __raw_writel(*((uint32_t *)(tmp + i)), |
| 307 | q->dpp_regaddr + i); |
| 308 | #endif |
| 309 | } |
| 310 | /* ensure WQE bcopy and DPP flushed before doorbell write */ |
| 311 | wmb(); |
| 312 | |
| 313 | /* Update the host index before invoking device */ |
| 314 | host_index = q->host_index; |
| 315 | |
| 316 | q->host_index = idx; |
| 317 | |
| 318 | /* Ring Doorbell */ |
| 319 | doorbell.word0 = 0; |
| 320 | if (q->db_format == LPFC_DB_LIST_FORMAT) { |
| 321 | if (q->dpp_enable && q->phba->cfg_enable_dpp) { |
| 322 | bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1); |
| 323 | bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1); |
| 324 | bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell, |
| 325 | q->dpp_id); |
| 326 | bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell, |
| 327 | q->queue_id); |
| 328 | } else { |
| 329 | bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); |
| 330 | bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); |
| 331 | |
| 332 | /* Leave bits <23:16> clear for if_type 6 dpp */ |
| 333 | if_type = bf_get(lpfc_sli_intf_if_type, |
| 334 | &q->phba->sli4_hba.sli_intf); |
| 335 | if (if_type != LPFC_SLI_INTF_IF_TYPE_6) |
| 336 | bf_set(lpfc_wq_db_list_fm_index, &doorbell, |
| 337 | host_index); |
| 338 | } |
| 339 | } else if (q->db_format == LPFC_DB_RING_FORMAT) { |
| 340 | bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); |
| 341 | bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); |
| 342 | } else { |
| 343 | return -EINVAL; |
| 344 | } |
| 345 | writel(val: doorbell.word0, addr: q->db_regaddr); |
| 346 | |
| 347 | return 0; |
| 348 | } |
| 349 | |
| 350 | /** |
| 351 | * lpfc_sli4_wq_release - Updates internal hba index for WQ |
| 352 | * @q: The Work Queue to operate on. |
| 353 | * @index: The index to advance the hba index to. |
| 354 | * |
| 355 | * This routine will update the HBA index of a queue to reflect consumption of |
| 356 | * Work Queue Entries by the HBA. When the HBA indicates that it has consumed |
| 357 | * an entry the host calls this function to update the queue's internal |
| 358 | * pointers. |
| 359 | **/ |
| 360 | static void |
| 361 | lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) |
| 362 | { |
| 363 | /* sanity check on queue memory */ |
| 364 | if (unlikely(!q)) |
| 365 | return; |
| 366 | |
| 367 | q->hba_index = index; |
| 368 | } |
| 369 | |
| 370 | /** |
| 371 | * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue |
| 372 | * @q: The Mailbox Queue to operate on. |
| 373 | * @mqe: The Mailbox Queue Entry to put on the Work queue. |
| 374 | * |
| 375 | * This routine will copy the contents of @mqe to the next available entry on |
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
| 378 | * successful. If no entries are available on @q then this function will return |
| 379 | * -ENOMEM. |
| 380 | * The caller is expected to hold the hbalock when calling this routine. |
| 381 | **/ |
| 382 | static uint32_t |
| 383 | lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) |
| 384 | { |
| 385 | struct lpfc_mqe *temp_mqe; |
| 386 | struct lpfc_register doorbell; |
| 387 | |
| 388 | /* sanity check on queue memory */ |
| 389 | if (unlikely(!q)) |
| 390 | return -ENOMEM; |
| 391 | temp_mqe = lpfc_sli4_qe(q, idx: q->host_index); |
| 392 | |
| 393 | /* If the host has not yet processed the next entry then we are done */ |
| 394 | if (((q->host_index + 1) % q->entry_count) == q->hba_index) |
| 395 | return -ENOMEM; |
| 396 | lpfc_sli4_pcimem_bcopy(srcp: mqe, destp: temp_mqe, cnt: q->entry_size); |
| 397 | /* Save off the mailbox pointer for completion */ |
| 398 | q->phba->mbox = (MAILBOX_t *)temp_mqe; |
| 399 | |
| 400 | /* Update the host index before invoking device */ |
| 401 | q->host_index = ((q->host_index + 1) % q->entry_count); |
| 402 | |
| 403 | /* Ring Doorbell */ |
| 404 | doorbell.word0 = 0; |
| 405 | bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); |
| 406 | bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); |
| 407 | writel(val: doorbell.word0, addr: q->phba->sli4_hba.MQDBregaddr); |
| 408 | return 0; |
| 409 | } |
| 410 | |
| 411 | /** |
| 412 | * lpfc_sli4_mq_release - Updates internal hba index for MQ |
| 413 | * @q: The Mailbox Queue to operate on. |
| 414 | * |
| 415 | * This routine will update the HBA index of a queue to reflect consumption of |
| 416 | * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed |
| 417 | * an entry the host calls this function to update the queue's internal |
| 418 | * pointers. This routine returns the number of entries that were consumed by |
| 419 | * the HBA. |
| 420 | **/ |
| 421 | static uint32_t |
| 422 | lpfc_sli4_mq_release(struct lpfc_queue *q) |
| 423 | { |
| 424 | /* sanity check on queue memory */ |
| 425 | if (unlikely(!q)) |
| 426 | return 0; |
| 427 | |
| 428 | /* Clear the mailbox pointer for completion */ |
| 429 | q->phba->mbox = NULL; |
| 430 | q->hba_index = ((q->hba_index + 1) % q->entry_count); |
| 431 | return 1; |
| 432 | } |
| 433 | |
| 434 | /** |
| 435 | * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ |
| 436 | * @q: The Event Queue to get the first valid EQE from |
| 437 | * |
| 438 | * This routine will get the first valid Event Queue Entry from @q, update |
| 439 | * the queue's internal hba index, and return the EQE. If no valid EQEs are in |
| 440 | * the Queue (no more work to do), or the Queue is full of EQEs that have been |
| 441 | * processed, but not popped back to the HBA then this routine will return NULL. |
| 442 | **/ |
| 443 | static struct lpfc_eqe * |
| 444 | lpfc_sli4_eq_get(struct lpfc_queue *q) |
| 445 | { |
| 446 | struct lpfc_eqe *eqe; |
| 447 | |
| 448 | /* sanity check on queue memory */ |
| 449 | if (unlikely(!q)) |
| 450 | return NULL; |
| 451 | eqe = lpfc_sli4_qe(q, idx: q->host_index); |
| 452 | |
| 453 | /* If the next EQE is not valid then we are done */ |
| 454 | if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid) |
| 455 | return NULL; |
| 456 | |
| 457 | /* |
| 458 | * insert barrier for instruction interlock : data from the hardware |
| 459 | * must have the valid bit checked before it can be copied and acted |
| 460 | * upon. Speculative instructions were allowing a bcopy at the start |
| 461 | * of lpfc_sli4_fp_handle_wcqe(), which is called immediately |
| 462 | * after our return, to copy data before the valid bit check above |
| 463 | * was done. As such, some of the copied data was stale. The barrier |
| 464 | * ensures the check is before any data is copied. |
| 465 | */ |
| 466 | mb(); |
| 467 | return eqe; |
| 468 | } |
| 469 | |
| 470 | /** |
| 471 | * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ |
| 472 | * @q: The Event Queue to disable interrupts |
| 473 | * |
| 474 | **/ |
| 475 | void |
| 476 | lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) |
| 477 | { |
| 478 | struct lpfc_register doorbell; |
| 479 | |
| 480 | doorbell.word0 = 0; |
| 481 | bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); |
| 482 | bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); |
| 483 | bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, |
| 484 | (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); |
| 485 | bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); |
| 486 | writel(val: doorbell.word0, addr: q->phba->sli4_hba.EQDBregaddr); |
| 487 | } |
| 488 | |
| 489 | /** |
| 490 | * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ |
| 491 | * @q: The Event Queue to disable interrupts |
| 492 | * |
| 493 | **/ |
| 494 | void |
| 495 | lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q) |
| 496 | { |
| 497 | struct lpfc_register doorbell; |
| 498 | |
| 499 | doorbell.word0 = 0; |
| 500 | bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); |
| 501 | writel(val: doorbell.word0, addr: q->phba->sli4_hba.EQDBregaddr); |
| 502 | } |
| 503 | |
| 504 | /** |
| 505 | * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state |
| 506 | * @phba: adapter with EQ |
| 507 | * @q: The Event Queue that the host has completed processing for. |
| 508 | * @count: Number of elements that have been consumed |
| 509 | * @arm: Indicates whether the host wants to arms this CQ. |
| 510 | * |
| 511 | * This routine will notify the HBA, by ringing the doorbell, that count |
| 512 | * number of EQEs have been processed. The @arm parameter indicates whether |
| 513 | * the queue should be rearmed when ringing the doorbell. |
| 514 | **/ |
| 515 | void |
| 516 | lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, |
| 517 | uint32_t count, bool arm) |
| 518 | { |
| 519 | struct lpfc_register doorbell; |
| 520 | |
| 521 | /* sanity check on queue memory */ |
| 522 | if (unlikely(!q || (count == 0 && !arm))) |
| 523 | return; |
| 524 | |
| 525 | /* ring doorbell for number popped */ |
| 526 | doorbell.word0 = 0; |
| 527 | if (arm) { |
| 528 | bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); |
| 529 | bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); |
| 530 | } |
| 531 | bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); |
| 532 | bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); |
| 533 | bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, |
| 534 | (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); |
| 535 | bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); |
| 536 | writel(val: doorbell.word0, addr: q->phba->sli4_hba.EQDBregaddr); |
| 537 | /* PCI read to flush PCI pipeline on re-arming for INTx mode */ |
| 538 | if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) |
| 539 | readl(addr: q->phba->sli4_hba.EQDBregaddr); |
| 540 | } |
| 541 | |
| 542 | /** |
| 543 | * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state |
| 544 | * @phba: adapter with EQ |
| 545 | * @q: The Event Queue that the host has completed processing for. |
| 546 | * @count: Number of elements that have been consumed |
| 547 | * @arm: Indicates whether the host wants to arms this CQ. |
| 548 | * |
| 549 | * This routine will notify the HBA, by ringing the doorbell, that count |
| 550 | * number of EQEs have been processed. The @arm parameter indicates whether |
| 551 | * the queue should be rearmed when ringing the doorbell. |
| 552 | **/ |
| 553 | void |
| 554 | lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, |
| 555 | uint32_t count, bool arm) |
| 556 | { |
| 557 | struct lpfc_register doorbell; |
| 558 | |
| 559 | /* sanity check on queue memory */ |
| 560 | if (unlikely(!q || (count == 0 && !arm))) |
| 561 | return; |
| 562 | |
| 563 | /* ring doorbell for number popped */ |
| 564 | doorbell.word0 = 0; |
| 565 | if (arm) |
| 566 | bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1); |
| 567 | bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count); |
| 568 | bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); |
| 569 | writel(val: doorbell.word0, addr: q->phba->sli4_hba.EQDBregaddr); |
| 570 | /* PCI read to flush PCI pipeline on re-arming for INTx mode */ |
| 571 | if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) |
| 572 | readl(addr: q->phba->sli4_hba.EQDBregaddr); |
| 573 | } |
| 574 | |
| 575 | static void |
| 576 | __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, |
| 577 | struct lpfc_eqe *eqe) |
| 578 | { |
| 579 | if (!phba->sli4_hba.pc_sli4_params.eqav) |
| 580 | bf_set_le32(lpfc_eqe_valid, eqe, 0); |
| 581 | |
| 582 | eq->host_index = ((eq->host_index + 1) % eq->entry_count); |
| 583 | |
| 584 | /* if the index wrapped around, toggle the valid bit */ |
| 585 | if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index) |
| 586 | eq->qe_valid = (eq->qe_valid) ? 0 : 1; |
| 587 | } |
| 588 | |
| 589 | static void |
| 590 | lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) |
| 591 | { |
| 592 | struct lpfc_eqe *eqe = NULL; |
| 593 | u32 eq_count = 0, cq_count = 0; |
| 594 | struct lpfc_cqe *cqe = NULL; |
| 595 | struct lpfc_queue *cq = NULL, *childq = NULL; |
| 596 | int cqid = 0; |
| 597 | |
| 598 | /* walk all the EQ entries and drop on the floor */ |
| 599 | eqe = lpfc_sli4_eq_get(q: eq); |
| 600 | while (eqe) { |
| 601 | /* Get the reference to the corresponding CQ */ |
| 602 | cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); |
| 603 | cq = NULL; |
| 604 | |
| 605 | list_for_each_entry(childq, &eq->child_list, list) { |
| 606 | if (childq->queue_id == cqid) { |
| 607 | cq = childq; |
| 608 | break; |
| 609 | } |
| 610 | } |
| 611 | /* If CQ is valid, iterate through it and drop all the CQEs */ |
| 612 | if (cq) { |
| 613 | cqe = lpfc_sli4_cq_get(q: cq); |
| 614 | while (cqe) { |
| 615 | __lpfc_sli4_consume_cqe(phba, cq, cqe); |
| 616 | cq_count++; |
| 617 | cqe = lpfc_sli4_cq_get(q: cq); |
| 618 | } |
| 619 | /* Clear and re-arm the CQ */ |
| 620 | phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count, |
| 621 | LPFC_QUEUE_REARM); |
| 622 | cq_count = 0; |
| 623 | } |
| 624 | __lpfc_sli4_consume_eqe(phba, eq, eqe); |
| 625 | eq_count++; |
| 626 | eqe = lpfc_sli4_eq_get(q: eq); |
| 627 | } |
| 628 | |
| 629 | /* Clear and re-arm the EQ */ |
| 630 | phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM); |
| 631 | } |
| 632 | |
| 633 | static int |
| 634 | lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq, |
| 635 | u8 rearm, enum lpfc_poll_mode poll_mode) |
| 636 | { |
| 637 | struct lpfc_eqe *eqe; |
| 638 | int count = 0, consumed = 0; |
| 639 | |
| 640 | if (cmpxchg(&eq->queue_claimed, 0, 1) != 0) |
| 641 | goto rearm_and_exit; |
| 642 | |
| 643 | eqe = lpfc_sli4_eq_get(q: eq); |
| 644 | while (eqe) { |
| 645 | lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode); |
| 646 | __lpfc_sli4_consume_eqe(phba, eq, eqe); |
| 647 | |
| 648 | consumed++; |
| 649 | if (!(++count % eq->max_proc_limit)) |
| 650 | break; |
| 651 | |
| 652 | if (!(count % eq->notify_interval)) { |
| 653 | phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, |
| 654 | LPFC_QUEUE_NOARM); |
| 655 | consumed = 0; |
| 656 | } |
| 657 | |
| 658 | eqe = lpfc_sli4_eq_get(q: eq); |
| 659 | } |
| 660 | eq->EQ_processed += count; |
| 661 | |
| 662 | /* Track the max number of EQEs processed in 1 intr */ |
| 663 | if (count > eq->EQ_max_eqe) |
| 664 | eq->EQ_max_eqe = count; |
| 665 | |
| 666 | xchg(&eq->queue_claimed, 0); |
| 667 | |
| 668 | rearm_and_exit: |
| 669 | /* Always clear the EQ. */ |
| 670 | phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm); |
| 671 | |
| 672 | return count; |
| 673 | } |
| 674 | |
| 675 | /** |
| 676 | * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ |
| 677 | * @q: The Completion Queue to get the first valid CQE from |
| 678 | * |
| 679 | * This routine will get the first valid Completion Queue Entry from @q, update |
| 680 | * the queue's internal hba index, and return the CQE. If no valid CQEs are in |
| 681 | * the Queue (no more work to do), or the Queue is full of CQEs that have been |
| 682 | * processed, but not popped back to the HBA then this routine will return NULL. |
| 683 | **/ |
| 684 | static struct lpfc_cqe * |
| 685 | lpfc_sli4_cq_get(struct lpfc_queue *q) |
| 686 | { |
| 687 | struct lpfc_cqe *cqe; |
| 688 | |
| 689 | /* sanity check on queue memory */ |
| 690 | if (unlikely(!q)) |
| 691 | return NULL; |
| 692 | cqe = lpfc_sli4_qe(q, idx: q->host_index); |
| 693 | |
| 694 | /* If the next CQE is not valid then we are done */ |
| 695 | if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid) |
| 696 | return NULL; |
| 697 | |
| 698 | /* |
| 699 | * insert barrier for instruction interlock : data from the hardware |
| 700 | * must have the valid bit checked before it can be copied and acted |
| 701 | * upon. Given what was seen in lpfc_sli4_cq_get() of speculative |
| 702 | * instructions allowing action on content before valid bit checked, |
| 703 | * add barrier here as well. May not be needed as "content" is a |
| 704 | * single 32-bit entity here (vs multi word structure for cq's). |
| 705 | */ |
| 706 | mb(); |
| 707 | return cqe; |
| 708 | } |
| 709 | |
| 710 | static void |
| 711 | __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
| 712 | struct lpfc_cqe *cqe) |
| 713 | { |
| 714 | if (!phba->sli4_hba.pc_sli4_params.cqav) |
| 715 | bf_set_le32(lpfc_cqe_valid, cqe, 0); |
| 716 | |
| 717 | cq->host_index = ((cq->host_index + 1) % cq->entry_count); |
| 718 | |
| 719 | /* if the index wrapped around, toggle the valid bit */ |
| 720 | if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index) |
| 721 | cq->qe_valid = (cq->qe_valid) ? 0 : 1; |
| 722 | } |
| 723 | |
| 724 | /** |
| 725 | * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state. |
| 726 | * @phba: the adapter with the CQ |
| 727 | * @q: The Completion Queue that the host has completed processing for. |
| 728 | * @count: the number of elements that were consumed |
| 729 | * @arm: Indicates whether the host wants to arms this CQ. |
| 730 | * |
| 731 | * This routine will notify the HBA, by ringing the doorbell, that the |
| 732 | * CQEs have been processed. The @arm parameter specifies whether the |
| 733 | * queue should be rearmed when ringing the doorbell. |
| 734 | **/ |
| 735 | void |
| 736 | lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, |
| 737 | uint32_t count, bool arm) |
| 738 | { |
| 739 | struct lpfc_register doorbell; |
| 740 | |
| 741 | /* sanity check on queue memory */ |
| 742 | if (unlikely(!q || (count == 0 && !arm))) |
| 743 | return; |
| 744 | |
| 745 | /* ring doorbell for number popped */ |
| 746 | doorbell.word0 = 0; |
| 747 | if (arm) |
| 748 | bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); |
| 749 | bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); |
| 750 | bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); |
| 751 | bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell, |
| 752 | (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT)); |
| 753 | bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id); |
| 754 | writel(val: doorbell.word0, addr: q->phba->sli4_hba.CQDBregaddr); |
| 755 | } |
| 756 | |
| 757 | /** |
| 758 | * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state. |
| 759 | * @phba: the adapter with the CQ |
| 760 | * @q: The Completion Queue that the host has completed processing for. |
| 761 | * @count: the number of elements that were consumed |
| 762 | * @arm: Indicates whether the host wants to arms this CQ. |
| 763 | * |
| 764 | * This routine will notify the HBA, by ringing the doorbell, that the |
| 765 | * CQEs have been processed. The @arm parameter specifies whether the |
| 766 | * queue should be rearmed when ringing the doorbell. |
| 767 | **/ |
| 768 | void |
| 769 | lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, |
| 770 | uint32_t count, bool arm) |
| 771 | { |
| 772 | struct lpfc_register doorbell; |
| 773 | |
| 774 | /* sanity check on queue memory */ |
| 775 | if (unlikely(!q || (count == 0 && !arm))) |
| 776 | return; |
| 777 | |
| 778 | /* ring doorbell for number popped */ |
| 779 | doorbell.word0 = 0; |
| 780 | if (arm) |
| 781 | bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1); |
| 782 | bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count); |
| 783 | bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id); |
| 784 | writel(val: doorbell.word0, addr: q->phba->sli4_hba.CQDBregaddr); |
| 785 | } |
| 786 | |
| 787 | /* |
| 788 | * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue |
| 789 | * |
| 790 | * This routine will copy the contents of @wqe to the next available entry on |
| 791 | * the @q. This function will then ring the Receive Queue Doorbell to signal the |
| 792 | * HBA to start processing the Receive Queue Entry. This function returns the |
| 793 | * index that the rqe was copied to if successful. If no entries are available |
| 794 | * on @q then this function will return -ENOMEM. |
| 795 | * The caller is expected to hold the hbalock when calling this routine. |
| 796 | **/ |
| 797 | int |
| 798 | lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, |
| 799 | struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) |
| 800 | { |
| 801 | struct lpfc_rqe *temp_hrqe; |
| 802 | struct lpfc_rqe *temp_drqe; |
| 803 | struct lpfc_register doorbell; |
| 804 | int hq_put_index; |
| 805 | int dq_put_index; |
| 806 | |
| 807 | /* sanity check on queue memory */ |
| 808 | if (unlikely(!hq) || unlikely(!dq)) |
| 809 | return -ENOMEM; |
| 810 | hq_put_index = hq->host_index; |
| 811 | dq_put_index = dq->host_index; |
| 812 | temp_hrqe = lpfc_sli4_qe(q: hq, idx: hq_put_index); |
| 813 | temp_drqe = lpfc_sli4_qe(q: dq, idx: dq_put_index); |
| 814 | |
| 815 | if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) |
| 816 | return -EINVAL; |
| 817 | if (hq_put_index != dq_put_index) |
| 818 | return -EINVAL; |
| 819 | /* If the host has not yet processed the next entry then we are done */ |
| 820 | if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index) |
| 821 | return -EBUSY; |
| 822 | lpfc_sli4_pcimem_bcopy(srcp: hrqe, destp: temp_hrqe, cnt: hq->entry_size); |
| 823 | lpfc_sli4_pcimem_bcopy(srcp: drqe, destp: temp_drqe, cnt: dq->entry_size); |
| 824 | |
| 825 | /* Update the host index to point to the next slot */ |
| 826 | hq->host_index = ((hq_put_index + 1) % hq->entry_count); |
| 827 | dq->host_index = ((dq_put_index + 1) % dq->entry_count); |
| 828 | hq->RQ_buf_posted++; |
| 829 | |
| 830 | /* Ring The Header Receive Queue Doorbell */ |
| 831 | if (!(hq->host_index % hq->notify_interval)) { |
| 832 | doorbell.word0 = 0; |
| 833 | if (hq->db_format == LPFC_DB_RING_FORMAT) { |
| 834 | bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, |
| 835 | hq->notify_interval); |
| 836 | bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); |
| 837 | } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { |
| 838 | bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, |
| 839 | hq->notify_interval); |
| 840 | bf_set(lpfc_rq_db_list_fm_index, &doorbell, |
| 841 | hq->host_index); |
| 842 | bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); |
| 843 | } else { |
| 844 | return -EINVAL; |
| 845 | } |
| 846 | writel(val: doorbell.word0, addr: hq->db_regaddr); |
| 847 | } |
| 848 | return hq_put_index; |
| 849 | } |
| 850 | |
| 851 | /* |
| 852 | * lpfc_sli4_rq_release - Updates internal hba index for RQ |
| 853 | * |
| 854 | * This routine will update the HBA index of a queue to reflect consumption of |
| 855 | * one Receive Queue Entry by the HBA. When the HBA indicates that it has |
| 856 | * consumed an entry the host calls this function to update the queue's |
| 857 | * internal pointers. This routine returns the number of entries that were |
| 858 | * consumed by the HBA. |
| 859 | **/ |
| 860 | static uint32_t |
| 861 | lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) |
| 862 | { |
| 863 | /* sanity check on queue memory */ |
| 864 | if (unlikely(!hq) || unlikely(!dq)) |
| 865 | return 0; |
| 866 | |
| 867 | if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) |
| 868 | return 0; |
| 869 | hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); |
| 870 | dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); |
| 871 | return 1; |
| 872 | } |
| 873 | |
| 874 | /** |
| 875 | * lpfc_cmd_iocb - Get next command iocb entry in the ring |
| 876 | * @phba: Pointer to HBA context object. |
| 877 | * @pring: Pointer to driver SLI ring object. |
| 878 | * |
| 879 | * This function returns pointer to next command iocb entry |
| 880 | * in the command ring. The caller must hold hbalock to prevent |
| 881 | * other threads consume the next command iocb. |
| 882 | * SLI-2/SLI-3 provide different sized iocbs. |
| 883 | **/ |
| 884 | static inline IOCB_t * |
| 885 | lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
| 886 | { |
| 887 | return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + |
| 888 | pring->sli.sli3.cmdidx * phba->iocb_cmd_size); |
| 889 | } |
| 890 | |
| 891 | /** |
| 892 | * lpfc_resp_iocb - Get next response iocb entry in the ring |
| 893 | * @phba: Pointer to HBA context object. |
| 894 | * @pring: Pointer to driver SLI ring object. |
| 895 | * |
| 896 | * This function returns pointer to next response iocb entry |
| 897 | * in the response ring. The caller must hold hbalock to make sure |
| 898 | * that no other thread consume the next response iocb. |
| 899 | * SLI-2/SLI-3 provide different sized iocbs. |
| 900 | **/ |
| 901 | static inline IOCB_t * |
| 902 | lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
| 903 | { |
| 904 | return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) + |
| 905 | pring->sli.sli3.rspidx * phba->iocb_rsp_size); |
| 906 | } |
| 907 | |
| 908 | /** |
| 909 | * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool |
| 910 | * @phba: Pointer to HBA context object. |
| 911 | * |
| 912 | * This function is called with hbalock held. This function |
| 913 | * allocates a new driver iocb object from the iocb pool. If the |
| 914 | * allocation is successful, it returns pointer to the newly |
| 915 | * allocated iocb object else it returns NULL. |
| 916 | **/ |
| 917 | struct lpfc_iocbq * |
| 918 | __lpfc_sli_get_iocbq(struct lpfc_hba *phba) |
| 919 | { |
| 920 | struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; |
| 921 | struct lpfc_iocbq * iocbq = NULL; |
| 922 | |
| 923 | lockdep_assert_held(&phba->hbalock); |
| 924 | |
| 925 | list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); |
| 926 | if (iocbq) |
| 927 | phba->iocb_cnt++; |
| 928 | if (phba->iocb_cnt > phba->iocb_max) |
| 929 | phba->iocb_max = phba->iocb_cnt; |
| 930 | return iocbq; |
| 931 | } |
| 932 | |
| 933 | /** |
| 934 | * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. |
| 935 | * @phba: Pointer to HBA context object. |
| 936 | * @xritag: XRI value. |
| 937 | * |
| 938 | * This function clears the sglq pointer from the array of active |
| 939 | * sglq's. The xritag that is passed in is used to index into the |
| 940 | * array. Before the xritag can be used it needs to be adjusted |
| 941 | * by subtracting the xribase. |
| 942 | * |
| 943 | * Returns sglq ponter = success, NULL = Failure. |
| 944 | **/ |
| 945 | struct lpfc_sglq * |
| 946 | __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) |
| 947 | { |
| 948 | struct lpfc_sglq *sglq; |
| 949 | |
| 950 | sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; |
| 951 | phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL; |
| 952 | return sglq; |
| 953 | } |
| 954 | |
| 955 | /** |
| 956 | * __lpfc_get_active_sglq - Get the active sglq for this XRI. |
| 957 | * @phba: Pointer to HBA context object. |
| 958 | * @xritag: XRI value. |
| 959 | * |
| 960 | * This function returns the sglq pointer from the array of active |
| 961 | * sglq's. The xritag that is passed in is used to index into the |
| 962 | * array. Before the xritag can be used it needs to be adjusted |
| 963 | * by subtracting the xribase. |
| 964 | * |
| 965 | * Returns sglq ponter = success, NULL = Failure. |
| 966 | **/ |
| 967 | struct lpfc_sglq * |
| 968 | __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) |
| 969 | { |
| 970 | struct lpfc_sglq *sglq; |
| 971 | |
| 972 | sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; |
| 973 | return sglq; |
| 974 | } |
| 975 | |
| 976 | /** |
| 977 | * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. |
| 978 | * @phba: Pointer to HBA context object. |
| 979 | * @xritag: xri used in this exchange. |
| 980 | * @rrq: The RRQ to be cleared. |
| 981 | * |
| 982 | **/ |
| 983 | void |
| 984 | lpfc_clr_rrq_active(struct lpfc_hba *phba, |
| 985 | uint16_t xritag, |
| 986 | struct lpfc_node_rrq *rrq) |
| 987 | { |
| 988 | struct lpfc_nodelist *ndlp = NULL; |
| 989 | |
| 990 | /* Lookup did to verify if did is still active on this vport */ |
| 991 | if (rrq->vport) |
| 992 | ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); |
| 993 | |
| 994 | if (!ndlp) |
| 995 | goto out; |
| 996 | |
| 997 | if (test_and_clear_bit(nr: xritag, addr: ndlp->active_rrqs_xri_bitmap)) { |
| 998 | rrq->send_rrq = 0; |
| 999 | rrq->xritag = 0; |
| 1000 | rrq->rrq_stop_time = 0; |
| 1001 | } |
| 1002 | out: |
| 1003 | mempool_free(element: rrq, pool: phba->rrq_pool); |
| 1004 | } |
| 1005 | |
| 1006 | /** |
| 1007 | * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. |
| 1008 | * @phba: Pointer to HBA context object. |
| 1009 | * |
| 1010 | * This function is called with hbalock held. This function |
| 1011 | * Checks if stop_time (ratov from setting rrq active) has |
| 1012 | * been reached, if it has and the send_rrq flag is set then |
| 1013 | * it will call lpfc_send_rrq. If the send_rrq flag is not set |
| 1014 | * then it will just call the routine to clear the rrq and |
| 1015 | * free the rrq resource. |
| 1016 | * The timer is set to the next rrq that is going to expire before |
| 1017 | * leaving the routine. |
| 1018 | * |
| 1019 | **/ |
| 1020 | void |
| 1021 | lpfc_handle_rrq_active(struct lpfc_hba *phba) |
| 1022 | { |
| 1023 | struct lpfc_node_rrq *rrq; |
| 1024 | struct lpfc_node_rrq *nextrrq; |
| 1025 | unsigned long next_time; |
| 1026 | unsigned long iflags; |
| 1027 | LIST_HEAD(send_rrq); |
| 1028 | |
| 1029 | clear_bit(nr: HBA_RRQ_ACTIVE, addr: &phba->hba_flag); |
| 1030 | next_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1); |
| 1031 | spin_lock_irqsave(&phba->rrq_list_lock, iflags); |
| 1032 | list_for_each_entry_safe(rrq, nextrrq, |
| 1033 | &phba->active_rrq_list, list) { |
| 1034 | if (time_after(jiffies, rrq->rrq_stop_time)) |
| 1035 | list_move(list: &rrq->list, head: &send_rrq); |
| 1036 | else if (time_before(rrq->rrq_stop_time, next_time)) |
| 1037 | next_time = rrq->rrq_stop_time; |
| 1038 | } |
| 1039 | spin_unlock_irqrestore(lock: &phba->rrq_list_lock, flags: iflags); |
| 1040 | if ((!list_empty(head: &phba->active_rrq_list)) && |
| 1041 | (!test_bit(FC_UNLOADING, &phba->pport->load_flag))) |
| 1042 | mod_timer(timer: &phba->rrq_tmr, expires: next_time); |
| 1043 | list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { |
| 1044 | list_del(entry: &rrq->list); |
| 1045 | if (!rrq->send_rrq) { |
| 1046 | /* this call will free the rrq */ |
| 1047 | lpfc_clr_rrq_active(phba, xritag: rrq->xritag, rrq); |
| 1048 | } else if (lpfc_send_rrq(phba, rrq)) { |
| 1049 | /* if we send the rrq then the completion handler |
| 1050 | * will clear the bit in the xribitmap. |
| 1051 | */ |
| 1052 | lpfc_clr_rrq_active(phba, xritag: rrq->xritag, |
| 1053 | rrq); |
| 1054 | } |
| 1055 | } |
| 1056 | } |
| 1057 | |
| 1058 | /** |
| 1059 | * lpfc_get_active_rrq - Get the active RRQ for this exchange. |
| 1060 | * @vport: Pointer to vport context object. |
| 1061 | * @xri: The xri used in the exchange. |
| 1062 | * @did: The targets DID for this exchange. |
| 1063 | * |
| 1064 | * returns NULL = rrq not found in the phba->active_rrq_list. |
| 1065 | * rrq = rrq for this xri and target. |
| 1066 | **/ |
| 1067 | struct lpfc_node_rrq * |
| 1068 | lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) |
| 1069 | { |
| 1070 | struct lpfc_hba *phba = vport->phba; |
| 1071 | struct lpfc_node_rrq *rrq; |
| 1072 | struct lpfc_node_rrq *nextrrq; |
| 1073 | unsigned long iflags; |
| 1074 | |
| 1075 | if (phba->sli_rev != LPFC_SLI_REV4) |
| 1076 | return NULL; |
| 1077 | spin_lock_irqsave(&phba->rrq_list_lock, iflags); |
| 1078 | list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { |
| 1079 | if (rrq->vport == vport && rrq->xritag == xri && |
| 1080 | rrq->nlp_DID == did){ |
| 1081 | list_del(entry: &rrq->list); |
| 1082 | spin_unlock_irqrestore(lock: &phba->rrq_list_lock, flags: iflags); |
| 1083 | return rrq; |
| 1084 | } |
| 1085 | } |
| 1086 | spin_unlock_irqrestore(lock: &phba->rrq_list_lock, flags: iflags); |
| 1087 | return NULL; |
| 1088 | } |
| 1089 | |
| 1090 | /** |
| 1091 | * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. |
| 1092 | * @vport: Pointer to vport context object. |
| 1093 | * @ndlp: Pointer to the lpfc_node_list structure. |
| 1094 | * If ndlp is NULL Remove all active RRQs for this vport from the |
| 1095 | * phba->active_rrq_list and clear the rrq. |
| 1096 | * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. |
| 1097 | **/ |
| 1098 | void |
| 1099 | lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
| 1100 | |
| 1101 | { |
| 1102 | struct lpfc_hba *phba = vport->phba; |
| 1103 | struct lpfc_node_rrq *rrq; |
| 1104 | struct lpfc_node_rrq *nextrrq; |
| 1105 | unsigned long iflags; |
| 1106 | LIST_HEAD(rrq_list); |
| 1107 | |
| 1108 | if (phba->sli_rev != LPFC_SLI_REV4) |
| 1109 | return; |
| 1110 | if (!ndlp) { |
| 1111 | lpfc_sli4_vport_delete_els_xri_aborted(vport); |
| 1112 | lpfc_sli4_vport_delete_fcp_xri_aborted(vport); |
| 1113 | } |
| 1114 | spin_lock_irqsave(&phba->rrq_list_lock, iflags); |
| 1115 | list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { |
| 1116 | if (rrq->vport != vport) |
| 1117 | continue; |
| 1118 | |
| 1119 | if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID)) |
| 1120 | list_move(list: &rrq->list, head: &rrq_list); |
| 1121 | |
| 1122 | } |
| 1123 | spin_unlock_irqrestore(lock: &phba->rrq_list_lock, flags: iflags); |
| 1124 | |
| 1125 | list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { |
| 1126 | list_del(entry: &rrq->list); |
| 1127 | lpfc_clr_rrq_active(phba, xritag: rrq->xritag, rrq); |
| 1128 | } |
| 1129 | } |
| 1130 | |
| 1131 | /** |
| 1132 | * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. |
| 1133 | * @phba: Pointer to HBA context object. |
| 1134 | * @ndlp: Targets nodelist pointer for this exchange. |
| 1135 | * @xritag: the xri in the bitmap to test. |
| 1136 | * |
| 1137 | * This function returns: |
| 1138 | * 0 = rrq not active for this xri |
| 1139 | * 1 = rrq is valid for this xri. |
| 1140 | **/ |
| 1141 | int |
| 1142 | lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, |
| 1143 | uint16_t xritag) |
| 1144 | { |
| 1145 | if (!ndlp) |
| 1146 | return 0; |
| 1147 | if (!ndlp->active_rrqs_xri_bitmap) |
| 1148 | return 0; |
| 1149 | if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap)) |
| 1150 | return 1; |
| 1151 | else |
| 1152 | return 0; |
| 1153 | } |
| 1154 | |
| 1155 | /** |
| 1156 | * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap. |
| 1157 | * @phba: Pointer to HBA context object. |
| 1158 | * @ndlp: nodelist pointer for this target. |
| 1159 | * @xritag: xri used in this exchange. |
| 1160 | * @rxid: Remote Exchange ID. |
| 1161 | * @send_rrq: Flag used to determine if we should send rrq els cmd. |
| 1162 | * |
| 1163 | * This function takes the hbalock. |
| 1164 | * The active bit is always set in the active rrq xri_bitmap even |
| 1165 | * if there is no slot avaiable for the other rrq information. |
| 1166 | * |
| 1167 | * returns 0 rrq actived for this xri |
| 1168 | * < 0 No memory or invalid ndlp. |
| 1169 | **/ |
| 1170 | int |
| 1171 | lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, |
| 1172 | uint16_t xritag, uint16_t rxid, uint16_t send_rrq) |
| 1173 | { |
| 1174 | unsigned long iflags; |
| 1175 | struct lpfc_node_rrq *rrq; |
| 1176 | int empty; |
| 1177 | |
| 1178 | if (!ndlp) |
| 1179 | return -EINVAL; |
| 1180 | |
| 1181 | if (!phba->cfg_enable_rrq) |
| 1182 | return -EINVAL; |
| 1183 | |
| 1184 | if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) { |
| 1185 | clear_bit(nr: HBA_RRQ_ACTIVE, addr: &phba->hba_flag); |
| 1186 | goto outnl; |
| 1187 | } |
| 1188 | |
| 1189 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 1190 | if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag)) |
| 1191 | goto out; |
| 1192 | |
| 1193 | if (!ndlp->active_rrqs_xri_bitmap) |
| 1194 | goto out; |
| 1195 | |
| 1196 | if (test_and_set_bit(nr: xritag, addr: ndlp->active_rrqs_xri_bitmap)) |
| 1197 | goto out; |
| 1198 | |
| 1199 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 1200 | rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC); |
| 1201 | if (!rrq) { |
| 1202 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 1203 | "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" |
| 1204 | " DID:0x%x Send:%d\n" , |
| 1205 | xritag, rxid, ndlp->nlp_DID, send_rrq); |
| 1206 | return -EINVAL; |
| 1207 | } |
| 1208 | if (phba->cfg_enable_rrq == 1) |
| 1209 | rrq->send_rrq = send_rrq; |
| 1210 | else |
| 1211 | rrq->send_rrq = 0; |
| 1212 | rrq->xritag = xritag; |
| 1213 | rrq->rrq_stop_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1); |
| 1214 | rrq->nlp_DID = ndlp->nlp_DID; |
| 1215 | rrq->vport = ndlp->vport; |
| 1216 | rrq->rxid = rxid; |
| 1217 | |
| 1218 | spin_lock_irqsave(&phba->rrq_list_lock, iflags); |
| 1219 | empty = list_empty(head: &phba->active_rrq_list); |
| 1220 | list_add_tail(new: &rrq->list, head: &phba->active_rrq_list); |
| 1221 | spin_unlock_irqrestore(lock: &phba->rrq_list_lock, flags: iflags); |
| 1222 | set_bit(nr: HBA_RRQ_ACTIVE, addr: &phba->hba_flag); |
| 1223 | if (empty) |
| 1224 | lpfc_worker_wake_up(phba); |
| 1225 | return 0; |
| 1226 | out: |
| 1227 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 1228 | outnl: |
| 1229 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 1230 | "2921 Can't set rrq active xri:0x%x rxid:0x%x" |
| 1231 | " DID:0x%x Send:%d\n" , |
| 1232 | xritag, rxid, ndlp->nlp_DID, send_rrq); |
| 1233 | return -EINVAL; |
| 1234 | } |
| 1235 | |
| 1236 | /** |
| 1237 | * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool |
| 1238 | * @phba: Pointer to HBA context object. |
| 1239 | * @piocbq: Pointer to the iocbq. |
| 1240 | * |
| 1241 | * The driver calls this function with either the nvme ls ring lock |
| 1242 | * or the fc els ring lock held depending on the iocb usage. This function |
| 1243 | * gets a new driver sglq object from the sglq list. If the list is not empty |
| 1244 | * then it is successful, it returns pointer to the newly allocated sglq |
| 1245 | * object else it returns NULL. |
| 1246 | **/ |
| 1247 | static struct lpfc_sglq * |
| 1248 | __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) |
| 1249 | { |
| 1250 | struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; |
| 1251 | struct lpfc_sglq *sglq = NULL; |
| 1252 | struct lpfc_sglq *start_sglq = NULL; |
| 1253 | struct lpfc_io_buf *lpfc_cmd; |
| 1254 | struct lpfc_nodelist *ndlp; |
| 1255 | int found = 0; |
| 1256 | u8 cmnd; |
| 1257 | |
| 1258 | cmnd = get_job_cmnd(phba, iocbq: piocbq); |
| 1259 | |
| 1260 | if (piocbq->cmd_flag & LPFC_IO_FCP) { |
| 1261 | lpfc_cmd = piocbq->io_buf; |
| 1262 | ndlp = lpfc_cmd->rdata->pnode; |
| 1263 | } else if ((cmnd == CMD_GEN_REQUEST64_CR) && |
| 1264 | !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) { |
| 1265 | ndlp = piocbq->ndlp; |
| 1266 | } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) { |
| 1267 | if (piocbq->cmd_flag & LPFC_IO_LOOPBACK) |
| 1268 | ndlp = NULL; |
| 1269 | else |
| 1270 | ndlp = piocbq->ndlp; |
| 1271 | } else { |
| 1272 | ndlp = piocbq->ndlp; |
| 1273 | } |
| 1274 | |
| 1275 | spin_lock(lock: &phba->sli4_hba.sgl_list_lock); |
| 1276 | list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); |
| 1277 | start_sglq = sglq; |
| 1278 | while (!found) { |
| 1279 | if (!sglq) |
| 1280 | break; |
| 1281 | if (ndlp && ndlp->active_rrqs_xri_bitmap && |
| 1282 | test_bit(sglq->sli4_lxritag, |
| 1283 | ndlp->active_rrqs_xri_bitmap)) { |
| 1284 | /* This xri has an rrq outstanding for this DID. |
| 1285 | * put it back in the list and get another xri. |
| 1286 | */ |
| 1287 | list_add_tail(new: &sglq->list, head: lpfc_els_sgl_list); |
| 1288 | sglq = NULL; |
| 1289 | list_remove_head(lpfc_els_sgl_list, sglq, |
| 1290 | struct lpfc_sglq, list); |
| 1291 | if (sglq == start_sglq) { |
| 1292 | list_add_tail(new: &sglq->list, head: lpfc_els_sgl_list); |
| 1293 | sglq = NULL; |
| 1294 | break; |
| 1295 | } else |
| 1296 | continue; |
| 1297 | } |
| 1298 | sglq->ndlp = ndlp; |
| 1299 | found = 1; |
| 1300 | phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; |
| 1301 | sglq->state = SGL_ALLOCATED; |
| 1302 | } |
| 1303 | spin_unlock(lock: &phba->sli4_hba.sgl_list_lock); |
| 1304 | return sglq; |
| 1305 | } |
| 1306 | |
| 1307 | /** |
| 1308 | * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool |
| 1309 | * @phba: Pointer to HBA context object. |
| 1310 | * @piocbq: Pointer to the iocbq. |
| 1311 | * |
| 1312 | * This function is called with the sgl_list lock held. This function |
| 1313 | * gets a new driver sglq object from the sglq list. If the |
| 1314 | * list is not empty then it is successful, it returns pointer to the newly |
| 1315 | * allocated sglq object else it returns NULL. |
| 1316 | **/ |
| 1317 | struct lpfc_sglq * |
| 1318 | __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) |
| 1319 | { |
| 1320 | struct list_head *lpfc_nvmet_sgl_list; |
| 1321 | struct lpfc_sglq *sglq = NULL; |
| 1322 | |
| 1323 | lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list; |
| 1324 | |
| 1325 | lockdep_assert_held(&phba->sli4_hba.sgl_list_lock); |
| 1326 | |
| 1327 | list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list); |
| 1328 | if (!sglq) |
| 1329 | return NULL; |
| 1330 | phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; |
| 1331 | sglq->state = SGL_ALLOCATED; |
| 1332 | return sglq; |
| 1333 | } |
| 1334 | |
| 1335 | /** |
| 1336 | * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool |
| 1337 | * @phba: Pointer to HBA context object. |
| 1338 | * |
| 1339 | * This function is called with no lock held. This function |
| 1340 | * allocates a new driver iocb object from the iocb pool. If the |
| 1341 | * allocation is successful, it returns pointer to the newly |
| 1342 | * allocated iocb object else it returns NULL. |
| 1343 | **/ |
| 1344 | struct lpfc_iocbq * |
| 1345 | lpfc_sli_get_iocbq(struct lpfc_hba *phba) |
| 1346 | { |
| 1347 | struct lpfc_iocbq * iocbq = NULL; |
| 1348 | unsigned long iflags; |
| 1349 | |
| 1350 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 1351 | iocbq = __lpfc_sli_get_iocbq(phba); |
| 1352 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 1353 | return iocbq; |
| 1354 | } |
| 1355 | |
| 1356 | /** |
| 1357 | * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool |
| 1358 | * @phba: Pointer to HBA context object. |
| 1359 | * @iocbq: Pointer to driver iocb object. |
| 1360 | * |
| 1361 | * This function is called to release the driver iocb object |
| 1362 | * to the iocb pool. The iotag in the iocb object |
| 1363 | * does not change for each use of the iocb object. This function |
| 1364 | * clears all other fields of the iocb object when it is freed. |
| 1365 | * The sqlq structure that holds the xritag and phys and virtual |
| 1366 | * mappings for the scatter gather list is retrieved from the |
| 1367 | * active array of sglq. The get of the sglq pointer also clears |
| 1368 | * the entry in the array. If the status of the IO indiactes that |
| 1369 | * this IO was aborted then the sglq entry it put on the |
| 1370 | * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the |
| 1371 | * IO has good status or fails for any other reason then the sglq |
| 1372 | * entry is added to the free list (lpfc_els_sgl_list). The hbalock is |
| 1373 | * asserted held in the code path calling this routine. |
| 1374 | **/ |
| 1375 | static void |
| 1376 | __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) |
| 1377 | { |
| 1378 | struct lpfc_sglq *sglq; |
| 1379 | unsigned long iflag = 0; |
| 1380 | struct lpfc_sli_ring *pring; |
| 1381 | |
| 1382 | if (iocbq->sli4_xritag == NO_XRI) |
| 1383 | sglq = NULL; |
| 1384 | else |
| 1385 | sglq = __lpfc_clear_active_sglq(phba, xritag: iocbq->sli4_lxritag); |
| 1386 | |
| 1387 | |
| 1388 | if (sglq) { |
| 1389 | if (iocbq->cmd_flag & LPFC_IO_NVMET) { |
| 1390 | spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, |
| 1391 | iflag); |
| 1392 | sglq->state = SGL_FREED; |
| 1393 | sglq->ndlp = NULL; |
| 1394 | list_add_tail(new: &sglq->list, |
| 1395 | head: &phba->sli4_hba.lpfc_nvmet_sgl_list); |
| 1396 | spin_unlock_irqrestore( |
| 1397 | lock: &phba->sli4_hba.sgl_list_lock, flags: iflag); |
| 1398 | goto out; |
| 1399 | } |
| 1400 | |
| 1401 | if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) && |
| 1402 | (!(unlikely(pci_channel_offline(phba->pcidev)))) && |
| 1403 | sglq->state != SGL_XRI_ABORTED) { |
| 1404 | spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, |
| 1405 | iflag); |
| 1406 | |
| 1407 | /* Check if we can get a reference on ndlp */ |
| 1408 | if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp)) |
| 1409 | sglq->ndlp = NULL; |
| 1410 | |
| 1411 | list_add(new: &sglq->list, |
| 1412 | head: &phba->sli4_hba.lpfc_abts_els_sgl_list); |
| 1413 | spin_unlock_irqrestore( |
| 1414 | lock: &phba->sli4_hba.sgl_list_lock, flags: iflag); |
| 1415 | } else { |
| 1416 | spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, |
| 1417 | iflag); |
| 1418 | sglq->state = SGL_FREED; |
| 1419 | sglq->ndlp = NULL; |
| 1420 | list_add_tail(new: &sglq->list, |
| 1421 | head: &phba->sli4_hba.lpfc_els_sgl_list); |
| 1422 | spin_unlock_irqrestore( |
| 1423 | lock: &phba->sli4_hba.sgl_list_lock, flags: iflag); |
| 1424 | pring = lpfc_phba_elsring(phba); |
| 1425 | /* Check if TXQ queue needs to be serviced */ |
| 1426 | if (pring && (!list_empty(head: &pring->txq))) |
| 1427 | lpfc_worker_wake_up(phba); |
| 1428 | } |
| 1429 | } |
| 1430 | |
| 1431 | out: |
| 1432 | /* |
| 1433 | * Clean all volatile data fields, preserve iotag and node struct. |
| 1434 | */ |
| 1435 | memset_startat(iocbq, 0, wqe); |
| 1436 | iocbq->sli4_lxritag = NO_XRI; |
| 1437 | iocbq->sli4_xritag = NO_XRI; |
| 1438 | iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF | |
| 1439 | LPFC_IO_NVME_LS); |
| 1440 | list_add_tail(new: &iocbq->list, head: &phba->lpfc_iocb_list); |
| 1441 | } |
| 1442 | |
| 1443 | |
| 1444 | /** |
| 1445 | * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool |
| 1446 | * @phba: Pointer to HBA context object. |
| 1447 | * @iocbq: Pointer to driver iocb object. |
| 1448 | * |
| 1449 | * This function is called to release the driver iocb object to the |
| 1450 | * iocb pool. The iotag in the iocb object does not change for each |
| 1451 | * use of the iocb object. This function clears all other fields of |
| 1452 | * the iocb object when it is freed. The hbalock is asserted held in |
| 1453 | * the code path calling this routine. |
| 1454 | **/ |
| 1455 | static void |
| 1456 | __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) |
| 1457 | { |
| 1458 | |
| 1459 | /* |
| 1460 | * Clean all volatile data fields, preserve iotag and node struct. |
| 1461 | */ |
| 1462 | memset_startat(iocbq, 0, iocb); |
| 1463 | iocbq->sli4_xritag = NO_XRI; |
| 1464 | list_add_tail(new: &iocbq->list, head: &phba->lpfc_iocb_list); |
| 1465 | } |
| 1466 | |
| 1467 | /** |
| 1468 | * __lpfc_sli_release_iocbq - Release iocb to the iocb pool |
| 1469 | * @phba: Pointer to HBA context object. |
| 1470 | * @iocbq: Pointer to driver iocb object. |
| 1471 | * |
| 1472 | * This function is called with hbalock held to release driver |
| 1473 | * iocb object to the iocb pool. The iotag in the iocb object |
| 1474 | * does not change for each use of the iocb object. This function |
| 1475 | * clears all other fields of the iocb object when it is freed. |
| 1476 | **/ |
| 1477 | static void |
| 1478 | __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) |
| 1479 | { |
| 1480 | lockdep_assert_held(&phba->hbalock); |
| 1481 | |
| 1482 | phba->__lpfc_sli_release_iocbq(phba, iocbq); |
| 1483 | phba->iocb_cnt--; |
| 1484 | } |
| 1485 | |
| 1486 | /** |
| 1487 | * lpfc_sli_release_iocbq - Release iocb to the iocb pool |
| 1488 | * @phba: Pointer to HBA context object. |
| 1489 | * @iocbq: Pointer to driver iocb object. |
| 1490 | * |
| 1491 | * This function is called with no lock held to release the iocb to |
| 1492 | * iocb pool. |
| 1493 | **/ |
| 1494 | void |
| 1495 | lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) |
| 1496 | { |
| 1497 | unsigned long iflags; |
| 1498 | |
| 1499 | /* |
| 1500 | * Clean all volatile data fields, preserve iotag and node struct. |
| 1501 | */ |
| 1502 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 1503 | __lpfc_sli_release_iocbq(phba, iocbq); |
| 1504 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 1505 | } |
| 1506 | |
| 1507 | /** |
| 1508 | * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. |
| 1509 | * @phba: Pointer to HBA context object. |
| 1510 | * @iocblist: List of IOCBs. |
| 1511 | * @ulpstatus: ULP status in IOCB command field. |
| 1512 | * @ulpWord4: ULP word-4 in IOCB command field. |
| 1513 | * |
| 1514 | * This function is called with a list of IOCBs to cancel. It cancels the IOCB |
| 1515 | * on the list by invoking the complete callback function associated with the |
| 1516 | * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond |
| 1517 | * fields. |
| 1518 | **/ |
| 1519 | void |
| 1520 | lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, |
| 1521 | uint32_t ulpstatus, uint32_t ulpWord4) |
| 1522 | { |
| 1523 | struct lpfc_iocbq *piocb; |
| 1524 | |
| 1525 | while (!list_empty(head: iocblist)) { |
| 1526 | list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); |
| 1527 | if (piocb->cmd_cmpl) { |
| 1528 | if (piocb->cmd_flag & LPFC_IO_NVME) { |
| 1529 | lpfc_nvme_cancel_iocb(phba, pwqeIn: piocb, |
| 1530 | stat: ulpstatus, param: ulpWord4); |
| 1531 | } else { |
| 1532 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 1533 | bf_set(lpfc_wcqe_c_status, |
| 1534 | &piocb->wcqe_cmpl, ulpstatus); |
| 1535 | piocb->wcqe_cmpl.parameter = ulpWord4; |
| 1536 | } else { |
| 1537 | piocb->iocb.ulpStatus = ulpstatus; |
| 1538 | piocb->iocb.un.ulpWord[4] = ulpWord4; |
| 1539 | } |
| 1540 | (piocb->cmd_cmpl) (phba, piocb, piocb); |
| 1541 | } |
| 1542 | } else { |
| 1543 | lpfc_sli_release_iocbq(phba, iocbq: piocb); |
| 1544 | } |
| 1545 | } |
| 1546 | return; |
| 1547 | } |
| 1548 | |
| 1549 | /** |
| 1550 | * lpfc_sli_iocb_cmd_type - Get the iocb type |
| 1551 | * @iocb_cmnd: iocb command code. |
| 1552 | * |
| 1553 | * This function is called by ring event handler function to get the iocb type. |
| 1554 | * This function translates the iocb command to an iocb command type used to |
| 1555 | * decide the final disposition of each completed IOCB. |
| 1556 | * The function returns |
| 1557 | * LPFC_UNKNOWN_IOCB if it is an unsupported iocb |
| 1558 | * LPFC_SOL_IOCB if it is a solicited iocb completion |
| 1559 | * LPFC_ABORT_IOCB if it is an abort iocb |
| 1560 | * LPFC_UNSOL_IOCB if it is an unsolicited iocb |
| 1561 | * |
| 1562 | * The caller is not required to hold any lock. |
| 1563 | **/ |
| 1564 | static lpfc_iocb_type |
| 1565 | lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) |
| 1566 | { |
| 1567 | lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; |
| 1568 | |
| 1569 | if (iocb_cmnd > CMD_MAX_IOCB_CMD) |
| 1570 | return 0; |
| 1571 | |
| 1572 | switch (iocb_cmnd) { |
| 1573 | case CMD_XMIT_SEQUENCE_CR: |
| 1574 | case CMD_XMIT_SEQUENCE_CX: |
| 1575 | case CMD_XMIT_BCAST_CN: |
| 1576 | case CMD_XMIT_BCAST_CX: |
| 1577 | case CMD_ELS_REQUEST_CR: |
| 1578 | case CMD_ELS_REQUEST_CX: |
| 1579 | case CMD_CREATE_XRI_CR: |
| 1580 | case CMD_CREATE_XRI_CX: |
| 1581 | case CMD_GET_RPI_CN: |
| 1582 | case CMD_XMIT_ELS_RSP_CX: |
| 1583 | case CMD_GET_RPI_CR: |
| 1584 | case CMD_FCP_IWRITE_CR: |
| 1585 | case CMD_FCP_IWRITE_CX: |
| 1586 | case CMD_FCP_IREAD_CR: |
| 1587 | case CMD_FCP_IREAD_CX: |
| 1588 | case CMD_FCP_ICMND_CR: |
| 1589 | case CMD_FCP_ICMND_CX: |
| 1590 | case CMD_FCP_TSEND_CX: |
| 1591 | case CMD_FCP_TRSP_CX: |
| 1592 | case CMD_FCP_TRECEIVE_CX: |
| 1593 | case CMD_FCP_AUTO_TRSP_CX: |
| 1594 | case CMD_ADAPTER_MSG: |
| 1595 | case CMD_ADAPTER_DUMP: |
| 1596 | case CMD_XMIT_SEQUENCE64_CR: |
| 1597 | case CMD_XMIT_SEQUENCE64_CX: |
| 1598 | case CMD_XMIT_BCAST64_CN: |
| 1599 | case CMD_XMIT_BCAST64_CX: |
| 1600 | case CMD_ELS_REQUEST64_CR: |
| 1601 | case CMD_ELS_REQUEST64_CX: |
| 1602 | case CMD_FCP_IWRITE64_CR: |
| 1603 | case CMD_FCP_IWRITE64_CX: |
| 1604 | case CMD_FCP_IREAD64_CR: |
| 1605 | case CMD_FCP_IREAD64_CX: |
| 1606 | case CMD_FCP_ICMND64_CR: |
| 1607 | case CMD_FCP_ICMND64_CX: |
| 1608 | case CMD_FCP_TSEND64_CX: |
| 1609 | case CMD_FCP_TRSP64_CX: |
| 1610 | case CMD_FCP_TRECEIVE64_CX: |
| 1611 | case CMD_GEN_REQUEST64_CR: |
| 1612 | case CMD_GEN_REQUEST64_CX: |
| 1613 | case CMD_XMIT_ELS_RSP64_CX: |
| 1614 | case DSSCMD_IWRITE64_CR: |
| 1615 | case DSSCMD_IWRITE64_CX: |
| 1616 | case DSSCMD_IREAD64_CR: |
| 1617 | case DSSCMD_IREAD64_CX: |
| 1618 | case CMD_SEND_FRAME: |
| 1619 | type = LPFC_SOL_IOCB; |
| 1620 | break; |
| 1621 | case CMD_ABORT_XRI_CN: |
| 1622 | case CMD_ABORT_XRI_CX: |
| 1623 | case CMD_CLOSE_XRI_CN: |
| 1624 | case CMD_CLOSE_XRI_CX: |
| 1625 | case CMD_XRI_ABORTED_CX: |
| 1626 | case CMD_ABORT_MXRI64_CN: |
| 1627 | case CMD_XMIT_BLS_RSP64_CX: |
| 1628 | type = LPFC_ABORT_IOCB; |
| 1629 | break; |
| 1630 | case CMD_RCV_SEQUENCE_CX: |
| 1631 | case CMD_RCV_ELS_REQ_CX: |
| 1632 | case CMD_RCV_SEQUENCE64_CX: |
| 1633 | case CMD_RCV_ELS_REQ64_CX: |
| 1634 | case CMD_ASYNC_STATUS: |
| 1635 | case CMD_IOCB_RCV_SEQ64_CX: |
| 1636 | case CMD_IOCB_RCV_ELS64_CX: |
| 1637 | case CMD_IOCB_RCV_CONT64_CX: |
| 1638 | case CMD_IOCB_RET_XRI64_CX: |
| 1639 | type = LPFC_UNSOL_IOCB; |
| 1640 | break; |
| 1641 | case CMD_IOCB_XMIT_MSEQ64_CR: |
| 1642 | case CMD_IOCB_XMIT_MSEQ64_CX: |
| 1643 | case CMD_IOCB_RCV_SEQ_LIST64_CX: |
| 1644 | case CMD_IOCB_RCV_ELS_LIST64_CX: |
| 1645 | case CMD_IOCB_CLOSE_EXTENDED_CN: |
| 1646 | case CMD_IOCB_ABORT_EXTENDED_CN: |
| 1647 | case CMD_IOCB_RET_HBQE64_CN: |
| 1648 | case CMD_IOCB_FCP_IBIDIR64_CR: |
| 1649 | case CMD_IOCB_FCP_IBIDIR64_CX: |
| 1650 | case CMD_IOCB_FCP_ITASKMGT64_CX: |
| 1651 | case CMD_IOCB_LOGENTRY_CN: |
| 1652 | case CMD_IOCB_LOGENTRY_ASYNC_CN: |
| 1653 | printk("%s - Unhandled SLI-3 Command x%x\n" , |
| 1654 | __func__, iocb_cmnd); |
| 1655 | type = LPFC_UNKNOWN_IOCB; |
| 1656 | break; |
| 1657 | default: |
| 1658 | type = LPFC_UNKNOWN_IOCB; |
| 1659 | break; |
| 1660 | } |
| 1661 | |
| 1662 | return type; |
| 1663 | } |
| 1664 | |
| 1665 | /** |
| 1666 | * lpfc_sli_ring_map - Issue config_ring mbox for all rings |
| 1667 | * @phba: Pointer to HBA context object. |
| 1668 | * |
| 1669 | * This function is called from SLI initialization code |
| 1670 | * to configure every ring of the HBA's SLI interface. The |
| 1671 | * caller is not required to hold any lock. This function issues |
| 1672 | * a config_ring mailbox command for each ring. |
| 1673 | * This function returns zero if successful else returns a negative |
| 1674 | * error code. |
| 1675 | **/ |
| 1676 | static int |
| 1677 | lpfc_sli_ring_map(struct lpfc_hba *phba) |
| 1678 | { |
| 1679 | struct lpfc_sli *psli = &phba->sli; |
| 1680 | LPFC_MBOXQ_t *pmb; |
| 1681 | MAILBOX_t *pmbox; |
| 1682 | int i, rc, ret = 0; |
| 1683 | |
| 1684 | pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 1685 | if (!pmb) |
| 1686 | return -ENOMEM; |
| 1687 | pmbox = &pmb->u.mb; |
| 1688 | phba->link_state = LPFC_INIT_MBX_CMDS; |
| 1689 | for (i = 0; i < psli->num_rings; i++) { |
| 1690 | lpfc_config_ring(phba, i, pmb); |
| 1691 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); |
| 1692 | if (rc != MBX_SUCCESS) { |
| 1693 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 1694 | "0446 Adapter failed to init (%d), " |
| 1695 | "mbxCmd x%x CFG_RING, mbxStatus x%x, " |
| 1696 | "ring %d\n" , |
| 1697 | rc, pmbox->mbxCommand, |
| 1698 | pmbox->mbxStatus, i); |
| 1699 | phba->link_state = LPFC_HBA_ERROR; |
| 1700 | ret = -ENXIO; |
| 1701 | break; |
| 1702 | } |
| 1703 | } |
| 1704 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
| 1705 | return ret; |
| 1706 | } |
| 1707 | |
| 1708 | /** |
| 1709 | * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq |
| 1710 | * @phba: Pointer to HBA context object. |
| 1711 | * @pring: Pointer to driver SLI ring object. |
| 1712 | * @piocb: Pointer to the driver iocb object. |
| 1713 | * |
| 1714 | * The driver calls this function with the hbalock held for SLI3 ports or |
| 1715 | * the ring lock held for SLI4 ports. The function adds the |
| 1716 | * new iocb to txcmplq of the given ring. This function always returns |
| 1717 | * 0. If this function is called for ELS ring, this function checks if |
| 1718 | * there is a vport associated with the ELS command. This function also |
| 1719 | * starts els_tmofunc timer if this is an ELS command. |
| 1720 | **/ |
| 1721 | static int |
| 1722 | lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 1723 | struct lpfc_iocbq *piocb) |
| 1724 | { |
| 1725 | u32 ulp_command = 0; |
| 1726 | |
| 1727 | BUG_ON(!piocb); |
| 1728 | ulp_command = get_job_cmnd(phba, iocbq: piocb); |
| 1729 | |
| 1730 | list_add_tail(new: &piocb->list, head: &pring->txcmplq); |
| 1731 | piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ; |
| 1732 | pring->txcmplq_cnt++; |
| 1733 | if ((unlikely(pring->ringno == LPFC_ELS_RING)) && |
| 1734 | (ulp_command != CMD_ABORT_XRI_WQE) && |
| 1735 | (ulp_command != CMD_ABORT_XRI_CN) && |
| 1736 | (ulp_command != CMD_CLOSE_XRI_CN)) { |
| 1737 | BUG_ON(!piocb->vport); |
| 1738 | if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag)) |
| 1739 | mod_timer(timer: &piocb->vport->els_tmofunc, |
| 1740 | expires: jiffies + secs_to_jiffies(phba->fc_ratov << 1)); |
| 1741 | } |
| 1742 | |
| 1743 | return 0; |
| 1744 | } |
| 1745 | |
| 1746 | /** |
| 1747 | * lpfc_sli_ringtx_get - Get first element of the txq |
| 1748 | * @phba: Pointer to HBA context object. |
| 1749 | * @pring: Pointer to driver SLI ring object. |
| 1750 | * |
| 1751 | * This function is called with hbalock held to get next |
| 1752 | * iocb in txq of the given ring. If there is any iocb in |
| 1753 | * the txq, the function returns first iocb in the list after |
| 1754 | * removing the iocb from the list, else it returns NULL. |
| 1755 | **/ |
| 1756 | struct lpfc_iocbq * |
| 1757 | lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
| 1758 | { |
| 1759 | struct lpfc_iocbq *cmd_iocb; |
| 1760 | |
| 1761 | lockdep_assert_held(&phba->hbalock); |
| 1762 | |
| 1763 | list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); |
| 1764 | return cmd_iocb; |
| 1765 | } |
| 1766 | |
| 1767 | /** |
| 1768 | * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl |
| 1769 | * @phba: Pointer to HBA context object. |
| 1770 | * @cmdiocb: Pointer to driver command iocb object. |
| 1771 | * @rspiocb: Pointer to driver response iocb object. |
| 1772 | * |
| 1773 | * This routine will inform the driver of any BW adjustments we need |
| 1774 | * to make. These changes will be picked up during the next CMF |
| 1775 | * timer interrupt. In addition, any BW changes will be logged |
| 1776 | * with LOG_CGN_MGMT. |
| 1777 | **/ |
| 1778 | static void |
| 1779 | lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| 1780 | struct lpfc_iocbq *rspiocb) |
| 1781 | { |
| 1782 | union lpfc_wqe128 *wqe; |
| 1783 | uint32_t status, info; |
| 1784 | struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl; |
| 1785 | uint64_t bw, bwdif, slop; |
| 1786 | uint64_t pcent, bwpcent; |
| 1787 | int asig, afpin, sigcnt, fpincnt; |
| 1788 | int wsigmax, wfpinmax, cg, tdp; |
| 1789 | char *s; |
| 1790 | |
| 1791 | /* First check for error */ |
| 1792 | status = bf_get(lpfc_wcqe_c_status, wcqe); |
| 1793 | if (status) { |
| 1794 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 1795 | "6211 CMF_SYNC_WQE Error " |
| 1796 | "req_tag x%x status x%x hwstatus x%x " |
| 1797 | "tdatap x%x parm x%x\n" , |
| 1798 | bf_get(lpfc_wcqe_c_request_tag, wcqe), |
| 1799 | bf_get(lpfc_wcqe_c_status, wcqe), |
| 1800 | bf_get(lpfc_wcqe_c_hw_status, wcqe), |
| 1801 | wcqe->total_data_placed, |
| 1802 | wcqe->parameter); |
| 1803 | goto out; |
| 1804 | } |
| 1805 | |
| 1806 | /* Gather congestion information on a successful cmpl */ |
| 1807 | info = wcqe->parameter; |
| 1808 | phba->cmf_active_info = info; |
| 1809 | |
| 1810 | /* See if firmware info count is valid or has changed */ |
| 1811 | if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info) |
| 1812 | info = 0; |
| 1813 | else |
| 1814 | phba->cmf_info_per_interval = info; |
| 1815 | |
| 1816 | tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe); |
| 1817 | cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe); |
| 1818 | |
| 1819 | /* Get BW requirement from firmware */ |
| 1820 | bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE; |
| 1821 | if (!bw) { |
| 1822 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 1823 | "6212 CMF_SYNC_WQE x%x: NULL bw\n" , |
| 1824 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); |
| 1825 | goto out; |
| 1826 | } |
| 1827 | |
| 1828 | /* Gather information needed for logging if a BW change is required */ |
| 1829 | wqe = &cmdiocb->wqe; |
| 1830 | asig = bf_get(cmf_sync_asig, &wqe->cmf_sync); |
| 1831 | afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync); |
| 1832 | fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync); |
| 1833 | sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync); |
| 1834 | if (phba->cmf_max_bytes_per_interval != bw || |
| 1835 | (asig || afpin || sigcnt || fpincnt)) { |
| 1836 | /* Are we increasing or decreasing BW */ |
| 1837 | if (phba->cmf_max_bytes_per_interval < bw) { |
| 1838 | bwdif = bw - phba->cmf_max_bytes_per_interval; |
| 1839 | s = "Increase" ; |
| 1840 | } else { |
| 1841 | bwdif = phba->cmf_max_bytes_per_interval - bw; |
| 1842 | s = "Decrease" ; |
| 1843 | } |
| 1844 | |
| 1845 | /* What is the change percentage */ |
| 1846 | slop = div_u64(dividend: phba->cmf_link_byte_count, divisor: 200); /*For rounding*/ |
| 1847 | pcent = div64_u64(dividend: bwdif * 100 + slop, |
| 1848 | divisor: phba->cmf_link_byte_count); |
| 1849 | bwpcent = div64_u64(dividend: bw * 100 + slop, |
| 1850 | divisor: phba->cmf_link_byte_count); |
| 1851 | /* Because of bytes adjustment due to shorter timer in |
| 1852 | * lpfc_cmf_timer() the cmf_link_byte_count can be shorter and |
| 1853 | * may seem like BW is above 100%. |
| 1854 | */ |
| 1855 | if (bwpcent > 100) |
| 1856 | bwpcent = 100; |
| 1857 | |
| 1858 | if (phba->cmf_max_bytes_per_interval < bw && |
| 1859 | bwpcent > 95) |
| 1860 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 1861 | "6208 Congestion bandwidth " |
| 1862 | "limits removed\n" ); |
| 1863 | else if ((phba->cmf_max_bytes_per_interval > bw) && |
| 1864 | ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95)) |
| 1865 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 1866 | "6209 Congestion bandwidth " |
| 1867 | "limits in effect\n" ); |
| 1868 | |
| 1869 | if (asig) { |
| 1870 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 1871 | "6237 BW Threshold %lld%% (%lld): " |
| 1872 | "%lld%% %s: Signal Alarm: cg:%d " |
| 1873 | "Info:%u\n" , |
| 1874 | bwpcent, bw, pcent, s, cg, |
| 1875 | phba->cmf_active_info); |
| 1876 | } else if (afpin) { |
| 1877 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 1878 | "6238 BW Threshold %lld%% (%lld): " |
| 1879 | "%lld%% %s: FPIN Alarm: cg:%d " |
| 1880 | "Info:%u\n" , |
| 1881 | bwpcent, bw, pcent, s, cg, |
| 1882 | phba->cmf_active_info); |
| 1883 | } else if (sigcnt) { |
| 1884 | wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync); |
| 1885 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 1886 | "6239 BW Threshold %lld%% (%lld): " |
| 1887 | "%lld%% %s: Signal Warning: " |
| 1888 | "Cnt %d Max %d: cg:%d Info:%u\n" , |
| 1889 | bwpcent, bw, pcent, s, sigcnt, |
| 1890 | wsigmax, cg, phba->cmf_active_info); |
| 1891 | } else if (fpincnt) { |
| 1892 | wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync); |
| 1893 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 1894 | "6240 BW Threshold %lld%% (%lld): " |
| 1895 | "%lld%% %s: FPIN Warning: " |
| 1896 | "Cnt %d Max %d: cg:%d Info:%u\n" , |
| 1897 | bwpcent, bw, pcent, s, fpincnt, |
| 1898 | wfpinmax, cg, phba->cmf_active_info); |
| 1899 | } else { |
| 1900 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 1901 | "6241 BW Threshold %lld%% (%lld): " |
| 1902 | "CMF %lld%% %s: cg:%d Info:%u\n" , |
| 1903 | bwpcent, bw, pcent, s, cg, |
| 1904 | phba->cmf_active_info); |
| 1905 | } |
| 1906 | } else if (info) { |
| 1907 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 1908 | "6246 Info Threshold %u\n" , info); |
| 1909 | } |
| 1910 | |
| 1911 | /* Save BW change to be picked up during next timer interrupt */ |
| 1912 | phba->cmf_last_sync_bw = bw; |
| 1913 | out: |
| 1914 | lpfc_sli_release_iocbq(phba, iocbq: cmdiocb); |
| 1915 | } |
| 1916 | |
| 1917 | /** |
| 1918 | * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE |
| 1919 | * @phba: Pointer to HBA context object. |
| 1920 | * @ms: ms to set in WQE interval, 0 means use init op |
| 1921 | * @total: Total rcv bytes for this interval |
| 1922 | * |
| 1923 | * This routine is called every CMF timer interrupt. Its purpose is |
| 1924 | * to issue a CMF_SYNC_WQE to the firmware to inform it of any events |
| 1925 | * that may indicate we have congestion (FPINs or Signals). Upon |
| 1926 | * completion, the firmware will indicate any BW restrictions the |
| 1927 | * driver may need to take. |
| 1928 | **/ |
| 1929 | int |
| 1930 | lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) |
| 1931 | { |
| 1932 | union lpfc_wqe128 *wqe; |
| 1933 | struct lpfc_iocbq *sync_buf; |
| 1934 | unsigned long iflags; |
| 1935 | u32 ret_val, cgn_sig_freq; |
| 1936 | u32 atot, wtot, max; |
| 1937 | u8 warn_sync_period = 0; |
| 1938 | |
| 1939 | /* First address any alarm / warning activity */ |
| 1940 | atot = atomic_xchg(v: &phba->cgn_sync_alarm_cnt, new: 0); |
| 1941 | wtot = atomic_xchg(v: &phba->cgn_sync_warn_cnt, new: 0); |
| 1942 | |
| 1943 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 1944 | |
| 1945 | /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */ |
| 1946 | if (phba->cmf_active_mode != LPFC_CFG_MANAGED || |
| 1947 | phba->link_state < LPFC_LINK_UP) { |
| 1948 | ret_val = 0; |
| 1949 | goto out_unlock; |
| 1950 | } |
| 1951 | |
| 1952 | sync_buf = __lpfc_sli_get_iocbq(phba); |
| 1953 | if (!sync_buf) { |
| 1954 | lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, |
| 1955 | "6244 No available WQEs for CMF_SYNC_WQE\n" ); |
| 1956 | ret_val = ENOMEM; |
| 1957 | goto out_unlock; |
| 1958 | } |
| 1959 | |
| 1960 | wqe = &sync_buf->wqe; |
| 1961 | |
| 1962 | /* WQEs are reused. Clear stale data and set key fields to zero */ |
| 1963 | memset(wqe, 0, sizeof(*wqe)); |
| 1964 | |
| 1965 | /* If this is the very first CMF_SYNC_WQE, issue an init operation */ |
| 1966 | if (!ms) { |
| 1967 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 1968 | "6441 CMF Init %d - CMF_SYNC_WQE\n" , |
| 1969 | phba->fc_eventTag); |
| 1970 | bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */ |
| 1971 | bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL); |
| 1972 | goto initpath; |
| 1973 | } |
| 1974 | |
| 1975 | bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */ |
| 1976 | bf_set(cmf_sync_interval, &wqe->cmf_sync, ms); |
| 1977 | |
| 1978 | /* Check for alarms / warnings */ |
| 1979 | if (atot) { |
| 1980 | if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { |
| 1981 | /* We hit an Signal alarm condition */ |
| 1982 | bf_set(cmf_sync_asig, &wqe->cmf_sync, 1); |
| 1983 | } else { |
| 1984 | /* We hit a FPIN alarm condition */ |
| 1985 | bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1); |
| 1986 | } |
| 1987 | } else if (wtot) { |
| 1988 | if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || |
| 1989 | phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { |
| 1990 | cgn_sig_freq = phba->cgn_sig_freq ? phba->cgn_sig_freq : |
| 1991 | lpfc_fabric_cgn_frequency; |
| 1992 | /* We hit an Signal warning condition */ |
| 1993 | max = LPFC_SEC_TO_MSEC / cgn_sig_freq * |
| 1994 | lpfc_acqe_cgn_frequency; |
| 1995 | bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max); |
| 1996 | bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot); |
| 1997 | warn_sync_period = lpfc_acqe_cgn_frequency; |
| 1998 | } else { |
| 1999 | /* We hit a FPIN warning condition */ |
| 2000 | bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1); |
| 2001 | bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1); |
| 2002 | if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) |
| 2003 | warn_sync_period = |
| 2004 | LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency); |
| 2005 | } |
| 2006 | } |
| 2007 | |
| 2008 | /* Update total read blocks during previous timer interval */ |
| 2009 | wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE); |
| 2010 | |
| 2011 | initpath: |
| 2012 | bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER); |
| 2013 | wqe->cmf_sync.event_tag = phba->fc_eventTag; |
| 2014 | bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE); |
| 2015 | |
| 2016 | /* Setup reqtag to match the wqe completion. */ |
| 2017 | bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag); |
| 2018 | |
| 2019 | bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1); |
| 2020 | bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period); |
| 2021 | |
| 2022 | bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND); |
| 2023 | bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1); |
| 2024 | bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT); |
| 2025 | |
| 2026 | sync_buf->vport = phba->pport; |
| 2027 | sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl; |
| 2028 | sync_buf->cmd_dmabuf = NULL; |
| 2029 | sync_buf->rsp_dmabuf = NULL; |
| 2030 | sync_buf->bpl_dmabuf = NULL; |
| 2031 | sync_buf->sli4_xritag = NO_XRI; |
| 2032 | |
| 2033 | sync_buf->cmd_flag |= LPFC_IO_CMF; |
| 2034 | ret_val = lpfc_sli4_issue_wqe(phba, qp: &phba->sli4_hba.hdwq[0], pwqe: sync_buf); |
| 2035 | if (ret_val) { |
| 2036 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 2037 | "6214 Cannot issue CMF_SYNC_WQE: x%x\n" , |
| 2038 | ret_val); |
| 2039 | __lpfc_sli_release_iocbq(phba, iocbq: sync_buf); |
| 2040 | } |
| 2041 | out_unlock: |
| 2042 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 2043 | return ret_val; |
| 2044 | } |
| 2045 | |
| 2046 | /** |
| 2047 | * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring |
| 2048 | * @phba: Pointer to HBA context object. |
| 2049 | * @pring: Pointer to driver SLI ring object. |
| 2050 | * |
| 2051 | * This function is called with hbalock held and the caller must post the |
| 2052 | * iocb without releasing the lock. If the caller releases the lock, |
| 2053 | * iocb slot returned by the function is not guaranteed to be available. |
| 2054 | * The function returns pointer to the next available iocb slot if there |
| 2055 | * is available slot in the ring, else it returns NULL. |
| 2056 | * If the get index of the ring is ahead of the put index, the function |
| 2057 | * will post an error attention event to the worker thread to take the |
| 2058 | * HBA to offline state. |
| 2059 | **/ |
| 2060 | static IOCB_t * |
| 2061 | lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
| 2062 | { |
| 2063 | struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; |
| 2064 | uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; |
| 2065 | |
| 2066 | lockdep_assert_held(&phba->hbalock); |
| 2067 | |
| 2068 | if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && |
| 2069 | (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) |
| 2070 | pring->sli.sli3.next_cmdidx = 0; |
| 2071 | |
| 2072 | if (unlikely(pring->sli.sli3.local_getidx == |
| 2073 | pring->sli.sli3.next_cmdidx)) { |
| 2074 | |
| 2075 | pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); |
| 2076 | |
| 2077 | if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { |
| 2078 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 2079 | "0315 Ring %d issue: portCmdGet %d " |
| 2080 | "is bigger than cmd ring %d\n" , |
| 2081 | pring->ringno, |
| 2082 | pring->sli.sli3.local_getidx, |
| 2083 | max_cmd_idx); |
| 2084 | |
| 2085 | phba->link_state = LPFC_HBA_ERROR; |
| 2086 | /* |
| 2087 | * All error attention handlers are posted to |
| 2088 | * worker thread |
| 2089 | */ |
| 2090 | phba->work_ha |= HA_ERATT; |
| 2091 | phba->work_hs = HS_FFER3; |
| 2092 | |
| 2093 | lpfc_worker_wake_up(phba); |
| 2094 | |
| 2095 | return NULL; |
| 2096 | } |
| 2097 | |
| 2098 | if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) |
| 2099 | return NULL; |
| 2100 | } |
| 2101 | |
| 2102 | return lpfc_cmd_iocb(phba, pring); |
| 2103 | } |
| 2104 | |
| 2105 | /** |
| 2106 | * lpfc_sli_next_iotag - Get an iotag for the iocb |
| 2107 | * @phba: Pointer to HBA context object. |
| 2108 | * @iocbq: Pointer to driver iocb object. |
| 2109 | * |
| 2110 | * This function gets an iotag for the iocb. If there is no unused iotag and |
| 2111 | * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup |
| 2112 | * array and assigns a new iotag. |
| 2113 | * The function returns the allocated iotag if successful, else returns zero. |
| 2114 | * Zero is not a valid iotag. |
| 2115 | * The caller is not required to hold any lock. |
| 2116 | **/ |
| 2117 | uint16_t |
| 2118 | lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) |
| 2119 | { |
| 2120 | struct lpfc_iocbq **new_arr; |
| 2121 | struct lpfc_iocbq **old_arr; |
| 2122 | size_t new_len; |
| 2123 | struct lpfc_sli *psli = &phba->sli; |
| 2124 | uint16_t iotag; |
| 2125 | |
| 2126 | spin_lock_irq(lock: &phba->hbalock); |
| 2127 | iotag = psli->last_iotag; |
| 2128 | if(++iotag < psli->iocbq_lookup_len) { |
| 2129 | psli->last_iotag = iotag; |
| 2130 | psli->iocbq_lookup[iotag] = iocbq; |
| 2131 | spin_unlock_irq(lock: &phba->hbalock); |
| 2132 | iocbq->iotag = iotag; |
| 2133 | return iotag; |
| 2134 | } else if (psli->iocbq_lookup_len < (0xffff |
| 2135 | - LPFC_IOCBQ_LOOKUP_INCREMENT)) { |
| 2136 | new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; |
| 2137 | spin_unlock_irq(lock: &phba->hbalock); |
| 2138 | new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *), |
| 2139 | GFP_KERNEL); |
| 2140 | if (new_arr) { |
| 2141 | spin_lock_irq(lock: &phba->hbalock); |
| 2142 | old_arr = psli->iocbq_lookup; |
| 2143 | if (new_len <= psli->iocbq_lookup_len) { |
| 2144 | /* highly unprobable case */ |
| 2145 | kfree(objp: new_arr); |
| 2146 | iotag = psli->last_iotag; |
| 2147 | if(++iotag < psli->iocbq_lookup_len) { |
| 2148 | psli->last_iotag = iotag; |
| 2149 | psli->iocbq_lookup[iotag] = iocbq; |
| 2150 | spin_unlock_irq(lock: &phba->hbalock); |
| 2151 | iocbq->iotag = iotag; |
| 2152 | return iotag; |
| 2153 | } |
| 2154 | spin_unlock_irq(lock: &phba->hbalock); |
| 2155 | return 0; |
| 2156 | } |
| 2157 | if (psli->iocbq_lookup) |
| 2158 | memcpy(new_arr, old_arr, |
| 2159 | ((psli->last_iotag + 1) * |
| 2160 | sizeof (struct lpfc_iocbq *))); |
| 2161 | psli->iocbq_lookup = new_arr; |
| 2162 | psli->iocbq_lookup_len = new_len; |
| 2163 | psli->last_iotag = iotag; |
| 2164 | psli->iocbq_lookup[iotag] = iocbq; |
| 2165 | spin_unlock_irq(lock: &phba->hbalock); |
| 2166 | iocbq->iotag = iotag; |
| 2167 | kfree(objp: old_arr); |
| 2168 | return iotag; |
| 2169 | } |
| 2170 | } else |
| 2171 | spin_unlock_irq(lock: &phba->hbalock); |
| 2172 | |
| 2173 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 2174 | "0318 Failed to allocate IOTAG.last IOTAG is %d\n" , |
| 2175 | psli->last_iotag); |
| 2176 | |
| 2177 | return 0; |
| 2178 | } |
| 2179 | |
| 2180 | /** |
| 2181 | * lpfc_sli_submit_iocb - Submit an iocb to the firmware |
| 2182 | * @phba: Pointer to HBA context object. |
| 2183 | * @pring: Pointer to driver SLI ring object. |
| 2184 | * @iocb: Pointer to iocb slot in the ring. |
| 2185 | * @nextiocb: Pointer to driver iocb object which need to be |
| 2186 | * posted to firmware. |
| 2187 | * |
| 2188 | * This function is called to post a new iocb to the firmware. This |
| 2189 | * function copies the new iocb to ring iocb slot and updates the |
| 2190 | * ring pointers. It adds the new iocb to txcmplq if there is |
| 2191 | * a completion call back for this iocb else the function will free the |
| 2192 | * iocb object. The hbalock is asserted held in the code path calling |
| 2193 | * this routine. |
| 2194 | **/ |
| 2195 | static void |
| 2196 | lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 2197 | IOCB_t *iocb, struct lpfc_iocbq *nextiocb) |
| 2198 | { |
| 2199 | /* |
| 2200 | * Set up an iotag |
| 2201 | */ |
| 2202 | nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0; |
| 2203 | |
| 2204 | |
| 2205 | if (pring->ringno == LPFC_ELS_RING) { |
| 2206 | lpfc_debugfs_slow_ring_trc(phba, |
| 2207 | "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x" , |
| 2208 | *(((uint32_t *) &nextiocb->iocb) + 4), |
| 2209 | *(((uint32_t *) &nextiocb->iocb) + 6), |
| 2210 | *(((uint32_t *) &nextiocb->iocb) + 7)); |
| 2211 | } |
| 2212 | |
| 2213 | /* |
| 2214 | * Issue iocb command to adapter |
| 2215 | */ |
| 2216 | lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); |
| 2217 | wmb(); |
| 2218 | pring->stats.iocb_cmd++; |
| 2219 | |
| 2220 | /* |
| 2221 | * If there is no completion routine to call, we can release the |
| 2222 | * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, |
| 2223 | * that have no rsp ring completion, cmd_cmpl MUST be NULL. |
| 2224 | */ |
| 2225 | if (nextiocb->cmd_cmpl) |
| 2226 | lpfc_sli_ringtxcmpl_put(phba, pring, piocb: nextiocb); |
| 2227 | else |
| 2228 | __lpfc_sli_release_iocbq(phba, iocbq: nextiocb); |
| 2229 | |
| 2230 | /* |
| 2231 | * Let the HBA know what IOCB slot will be the next one the |
| 2232 | * driver will put a command into. |
| 2233 | */ |
| 2234 | pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; |
| 2235 | writel(val: pring->sli.sli3.cmdidx, addr: &phba->host_gp[pring->ringno].cmdPutInx); |
| 2236 | } |
| 2237 | |
| 2238 | /** |
| 2239 | * lpfc_sli_update_full_ring - Update the chip attention register |
| 2240 | * @phba: Pointer to HBA context object. |
| 2241 | * @pring: Pointer to driver SLI ring object. |
| 2242 | * |
| 2243 | * The caller is not required to hold any lock for calling this function. |
| 2244 | * This function updates the chip attention bits for the ring to inform firmware |
| 2245 | * that there are pending work to be done for this ring and requests an |
| 2246 | * interrupt when there is space available in the ring. This function is |
| 2247 | * called when the driver is unable to post more iocbs to the ring due |
| 2248 | * to unavailability of space in the ring. |
| 2249 | **/ |
| 2250 | static void |
| 2251 | lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
| 2252 | { |
| 2253 | int ringno = pring->ringno; |
| 2254 | |
| 2255 | pring->flag |= LPFC_CALL_RING_AVAILABLE; |
| 2256 | |
| 2257 | wmb(); |
| 2258 | |
| 2259 | /* |
| 2260 | * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. |
| 2261 | * The HBA will tell us when an IOCB entry is available. |
| 2262 | */ |
| 2263 | writel(val: (CA_R0ATT|CA_R0CE_REQ) << (ringno*4), addr: phba->CAregaddr); |
| 2264 | readl(addr: phba->CAregaddr); /* flush */ |
| 2265 | |
| 2266 | pring->stats.iocb_cmd_full++; |
| 2267 | } |
| 2268 | |
| 2269 | /** |
| 2270 | * lpfc_sli_update_ring - Update chip attention register |
| 2271 | * @phba: Pointer to HBA context object. |
| 2272 | * @pring: Pointer to driver SLI ring object. |
| 2273 | * |
| 2274 | * This function updates the chip attention register bit for the |
| 2275 | * given ring to inform HBA that there is more work to be done |
| 2276 | * in this ring. The caller is not required to hold any lock. |
| 2277 | **/ |
| 2278 | static void |
| 2279 | lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
| 2280 | { |
| 2281 | int ringno = pring->ringno; |
| 2282 | |
| 2283 | /* |
| 2284 | * Tell the HBA that there is work to do in this ring. |
| 2285 | */ |
| 2286 | if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { |
| 2287 | wmb(); |
| 2288 | writel(CA_R0ATT << (ringno * 4), addr: phba->CAregaddr); |
| 2289 | readl(addr: phba->CAregaddr); /* flush */ |
| 2290 | } |
| 2291 | } |
| 2292 | |
| 2293 | /** |
| 2294 | * lpfc_sli_resume_iocb - Process iocbs in the txq |
| 2295 | * @phba: Pointer to HBA context object. |
| 2296 | * @pring: Pointer to driver SLI ring object. |
| 2297 | * |
| 2298 | * This function is called with hbalock held to post pending iocbs |
| 2299 | * in the txq to the firmware. This function is called when driver |
| 2300 | * detects space available in the ring. |
| 2301 | **/ |
| 2302 | static void |
| 2303 | lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
| 2304 | { |
| 2305 | IOCB_t *iocb; |
| 2306 | struct lpfc_iocbq *nextiocb; |
| 2307 | |
| 2308 | lockdep_assert_held(&phba->hbalock); |
| 2309 | |
| 2310 | /* |
| 2311 | * Check to see if: |
| 2312 | * (a) there is anything on the txq to send |
| 2313 | * (b) link is up |
| 2314 | * (c) link attention events can be processed (fcp ring only) |
| 2315 | * (d) IOCB processing is not blocked by the outstanding mbox command. |
| 2316 | */ |
| 2317 | |
| 2318 | if (lpfc_is_link_up(phba) && |
| 2319 | (!list_empty(head: &pring->txq)) && |
| 2320 | (pring->ringno != LPFC_FCP_RING || |
| 2321 | phba->sli.sli_flag & LPFC_PROCESS_LA)) { |
| 2322 | |
| 2323 | while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && |
| 2324 | (nextiocb = lpfc_sli_ringtx_get(phba, pring))) |
| 2325 | lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); |
| 2326 | |
| 2327 | if (iocb) |
| 2328 | lpfc_sli_update_ring(phba, pring); |
| 2329 | else |
| 2330 | lpfc_sli_update_full_ring(phba, pring); |
| 2331 | } |
| 2332 | |
| 2333 | return; |
| 2334 | } |
| 2335 | |
| 2336 | /** |
| 2337 | * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ |
| 2338 | * @phba: Pointer to HBA context object. |
| 2339 | * @hbqno: HBQ number. |
| 2340 | * |
| 2341 | * This function is called with hbalock held to get the next |
| 2342 | * available slot for the given HBQ. If there is free slot |
| 2343 | * available for the HBQ it will return pointer to the next available |
| 2344 | * HBQ entry else it will return NULL. |
| 2345 | **/ |
| 2346 | static struct lpfc_hbq_entry * |
| 2347 | lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) |
| 2348 | { |
| 2349 | struct hbq_s *hbqp = &phba->hbqs[hbqno]; |
| 2350 | |
| 2351 | lockdep_assert_held(&phba->hbalock); |
| 2352 | |
| 2353 | if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && |
| 2354 | ++hbqp->next_hbqPutIdx >= hbqp->entry_count) |
| 2355 | hbqp->next_hbqPutIdx = 0; |
| 2356 | |
| 2357 | if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { |
| 2358 | uint32_t raw_index = phba->hbq_get[hbqno]; |
| 2359 | uint32_t getidx = le32_to_cpu(raw_index); |
| 2360 | |
| 2361 | hbqp->local_hbqGetIdx = getidx; |
| 2362 | |
| 2363 | if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { |
| 2364 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 2365 | "1802 HBQ %d: local_hbqGetIdx " |
| 2366 | "%u is > than hbqp->entry_count %u\n" , |
| 2367 | hbqno, hbqp->local_hbqGetIdx, |
| 2368 | hbqp->entry_count); |
| 2369 | |
| 2370 | phba->link_state = LPFC_HBA_ERROR; |
| 2371 | return NULL; |
| 2372 | } |
| 2373 | |
| 2374 | if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) |
| 2375 | return NULL; |
| 2376 | } |
| 2377 | |
| 2378 | return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + |
| 2379 | hbqp->hbqPutIdx; |
| 2380 | } |
| 2381 | |
| 2382 | /** |
| 2383 | * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers |
| 2384 | * @phba: Pointer to HBA context object. |
| 2385 | * |
| 2386 | * This function is called with no lock held to free all the |
| 2387 | * hbq buffers while uninitializing the SLI interface. It also |
| 2388 | * frees the HBQ buffers returned by the firmware but not yet |
| 2389 | * processed by the upper layers. |
| 2390 | **/ |
| 2391 | void |
| 2392 | lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) |
| 2393 | { |
| 2394 | struct lpfc_dmabuf *dmabuf, *next_dmabuf; |
| 2395 | struct hbq_dmabuf *hbq_buf; |
| 2396 | unsigned long flags; |
| 2397 | int i, hbq_count; |
| 2398 | |
| 2399 | hbq_count = lpfc_sli_hbq_count(); |
| 2400 | /* Return all memory used by all HBQs */ |
| 2401 | spin_lock_irqsave(&phba->hbalock, flags); |
| 2402 | for (i = 0; i < hbq_count; ++i) { |
| 2403 | list_for_each_entry_safe(dmabuf, next_dmabuf, |
| 2404 | &phba->hbqs[i].hbq_buffer_list, list) { |
| 2405 | hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); |
| 2406 | list_del(entry: &hbq_buf->dbuf.list); |
| 2407 | (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); |
| 2408 | } |
| 2409 | phba->hbqs[i].buffer_count = 0; |
| 2410 | } |
| 2411 | |
| 2412 | /* Mark the HBQs not in use */ |
| 2413 | phba->hbq_in_use = 0; |
| 2414 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
| 2415 | } |
| 2416 | |
| 2417 | /** |
| 2418 | * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware |
| 2419 | * @phba: Pointer to HBA context object. |
| 2420 | * @hbqno: HBQ number. |
| 2421 | * @hbq_buf: Pointer to HBQ buffer. |
| 2422 | * |
| 2423 | * This function is called with the hbalock held to post a |
| 2424 | * hbq buffer to the firmware. If the function finds an empty |
| 2425 | * slot in the HBQ, it will post the buffer. The function will return |
| 2426 | * pointer to the hbq entry if it successfully post the buffer |
| 2427 | * else it will return NULL. |
| 2428 | **/ |
| 2429 | static int |
| 2430 | lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, |
| 2431 | struct hbq_dmabuf *hbq_buf) |
| 2432 | { |
| 2433 | lockdep_assert_held(&phba->hbalock); |
| 2434 | return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); |
| 2435 | } |
| 2436 | |
| 2437 | /** |
| 2438 | * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware |
| 2439 | * @phba: Pointer to HBA context object. |
| 2440 | * @hbqno: HBQ number. |
| 2441 | * @hbq_buf: Pointer to HBQ buffer. |
| 2442 | * |
| 2443 | * This function is called with the hbalock held to post a hbq buffer to the |
| 2444 | * firmware. If the function finds an empty slot in the HBQ, it will post the |
| 2445 | * buffer and place it on the hbq_buffer_list. The function will return zero if |
| 2446 | * it successfully post the buffer else it will return an error. |
| 2447 | **/ |
| 2448 | static int |
| 2449 | lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, |
| 2450 | struct hbq_dmabuf *hbq_buf) |
| 2451 | { |
| 2452 | struct lpfc_hbq_entry *hbqe; |
| 2453 | dma_addr_t physaddr = hbq_buf->dbuf.phys; |
| 2454 | |
| 2455 | lockdep_assert_held(&phba->hbalock); |
| 2456 | /* Get next HBQ entry slot to use */ |
| 2457 | hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); |
| 2458 | if (hbqe) { |
| 2459 | struct hbq_s *hbqp = &phba->hbqs[hbqno]; |
| 2460 | |
| 2461 | hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); |
| 2462 | hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); |
| 2463 | hbqe->bde.tus.f.bdeSize = hbq_buf->total_size; |
| 2464 | hbqe->bde.tus.f.bdeFlags = 0; |
| 2465 | hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); |
| 2466 | hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); |
| 2467 | /* Sync SLIM */ |
| 2468 | hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; |
| 2469 | writel(val: hbqp->hbqPutIdx, addr: phba->hbq_put + hbqno); |
| 2470 | /* flush */ |
| 2471 | readl(addr: phba->hbq_put + hbqno); |
| 2472 | list_add_tail(new: &hbq_buf->dbuf.list, head: &hbqp->hbq_buffer_list); |
| 2473 | return 0; |
| 2474 | } else |
| 2475 | return -ENOMEM; |
| 2476 | } |
| 2477 | |
| 2478 | /** |
| 2479 | * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware |
| 2480 | * @phba: Pointer to HBA context object. |
| 2481 | * @hbqno: HBQ number. |
| 2482 | * @hbq_buf: Pointer to HBQ buffer. |
| 2483 | * |
| 2484 | * This function is called with the hbalock held to post an RQE to the SLI4 |
| 2485 | * firmware. If able to post the RQE to the RQ it will queue the hbq entry to |
| 2486 | * the hbq_buffer_list and return zero, otherwise it will return an error. |
| 2487 | **/ |
| 2488 | static int |
| 2489 | lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, |
| 2490 | struct hbq_dmabuf *hbq_buf) |
| 2491 | { |
| 2492 | int rc; |
| 2493 | struct lpfc_rqe hrqe; |
| 2494 | struct lpfc_rqe drqe; |
| 2495 | struct lpfc_queue *hrq; |
| 2496 | struct lpfc_queue *drq; |
| 2497 | |
| 2498 | if (hbqno != LPFC_ELS_HBQ) |
| 2499 | return 1; |
| 2500 | hrq = phba->sli4_hba.hdr_rq; |
| 2501 | drq = phba->sli4_hba.dat_rq; |
| 2502 | |
| 2503 | lockdep_assert_held(&phba->hbalock); |
| 2504 | hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); |
| 2505 | hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); |
| 2506 | drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); |
| 2507 | drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); |
| 2508 | rc = lpfc_sli4_rq_put(hq: hrq, dq: drq, hrqe: &hrqe, drqe: &drqe); |
| 2509 | if (rc < 0) |
| 2510 | return rc; |
| 2511 | hbq_buf->tag = (rc | (hbqno << 16)); |
| 2512 | list_add_tail(new: &hbq_buf->dbuf.list, head: &phba->hbqs[hbqno].hbq_buffer_list); |
| 2513 | return 0; |
| 2514 | } |
| 2515 | |
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,	/* cap enforced by lpfc_sli_hbqbuf_fill_hbqs */
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,	/* buffers posted at init (non-SLI4 path) */
	.add_count = 40,	/* buffers posted per refill request */
};
| 2527 | |
/* Array of HBQs, indexed by HBQ number. */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
| 2532 | |
| 2533 | /** |
| 2534 | * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ |
| 2535 | * @phba: Pointer to HBA context object. |
| 2536 | * @hbqno: HBQ number. |
| 2537 | * @count: Number of HBQ buffers to be posted. |
| 2538 | * |
| 2539 | * This function is called with no lock held to post more hbq buffers to the |
| 2540 | * given HBQ. The function returns the number of HBQ buffers successfully |
| 2541 | * posted. |
| 2542 | **/ |
| 2543 | static int |
| 2544 | lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) |
| 2545 | { |
| 2546 | uint32_t i, posted = 0; |
| 2547 | unsigned long flags; |
| 2548 | struct hbq_dmabuf *hbq_buffer; |
| 2549 | LIST_HEAD(hbq_buf_list); |
| 2550 | if (!phba->hbqs[hbqno].hbq_alloc_buffer) |
| 2551 | return 0; |
| 2552 | |
| 2553 | if ((phba->hbqs[hbqno].buffer_count + count) > |
| 2554 | lpfc_hbq_defs[hbqno]->entry_count) |
| 2555 | count = lpfc_hbq_defs[hbqno]->entry_count - |
| 2556 | phba->hbqs[hbqno].buffer_count; |
| 2557 | if (!count) |
| 2558 | return 0; |
| 2559 | /* Allocate HBQ entries */ |
| 2560 | for (i = 0; i < count; i++) { |
| 2561 | hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); |
| 2562 | if (!hbq_buffer) |
| 2563 | break; |
| 2564 | list_add_tail(new: &hbq_buffer->dbuf.list, head: &hbq_buf_list); |
| 2565 | } |
| 2566 | /* Check whether HBQ is still in use */ |
| 2567 | spin_lock_irqsave(&phba->hbalock, flags); |
| 2568 | if (!phba->hbq_in_use) |
| 2569 | goto err; |
| 2570 | while (!list_empty(head: &hbq_buf_list)) { |
| 2571 | list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, |
| 2572 | dbuf.list); |
| 2573 | hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | |
| 2574 | (hbqno << 16)); |
| 2575 | if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf: hbq_buffer)) { |
| 2576 | phba->hbqs[hbqno].buffer_count++; |
| 2577 | posted++; |
| 2578 | } else |
| 2579 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); |
| 2580 | } |
| 2581 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
| 2582 | return posted; |
| 2583 | err: |
| 2584 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
| 2585 | while (!list_empty(head: &hbq_buf_list)) { |
| 2586 | list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, |
| 2587 | dbuf.list); |
| 2588 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); |
| 2589 | } |
| 2590 | return 0; |
| 2591 | } |
| 2592 | |
| 2593 | /** |
| 2594 | * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware |
| 2595 | * @phba: Pointer to HBA context object. |
| 2596 | * @qno: HBQ number. |
| 2597 | * |
| 2598 | * This function posts more buffers to the HBQ. This function |
| 2599 | * is called with no lock held. The function returns the number of HBQ entries |
| 2600 | * successfully allocated. |
| 2601 | **/ |
| 2602 | int |
| 2603 | lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) |
| 2604 | { |
| 2605 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 2606 | return 0; |
| 2607 | else |
| 2608 | return lpfc_sli_hbqbuf_fill_hbqs(phba, hbqno: qno, |
| 2609 | count: lpfc_hbq_defs[qno]->add_count); |
| 2610 | } |
| 2611 | |
| 2612 | /** |
| 2613 | * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ |
| 2614 | * @phba: Pointer to HBA context object. |
| 2615 | * @qno: HBQ queue number. |
| 2616 | * |
| 2617 | * This function is called from SLI initialization code path with |
| 2618 | * no lock held to post initial HBQ buffers to firmware. The |
| 2619 | * function returns the number of HBQ entries successfully allocated. |
| 2620 | **/ |
| 2621 | static int |
| 2622 | lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) |
| 2623 | { |
| 2624 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 2625 | return lpfc_sli_hbqbuf_fill_hbqs(phba, hbqno: qno, |
| 2626 | count: lpfc_hbq_defs[qno]->entry_count); |
| 2627 | else |
| 2628 | return lpfc_sli_hbqbuf_fill_hbqs(phba, hbqno: qno, |
| 2629 | count: lpfc_hbq_defs[qno]->init_count); |
| 2630 | } |
| 2631 | |
| 2632 | /* |
| 2633 | * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list |
| 2634 | * |
| 2635 | * This function removes the first hbq buffer on an hbq list and returns a |
| 2636 | * pointer to that buffer. If it finds no buffers on the list it returns NULL. |
| 2637 | **/ |
| 2638 | static struct hbq_dmabuf * |
| 2639 | lpfc_sli_hbqbuf_get(struct list_head *rb_list) |
| 2640 | { |
| 2641 | struct lpfc_dmabuf *d_buf; |
| 2642 | |
| 2643 | list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); |
| 2644 | if (!d_buf) |
| 2645 | return NULL; |
| 2646 | return container_of(d_buf, struct hbq_dmabuf, dbuf); |
| 2647 | } |
| 2648 | |
| 2649 | /** |
| 2650 | * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list |
| 2651 | * @phba: Pointer to HBA context object. |
| 2652 | * @hrq: HBQ number. |
| 2653 | * |
| 2654 | * This function removes the first RQ buffer on an RQ buffer list and returns a |
| 2655 | * pointer to that buffer. If it finds no buffers on the list it returns NULL. |
| 2656 | **/ |
| 2657 | static struct rqb_dmabuf * |
| 2658 | lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) |
| 2659 | { |
| 2660 | struct lpfc_dmabuf *h_buf; |
| 2661 | struct lpfc_rqb *rqbp; |
| 2662 | |
| 2663 | rqbp = hrq->rqbp; |
| 2664 | list_remove_head(&rqbp->rqb_buffer_list, h_buf, |
| 2665 | struct lpfc_dmabuf, list); |
| 2666 | if (!h_buf) |
| 2667 | return NULL; |
| 2668 | rqbp->buffer_count--; |
| 2669 | return container_of(h_buf, struct rqb_dmabuf, hbuf); |
| 2670 | } |
| 2671 | |
| 2672 | /** |
| 2673 | * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag |
| 2674 | * @phba: Pointer to HBA context object. |
| 2675 | * @tag: Tag of the hbq buffer. |
| 2676 | * |
| 2677 | * This function searches for the hbq buffer associated with the given tag in |
| 2678 | * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer |
| 2679 | * otherwise it returns NULL. |
| 2680 | **/ |
| 2681 | static struct hbq_dmabuf * |
| 2682 | lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) |
| 2683 | { |
| 2684 | struct lpfc_dmabuf *d_buf; |
| 2685 | struct hbq_dmabuf *hbq_buf; |
| 2686 | uint32_t hbqno; |
| 2687 | |
| 2688 | hbqno = tag >> 16; |
| 2689 | if (hbqno >= LPFC_MAX_HBQS) |
| 2690 | return NULL; |
| 2691 | |
| 2692 | spin_lock_irq(lock: &phba->hbalock); |
| 2693 | list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { |
| 2694 | hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
| 2695 | if (hbq_buf->tag == tag) { |
| 2696 | spin_unlock_irq(lock: &phba->hbalock); |
| 2697 | return hbq_buf; |
| 2698 | } |
| 2699 | } |
| 2700 | spin_unlock_irq(lock: &phba->hbalock); |
| 2701 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 2702 | "1803 Bad hbq tag. Data: x%x x%x\n" , |
| 2703 | tag, phba->hbqs[tag >> 16].buffer_count); |
| 2704 | return NULL; |
| 2705 | } |
| 2706 | |
| 2707 | /** |
| 2708 | * lpfc_sli_free_hbq - Give back the hbq buffer to firmware |
| 2709 | * @phba: Pointer to HBA context object. |
| 2710 | * @hbq_buffer: Pointer to HBQ buffer. |
| 2711 | * |
| 2712 | * This function is called with hbalock. This function gives back |
| 2713 | * the hbq buffer to firmware. If the HBQ does not have space to |
| 2714 | * post the buffer, it will free the buffer. |
| 2715 | **/ |
| 2716 | void |
| 2717 | lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) |
| 2718 | { |
| 2719 | uint32_t hbqno; |
| 2720 | |
| 2721 | if (hbq_buffer) { |
| 2722 | hbqno = hbq_buffer->tag >> 16; |
| 2723 | if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf: hbq_buffer)) |
| 2724 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); |
| 2725 | } |
| 2726 | } |
| 2727 | |
| 2728 | /** |
| 2729 | * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox |
| 2730 | * @mbxCommand: mailbox command code. |
| 2731 | * |
| 2732 | * This function is called by the mailbox event handler function to verify |
| 2733 | * that the completed mailbox command is a legitimate mailbox command. If the |
| 2734 | * completed mailbox is not known to the function, it will return MBX_SHUTDOWN |
| 2735 | * and the mailbox event handler will take the HBA offline. |
| 2736 | **/ |
| 2737 | static int |
| 2738 | lpfc_sli_chk_mbx_command(uint8_t mbxCommand) |
| 2739 | { |
| 2740 | uint8_t ret; |
| 2741 | |
| 2742 | switch (mbxCommand) { |
| 2743 | case MBX_LOAD_SM: |
| 2744 | case MBX_READ_NV: |
| 2745 | case MBX_WRITE_NV: |
| 2746 | case MBX_WRITE_VPARMS: |
| 2747 | case MBX_RUN_BIU_DIAG: |
| 2748 | case MBX_INIT_LINK: |
| 2749 | case MBX_DOWN_LINK: |
| 2750 | case MBX_CONFIG_LINK: |
| 2751 | case MBX_CONFIG_RING: |
| 2752 | case MBX_RESET_RING: |
| 2753 | case MBX_READ_CONFIG: |
| 2754 | case MBX_READ_RCONFIG: |
| 2755 | case MBX_READ_SPARM: |
| 2756 | case MBX_READ_STATUS: |
| 2757 | case MBX_READ_RPI: |
| 2758 | case MBX_READ_XRI: |
| 2759 | case MBX_READ_REV: |
| 2760 | case MBX_READ_LNK_STAT: |
| 2761 | case MBX_REG_LOGIN: |
| 2762 | case MBX_UNREG_LOGIN: |
| 2763 | case MBX_CLEAR_LA: |
| 2764 | case MBX_DUMP_MEMORY: |
| 2765 | case MBX_DUMP_CONTEXT: |
| 2766 | case MBX_RUN_DIAGS: |
| 2767 | case MBX_RESTART: |
| 2768 | case MBX_UPDATE_CFG: |
| 2769 | case MBX_DOWN_LOAD: |
| 2770 | case MBX_DEL_LD_ENTRY: |
| 2771 | case MBX_RUN_PROGRAM: |
| 2772 | case MBX_SET_MASK: |
| 2773 | case MBX_SET_VARIABLE: |
| 2774 | case MBX_UNREG_D_ID: |
| 2775 | case MBX_KILL_BOARD: |
| 2776 | case MBX_CONFIG_FARP: |
| 2777 | case MBX_BEACON: |
| 2778 | case MBX_LOAD_AREA: |
| 2779 | case MBX_RUN_BIU_DIAG64: |
| 2780 | case MBX_CONFIG_PORT: |
| 2781 | case MBX_READ_SPARM64: |
| 2782 | case MBX_READ_RPI64: |
| 2783 | case MBX_REG_LOGIN64: |
| 2784 | case MBX_READ_TOPOLOGY: |
| 2785 | case MBX_WRITE_WWN: |
| 2786 | case MBX_SET_DEBUG: |
| 2787 | case MBX_LOAD_EXP_ROM: |
| 2788 | case MBX_ASYNCEVT_ENABLE: |
| 2789 | case MBX_REG_VPI: |
| 2790 | case MBX_UNREG_VPI: |
| 2791 | case MBX_HEARTBEAT: |
| 2792 | case MBX_PORT_CAPABILITIES: |
| 2793 | case MBX_PORT_IOV_CONTROL: |
| 2794 | case MBX_SLI4_CONFIG: |
| 2795 | case MBX_SLI4_REQ_FTRS: |
| 2796 | case MBX_REG_FCFI: |
| 2797 | case MBX_UNREG_FCFI: |
| 2798 | case MBX_REG_VFI: |
| 2799 | case MBX_UNREG_VFI: |
| 2800 | case MBX_INIT_VPI: |
| 2801 | case MBX_INIT_VFI: |
| 2802 | case MBX_RESUME_RPI: |
| 2803 | case MBX_READ_EVENT_LOG_STATUS: |
| 2804 | case MBX_READ_EVENT_LOG: |
| 2805 | case MBX_SECURITY_MGMT: |
| 2806 | case MBX_AUTH_PORT: |
| 2807 | case MBX_ACCESS_VDATA: |
| 2808 | ret = mbxCommand; |
| 2809 | break; |
| 2810 | default: |
| 2811 | ret = MBX_SHUTDOWN; |
| 2812 | break; |
| 2813 | } |
| 2814 | return ret; |
| 2815 | } |
| 2816 | |
| 2817 | /** |
| 2818 | * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler |
| 2819 | * @phba: Pointer to HBA context object. |
| 2820 | * @pmboxq: Pointer to mailbox command. |
| 2821 | * |
| 2822 | * This is completion handler function for mailbox commands issued from |
| 2823 | * lpfc_sli_issue_mbox_wait function. This function is called by the |
| 2824 | * mailbox event handler function with no lock held. This function |
| 2825 | * will wake up thread waiting on the wait queue pointed by context1 |
| 2826 | * of the mailbox. |
| 2827 | **/ |
| 2828 | void |
| 2829 | lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) |
| 2830 | { |
| 2831 | unsigned long drvr_flag; |
| 2832 | struct completion *pmbox_done; |
| 2833 | |
| 2834 | /* |
| 2835 | * If pmbox_done is empty, the driver thread gave up waiting and |
| 2836 | * continued running. |
| 2837 | */ |
| 2838 | pmboxq->mbox_flag |= LPFC_MBX_WAKE; |
| 2839 | spin_lock_irqsave(&phba->hbalock, drvr_flag); |
| 2840 | pmbox_done = pmboxq->ctx_u.mbox_wait; |
| 2841 | if (pmbox_done) |
| 2842 | complete(pmbox_done); |
| 2843 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 2844 | return; |
| 2845 | } |
| 2846 | |
| 2847 | /** |
| 2848 | * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler |
| 2849 | * @phba: Pointer to HBA context object. |
| 2850 | * @pmb: Pointer to mailbox object. |
| 2851 | * |
| 2852 | * This function is the default mailbox completion handler. It |
| 2853 | * frees the memory resources associated with the completed mailbox |
| 2854 | * command. If the completed command is a REG_LOGIN mailbox command, |
| 2855 | * this function will issue a UREG_LOGIN to re-claim the RPI. |
| 2856 | **/ |
| 2857 | void |
| 2858 | lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
| 2859 | { |
| 2860 | struct lpfc_vport *vport = pmb->vport; |
| 2861 | struct lpfc_dmabuf *mp; |
| 2862 | struct lpfc_nodelist *ndlp; |
| 2863 | struct Scsi_Host *shost; |
| 2864 | uint16_t rpi, vpi; |
| 2865 | int rc; |
| 2866 | |
| 2867 | /* |
| 2868 | * If a REG_LOGIN succeeded after node is destroyed or node |
| 2869 | * is in re-discovery driver need to cleanup the RPI. |
| 2870 | */ |
| 2871 | if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) && |
| 2872 | pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && |
| 2873 | !pmb->u.mb.mbxStatus) { |
| 2874 | mp = pmb->ctx_buf; |
| 2875 | if (mp) { |
| 2876 | pmb->ctx_buf = NULL; |
| 2877 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
| 2878 | kfree(objp: mp); |
| 2879 | } |
| 2880 | rpi = pmb->u.mb.un.varWords[0]; |
| 2881 | vpi = pmb->u.mb.un.varRegLogin.vpi; |
| 2882 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 2883 | vpi -= phba->sli4_hba.max_cfg_param.vpi_base; |
| 2884 | lpfc_unreg_login(phba, vpi, rpi, pmb); |
| 2885 | pmb->vport = vport; |
| 2886 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 2887 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); |
| 2888 | if (rc != MBX_NOT_FINISHED) |
| 2889 | return; |
| 2890 | } |
| 2891 | |
| 2892 | if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && |
| 2893 | !test_bit(FC_UNLOADING, &phba->pport->load_flag) && |
| 2894 | !pmb->u.mb.mbxStatus) { |
| 2895 | shost = lpfc_shost_from_vport(vport); |
| 2896 | spin_lock_irq(lock: shost->host_lock); |
| 2897 | vport->vpi_state |= LPFC_VPI_REGISTERED; |
| 2898 | spin_unlock_irq(lock: shost->host_lock); |
| 2899 | clear_bit(nr: FC_VPORT_NEEDS_REG_VPI, addr: &vport->fc_flag); |
| 2900 | } |
| 2901 | |
| 2902 | if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { |
| 2903 | ndlp = pmb->ctx_ndlp; |
| 2904 | lpfc_nlp_put(ndlp); |
| 2905 | } |
| 2906 | |
| 2907 | if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { |
| 2908 | ndlp = pmb->ctx_ndlp; |
| 2909 | |
| 2910 | /* Check to see if there are any deferred events to process */ |
| 2911 | if (ndlp) { |
| 2912 | lpfc_printf_vlog( |
| 2913 | vport, |
| 2914 | KERN_INFO, LOG_MBOX | LOG_DISCOVERY, |
| 2915 | "1438 UNREG cmpl deferred mbox x%x " |
| 2916 | "on NPort x%x Data: x%lx x%x x%px x%lx x%x\n" , |
| 2917 | ndlp->nlp_rpi, ndlp->nlp_DID, |
| 2918 | ndlp->nlp_flag, ndlp->nlp_defer_did, |
| 2919 | ndlp, vport->load_flag, kref_read(&ndlp->kref)); |
| 2920 | |
| 2921 | if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && |
| 2922 | ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { |
| 2923 | clear_bit(nr: NLP_UNREG_INP, addr: &ndlp->nlp_flag); |
| 2924 | ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; |
| 2925 | lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); |
| 2926 | } else { |
| 2927 | clear_bit(nr: NLP_UNREG_INP, addr: &ndlp->nlp_flag); |
| 2928 | } |
| 2929 | |
| 2930 | /* The unreg_login mailbox is complete and had a |
| 2931 | * reference that has to be released. The PLOGI |
| 2932 | * got its own ref. |
| 2933 | */ |
| 2934 | lpfc_nlp_put(ndlp); |
| 2935 | pmb->ctx_ndlp = NULL; |
| 2936 | } |
| 2937 | } |
| 2938 | |
| 2939 | /* This nlp_put pairs with lpfc_sli4_resume_rpi */ |
| 2940 | if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) { |
| 2941 | ndlp = pmb->ctx_ndlp; |
| 2942 | lpfc_nlp_put(ndlp); |
| 2943 | } |
| 2944 | |
| 2945 | /* Check security permission status on INIT_LINK mailbox command */ |
| 2946 | if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && |
| 2947 | (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) |
| 2948 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 2949 | "2860 SLI authentication is required " |
| 2950 | "for INIT_LINK but has not done yet\n" ); |
| 2951 | |
| 2952 | if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) |
| 2953 | lpfc_sli4_mbox_cmd_free(phba, pmb); |
| 2954 | else |
| 2955 | lpfc_mbox_rsrc_cleanup(phba, mbox: pmb, locked: MBOX_THD_UNLOCKED); |
| 2956 | } |
| 2957 | /** |
| 2958 | * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler |
| 2959 | * @phba: Pointer to HBA context object. |
| 2960 | * @pmb: Pointer to mailbox object. |
| 2961 | * |
| 2962 | * This function is the unreg rpi mailbox completion handler. It |
| 2963 | * frees the memory resources associated with the completed mailbox |
| 2964 | * command. An additional reference is put on the ndlp to prevent |
| 2965 | * lpfc_nlp_release from freeing the rpi bit in the bitmask before |
| 2966 | * the unreg mailbox command completes, this routine puts the |
| 2967 | * reference back. |
| 2968 | * |
| 2969 | **/ |
| 2970 | void |
| 2971 | lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
| 2972 | { |
| 2973 | struct lpfc_vport *vport = pmb->vport; |
| 2974 | struct lpfc_nodelist *ndlp; |
| 2975 | bool unreg_inp; |
| 2976 | |
| 2977 | ndlp = pmb->ctx_ndlp; |
| 2978 | if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { |
| 2979 | if (phba->sli_rev == LPFC_SLI_REV4 && |
| 2980 | (bf_get(lpfc_sli_intf_if_type, |
| 2981 | &phba->sli4_hba.sli_intf) >= |
| 2982 | LPFC_SLI_INTF_IF_TYPE_2)) { |
| 2983 | if (ndlp) { |
| 2984 | lpfc_printf_vlog( |
| 2985 | vport, KERN_INFO, |
| 2986 | LOG_MBOX | LOG_SLI | LOG_NODE, |
| 2987 | "0010 UNREG_LOGIN vpi:x%x " |
| 2988 | "rpi:%x DID:%x defer x%x flg x%lx " |
| 2989 | "x%px\n" , |
| 2990 | vport->vpi, ndlp->nlp_rpi, |
| 2991 | ndlp->nlp_DID, ndlp->nlp_defer_did, |
| 2992 | ndlp->nlp_flag, |
| 2993 | ndlp); |
| 2994 | |
| 2995 | /* Cleanup the nlp_flag now that the UNREG RPI |
| 2996 | * has completed. |
| 2997 | */ |
| 2998 | unreg_inp = test_and_clear_bit(nr: NLP_UNREG_INP, |
| 2999 | addr: &ndlp->nlp_flag); |
| 3000 | clear_bit(nr: NLP_LOGO_ACC, addr: &ndlp->nlp_flag); |
| 3001 | |
| 3002 | /* Check to see if there are any deferred |
| 3003 | * events to process |
| 3004 | */ |
| 3005 | if (unreg_inp && |
| 3006 | ndlp->nlp_defer_did != |
| 3007 | NLP_EVT_NOTHING_PENDING) { |
| 3008 | lpfc_printf_vlog( |
| 3009 | vport, KERN_INFO, |
| 3010 | LOG_MBOX | LOG_SLI | LOG_NODE, |
| 3011 | "4111 UNREG cmpl deferred " |
| 3012 | "clr x%x on " |
| 3013 | "NPort x%x Data: x%x x%px\n" , |
| 3014 | ndlp->nlp_rpi, ndlp->nlp_DID, |
| 3015 | ndlp->nlp_defer_did, ndlp); |
| 3016 | ndlp->nlp_defer_did = |
| 3017 | NLP_EVT_NOTHING_PENDING; |
| 3018 | lpfc_issue_els_plogi( |
| 3019 | vport, ndlp->nlp_DID, 0); |
| 3020 | } |
| 3021 | |
| 3022 | lpfc_nlp_put(ndlp); |
| 3023 | } |
| 3024 | } |
| 3025 | } |
| 3026 | |
| 3027 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
| 3028 | } |
| 3029 | |
| 3030 | /** |
| 3031 | * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware |
| 3032 | * @phba: Pointer to HBA context object. |
| 3033 | * |
| 3034 | * This function is called with no lock held. This function processes all |
| 3035 | * the completed mailbox commands and gives it to upper layers. The interrupt |
| 3036 | * service routine processes mailbox completion interrupt and adds completed |
| 3037 | * mailbox commands to the mboxq_cmpl queue and signals the worker thread. |
| 3038 | * Worker thread call lpfc_sli_handle_mb_event, which will return the |
| 3039 | * completed mailbox commands in mboxq_cmpl queue to the upper layers. This |
| 3040 | * function returns the mailbox commands to the upper layer by calling the |
| 3041 | * completion handler function of each mailbox. |
| 3042 | **/ |
| 3043 | int |
| 3044 | lpfc_sli_handle_mb_event(struct lpfc_hba *phba) |
| 3045 | { |
| 3046 | MAILBOX_t *pmbox; |
| 3047 | LPFC_MBOXQ_t *pmb; |
| 3048 | int rc; |
| 3049 | LIST_HEAD(cmplq); |
| 3050 | |
| 3051 | phba->sli.slistat.mbox_event++; |
| 3052 | |
| 3053 | /* Get all completed mailboxe buffers into the cmplq */ |
| 3054 | spin_lock_irq(lock: &phba->hbalock); |
| 3055 | list_splice_init(list: &phba->sli.mboxq_cmpl, head: &cmplq); |
| 3056 | spin_unlock_irq(lock: &phba->hbalock); |
| 3057 | |
| 3058 | /* Get a Mailbox buffer to setup mailbox commands for callback */ |
| 3059 | do { |
| 3060 | list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); |
| 3061 | if (pmb == NULL) |
| 3062 | break; |
| 3063 | |
| 3064 | pmbox = &pmb->u.mb; |
| 3065 | |
| 3066 | if (pmbox->mbxCommand != MBX_HEARTBEAT) { |
| 3067 | if (pmb->vport) { |
| 3068 | lpfc_debugfs_disc_trc(pmb->vport, |
| 3069 | LPFC_DISC_TRC_MBOX_VPORT, |
| 3070 | "MBOX cmpl vport: cmd:x%x mb:x%x x%x" , |
| 3071 | (uint32_t)pmbox->mbxCommand, |
| 3072 | pmbox->un.varWords[0], |
| 3073 | pmbox->un.varWords[1]); |
| 3074 | } |
| 3075 | else { |
| 3076 | lpfc_debugfs_disc_trc(phba->pport, |
| 3077 | LPFC_DISC_TRC_MBOX, |
| 3078 | "MBOX cmpl: cmd:x%x mb:x%x x%x" , |
| 3079 | (uint32_t)pmbox->mbxCommand, |
| 3080 | pmbox->un.varWords[0], |
| 3081 | pmbox->un.varWords[1]); |
| 3082 | } |
| 3083 | } |
| 3084 | |
| 3085 | /* |
| 3086 | * It is a fatal error if unknown mbox command completion. |
| 3087 | */ |
| 3088 | if (lpfc_sli_chk_mbx_command(mbxCommand: pmbox->mbxCommand) == |
| 3089 | MBX_SHUTDOWN) { |
| 3090 | /* Unknown mailbox command compl */ |
| 3091 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 3092 | "(%d):0323 Unknown Mailbox command " |
| 3093 | "x%x (x%x/x%x) Cmpl\n" , |
| 3094 | pmb->vport ? pmb->vport->vpi : |
| 3095 | LPFC_VPORT_UNKNOWN, |
| 3096 | pmbox->mbxCommand, |
| 3097 | lpfc_sli_config_mbox_subsys_get(phba, |
| 3098 | pmb), |
| 3099 | lpfc_sli_config_mbox_opcode_get(phba, |
| 3100 | pmb)); |
| 3101 | phba->link_state = LPFC_HBA_ERROR; |
| 3102 | phba->work_hs = HS_FFER3; |
| 3103 | lpfc_handle_eratt(phba); |
| 3104 | continue; |
| 3105 | } |
| 3106 | |
| 3107 | if (pmbox->mbxStatus) { |
| 3108 | phba->sli.slistat.mbox_stat_err++; |
| 3109 | if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { |
| 3110 | /* Mbox cmd cmpl error - RETRYing */ |
| 3111 | lpfc_printf_log(phba, KERN_INFO, |
| 3112 | LOG_MBOX | LOG_SLI, |
| 3113 | "(%d):0305 Mbox cmd cmpl " |
| 3114 | "error - RETRYing Data: x%x " |
| 3115 | "(x%x/x%x) x%x x%x x%x\n" , |
| 3116 | pmb->vport ? pmb->vport->vpi : |
| 3117 | LPFC_VPORT_UNKNOWN, |
| 3118 | pmbox->mbxCommand, |
| 3119 | lpfc_sli_config_mbox_subsys_get(phba, |
| 3120 | pmb), |
| 3121 | lpfc_sli_config_mbox_opcode_get(phba, |
| 3122 | pmb), |
| 3123 | pmbox->mbxStatus, |
| 3124 | pmbox->un.varWords[0], |
| 3125 | pmb->vport ? pmb->vport->port_state : |
| 3126 | LPFC_VPORT_UNKNOWN); |
| 3127 | pmbox->mbxStatus = 0; |
| 3128 | pmbox->mbxOwner = OWN_HOST; |
| 3129 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); |
| 3130 | if (rc != MBX_NOT_FINISHED) |
| 3131 | continue; |
| 3132 | } |
| 3133 | } |
| 3134 | |
| 3135 | /* Mailbox cmd <cmd> Cmpl <cmpl> */ |
| 3136 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, |
| 3137 | "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps " |
| 3138 | "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " |
| 3139 | "x%x x%x x%x\n" , |
| 3140 | pmb->vport ? pmb->vport->vpi : 0, |
| 3141 | pmbox->mbxCommand, |
| 3142 | lpfc_sli_config_mbox_subsys_get(phba, pmb), |
| 3143 | lpfc_sli_config_mbox_opcode_get(phba, pmb), |
| 3144 | pmb->mbox_cmpl, |
| 3145 | *((uint32_t *) pmbox), |
| 3146 | pmbox->un.varWords[0], |
| 3147 | pmbox->un.varWords[1], |
| 3148 | pmbox->un.varWords[2], |
| 3149 | pmbox->un.varWords[3], |
| 3150 | pmbox->un.varWords[4], |
| 3151 | pmbox->un.varWords[5], |
| 3152 | pmbox->un.varWords[6], |
| 3153 | pmbox->un.varWords[7], |
| 3154 | pmbox->un.varWords[8], |
| 3155 | pmbox->un.varWords[9], |
| 3156 | pmbox->un.varWords[10]); |
| 3157 | |
| 3158 | if (pmb->mbox_cmpl) |
| 3159 | pmb->mbox_cmpl(phba,pmb); |
| 3160 | } while (1); |
| 3161 | return 0; |
| 3162 | } |
| 3163 | |
| 3164 | /** |
| 3165 | * lpfc_sli_get_buff - Get the buffer associated with the buffer tag |
| 3166 | * @phba: Pointer to HBA context object. |
| 3167 | * @pring: Pointer to driver SLI ring object. |
| 3168 | * @tag: buffer tag. |
| 3169 | * |
| 3170 | * This function is called with no lock held. When QUE_BUFTAG_BIT bit |
| 3171 | * is set in the tag the buffer is posted for a particular exchange, |
| 3172 | * the function will return the buffer without replacing the buffer. |
| 3173 | * If the buffer is for unsolicited ELS or CT traffic, this function |
| 3174 | * returns the buffer and also posts another buffer to the firmware. |
| 3175 | **/ |
| 3176 | static struct lpfc_dmabuf * |
| 3177 | lpfc_sli_get_buff(struct lpfc_hba *phba, |
| 3178 | struct lpfc_sli_ring *pring, |
| 3179 | uint32_t tag) |
| 3180 | { |
| 3181 | struct hbq_dmabuf *hbq_entry; |
| 3182 | |
| 3183 | if (tag & QUE_BUFTAG_BIT) |
| 3184 | return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); |
| 3185 | hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); |
| 3186 | if (!hbq_entry) |
| 3187 | return NULL; |
| 3188 | return &hbq_entry->dbuf; |
| 3189 | } |
| 3190 | |
| 3191 | /** |
| 3192 | * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer |
| 3193 | * containing a NVME LS request. |
| 3194 | * @phba: pointer to lpfc hba data structure. |
| 3195 | * @piocb: pointer to the iocbq struct representing the sequence starting |
| 3196 | * frame. |
| 3197 | * |
| 3198 | * This routine initially validates the NVME LS, validates there is a login |
| 3199 | * with the port that sent the LS, and then calls the appropriate nvme host |
| 3200 | * or target LS request handler. |
| 3201 | **/ |
| 3202 | static void |
| 3203 | lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) |
| 3204 | { |
| 3205 | struct lpfc_nodelist *ndlp; |
| 3206 | struct lpfc_dmabuf *d_buf; |
| 3207 | struct hbq_dmabuf *nvmebuf; |
| 3208 | struct fc_frame_header *fc_hdr; |
| 3209 | struct lpfc_async_xchg_ctx *axchg = NULL; |
| 3210 | char *failwhy = NULL; |
| 3211 | uint32_t oxid, sid, did, fctl, size; |
| 3212 | int ret = 1; |
| 3213 | |
| 3214 | d_buf = piocb->cmd_dmabuf; |
| 3215 | |
| 3216 | nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
| 3217 | fc_hdr = nvmebuf->hbuf.virt; |
| 3218 | oxid = be16_to_cpu(fc_hdr->fh_ox_id); |
| 3219 | sid = sli4_sid_from_fc_hdr(fc_hdr); |
| 3220 | did = sli4_did_from_fc_hdr(fc_hdr); |
| 3221 | fctl = (fc_hdr->fh_f_ctl[0] << 16 | |
| 3222 | fc_hdr->fh_f_ctl[1] << 8 | |
| 3223 | fc_hdr->fh_f_ctl[2]); |
| 3224 | size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); |
| 3225 | |
| 3226 | lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n" , |
| 3227 | oxid, size, sid); |
| 3228 | |
| 3229 | if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) { |
| 3230 | failwhy = "Driver Unloading" ; |
| 3231 | } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { |
| 3232 | failwhy = "NVME FC4 Disabled" ; |
| 3233 | } else if (!phba->nvmet_support && !phba->pport->localport) { |
| 3234 | failwhy = "No Localport" ; |
| 3235 | } else if (phba->nvmet_support && !phba->targetport) { |
| 3236 | failwhy = "No Targetport" ; |
| 3237 | } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) { |
| 3238 | failwhy = "Bad NVME LS R_CTL" ; |
| 3239 | } else if (unlikely((fctl & 0x00FF0000) != |
| 3240 | (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) { |
| 3241 | failwhy = "Bad NVME LS F_CTL" ; |
| 3242 | } else { |
| 3243 | axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC); |
| 3244 | if (!axchg) |
| 3245 | failwhy = "No CTX memory" ; |
| 3246 | } |
| 3247 | |
| 3248 | if (unlikely(failwhy)) { |
| 3249 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 3250 | "6154 Drop NVME LS: SID %06X OXID x%X: %s\n" , |
| 3251 | sid, oxid, failwhy); |
| 3252 | goto out_fail; |
| 3253 | } |
| 3254 | |
| 3255 | /* validate the source of the LS is logged in */ |
| 3256 | ndlp = lpfc_findnode_did(phba->pport, sid); |
| 3257 | if (!ndlp || |
| 3258 | ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && |
| 3259 | (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { |
| 3260 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, |
| 3261 | "6216 NVME Unsol rcv: No ndlp: " |
| 3262 | "NPort_ID x%x oxid x%x\n" , |
| 3263 | sid, oxid); |
| 3264 | goto out_fail; |
| 3265 | } |
| 3266 | |
| 3267 | axchg->phba = phba; |
| 3268 | axchg->ndlp = ndlp; |
| 3269 | axchg->size = size; |
| 3270 | axchg->oxid = oxid; |
| 3271 | axchg->sid = sid; |
| 3272 | axchg->wqeq = NULL; |
| 3273 | axchg->state = LPFC_NVME_STE_LS_RCV; |
| 3274 | axchg->entry_cnt = 1; |
| 3275 | axchg->rqb_buffer = (void *)nvmebuf; |
| 3276 | axchg->hdwq = &phba->sli4_hba.hdwq[0]; |
| 3277 | axchg->payload = nvmebuf->dbuf.virt; |
| 3278 | INIT_LIST_HEAD(list: &axchg->list); |
| 3279 | |
| 3280 | if (phba->nvmet_support) { |
| 3281 | ret = lpfc_nvmet_handle_lsreq(phba, axchg); |
| 3282 | spin_lock_irq(lock: &ndlp->lock); |
| 3283 | if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) { |
| 3284 | ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH; |
| 3285 | spin_unlock_irq(lock: &ndlp->lock); |
| 3286 | |
| 3287 | /* This reference is a single occurrence to hold the |
| 3288 | * node valid until the nvmet transport calls |
| 3289 | * host_release. |
| 3290 | */ |
| 3291 | if (!lpfc_nlp_get(ndlp)) |
| 3292 | goto out_fail; |
| 3293 | |
| 3294 | lpfc_printf_log(phba, KERN_ERR, LOG_NODE, |
| 3295 | "6206 NVMET unsol ls_req ndlp x%px " |
| 3296 | "DID x%x xflags x%x refcnt %d\n" , |
| 3297 | ndlp, ndlp->nlp_DID, |
| 3298 | ndlp->fc4_xpt_flags, |
| 3299 | kref_read(&ndlp->kref)); |
| 3300 | } else { |
| 3301 | spin_unlock_irq(lock: &ndlp->lock); |
| 3302 | } |
| 3303 | } else { |
| 3304 | ret = lpfc_nvme_handle_lsreq(phba, axchg); |
| 3305 | } |
| 3306 | |
| 3307 | /* if zero, LS was successfully handled. If non-zero, LS not handled */ |
| 3308 | if (!ret) |
| 3309 | return; |
| 3310 | |
| 3311 | out_fail: |
| 3312 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 3313 | "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X " |
| 3314 | "NVMe%s handler failed %d\n" , |
| 3315 | did, sid, oxid, |
| 3316 | (phba->nvmet_support) ? "T" : "I" , ret); |
| 3317 | |
| 3318 | /* recycle receive buffer */ |
| 3319 | lpfc_in_buf_free(phba, &nvmebuf->dbuf); |
| 3320 | |
| 3321 | /* If start of new exchange, abort it */ |
| 3322 | if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX))) |
| 3323 | ret = lpfc_nvme_unsol_ls_issue_abort(phba, ctxp: axchg, sid, xri: oxid); |
| 3324 | |
| 3325 | if (ret) |
| 3326 | kfree(objp: axchg); |
| 3327 | } |
| 3328 | |
| 3329 | /** |
| 3330 | * lpfc_complete_unsol_iocb - Complete an unsolicited sequence |
| 3331 | * @phba: Pointer to HBA context object. |
| 3332 | * @pring: Pointer to driver SLI ring object. |
| 3333 | * @saveq: Pointer to the iocbq struct representing the sequence starting frame. |
| 3334 | * @fch_r_ctl: the r_ctl for the first frame of the sequence. |
| 3335 | * @fch_type: the type for the first frame of the sequence. |
| 3336 | * |
| 3337 | * This function is called with no lock held. This function uses the r_ctl and |
| 3338 | * type of the received sequence to find the correct callback function to call |
| 3339 | * to process the sequence. |
| 3340 | **/ |
| 3341 | static int |
| 3342 | lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 3343 | struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, |
| 3344 | uint32_t fch_type) |
| 3345 | { |
| 3346 | int i; |
| 3347 | |
| 3348 | switch (fch_type) { |
| 3349 | case FC_TYPE_NVME: |
| 3350 | lpfc_nvme_unsol_ls_handler(phba, piocb: saveq); |
| 3351 | return 1; |
| 3352 | default: |
| 3353 | break; |
| 3354 | } |
| 3355 | |
| 3356 | /* unSolicited Responses */ |
| 3357 | if (pring->prt[0].profile) { |
| 3358 | if (pring->prt[0].lpfc_sli_rcv_unsol_event) |
| 3359 | (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, |
| 3360 | saveq); |
| 3361 | return 1; |
| 3362 | } |
| 3363 | /* We must search, based on rctl / type |
| 3364 | for the right routine */ |
| 3365 | for (i = 0; i < pring->num_mask; i++) { |
| 3366 | if ((pring->prt[i].rctl == fch_r_ctl) && |
| 3367 | (pring->prt[i].type == fch_type)) { |
| 3368 | if (pring->prt[i].lpfc_sli_rcv_unsol_event) |
| 3369 | (pring->prt[i].lpfc_sli_rcv_unsol_event) |
| 3370 | (phba, pring, saveq); |
| 3371 | return 1; |
| 3372 | } |
| 3373 | } |
| 3374 | return 0; |
| 3375 | } |
| 3376 | |
/*
 * lpfc_sli_prep_unsol_wqe - Mirror an unsolicited SLI-3 IOCB into WQE fields
 * @phba: Pointer to HBA context object.
 * @saveq: Pointer to the iocbq carrying the unsolicited frame.
 *
 * Copies status, addressing, and length fields from the received IOCB into
 * the iocbq's wcqe_cmpl and xmit_els_rsp WQE images so that common (SLI-4
 * style) completion code can be applied to the SLI-3 unsolicited frame.
 */
static void
lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
			struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	union lpfc_wqe128 *wqe;
	u16 i = 0;

	irsp = &saveq->iocb;
	wqe = &saveq->wqe;

	/* Fill wcqe with the IOCB status fields */
	bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
	saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
	saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
	saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;

	/* Source ID */
	bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);

	/* rx-id of the response frame */
	bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);

	/* ox-id of the frame */
	bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
	       irsp->unsli3.rcvsli3.ox_id);

	/* DID */
	bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
	       irsp->un.rcvels.remoteID);

	/* unsol data len: with HBQs enabled the BDE sizes are carried in
	 * HBQ entries embedded in the IOCB — the first in ulpWord[0], the
	 * second in sli3Words[4].
	 */
	for (i = 0; i < irsp->ulpBdeCount; i++) {
		struct lpfc_hbq_entry *hbqe = NULL;

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			if (i == 0) {
				hbqe = (struct lpfc_hbq_entry *)
					&irsp->un.ulpWord[0];
				saveq->wqe.gen_req.bde.tus.f.bdeSize =
					hbqe->bde.tus.f.bdeSize;
			} else if (i == 1) {
				hbqe = (struct lpfc_hbq_entry *)
					&irsp->unsli3.sli3Words[4];
				saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
			}
		}
	}
}
| 3426 | |
| 3427 | /** |
| 3428 | * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler |
| 3429 | * @phba: Pointer to HBA context object. |
| 3430 | * @pring: Pointer to driver SLI ring object. |
| 3431 | * @saveq: Pointer to the unsolicited iocb. |
| 3432 | * |
| 3433 | * This function is called with no lock held by the ring event handler |
| 3434 | * when there is an unsolicited iocb posted to the response ring by the |
| 3435 | * firmware. This function gets the buffer associated with the iocbs |
| 3436 | * and calls the event handler for the ring. This function handles both |
| 3437 | * qring buffers and hbq buffers. |
| 3438 | * When the function returns 1 the caller can free the iocb object otherwise |
| 3439 | * upper layer functions will free the iocb objects. |
| 3440 | **/ |
| 3441 | static int |
| 3442 | lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 3443 | struct lpfc_iocbq *saveq) |
| 3444 | { |
| 3445 | IOCB_t * irsp; |
| 3446 | WORD5 * w5p; |
| 3447 | dma_addr_t paddr; |
| 3448 | uint32_t Rctl, Type; |
| 3449 | struct lpfc_iocbq *iocbq; |
| 3450 | struct lpfc_dmabuf *dmzbuf; |
| 3451 | |
| 3452 | irsp = &saveq->iocb; |
| 3453 | saveq->vport = phba->pport; |
| 3454 | |
| 3455 | if (irsp->ulpCommand == CMD_ASYNC_STATUS) { |
| 3456 | if (pring->lpfc_sli_rcv_async_status) |
| 3457 | pring->lpfc_sli_rcv_async_status(phba, pring, saveq); |
| 3458 | else |
| 3459 | lpfc_printf_log(phba, |
| 3460 | KERN_WARNING, |
| 3461 | LOG_SLI, |
| 3462 | "0316 Ring %d handler: unexpected " |
| 3463 | "ASYNC_STATUS iocb received evt_code " |
| 3464 | "0x%x\n" , |
| 3465 | pring->ringno, |
| 3466 | irsp->un.asyncstat.evt_code); |
| 3467 | return 1; |
| 3468 | } |
| 3469 | |
| 3470 | if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && |
| 3471 | (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { |
| 3472 | if (irsp->ulpBdeCount > 0) { |
| 3473 | dmzbuf = lpfc_sli_get_buff(phba, pring, |
| 3474 | tag: irsp->un.ulpWord[3]); |
| 3475 | lpfc_in_buf_free(phba, dmzbuf); |
| 3476 | } |
| 3477 | |
| 3478 | if (irsp->ulpBdeCount > 1) { |
| 3479 | dmzbuf = lpfc_sli_get_buff(phba, pring, |
| 3480 | tag: irsp->unsli3.sli3Words[3]); |
| 3481 | lpfc_in_buf_free(phba, dmzbuf); |
| 3482 | } |
| 3483 | |
| 3484 | if (irsp->ulpBdeCount > 2) { |
| 3485 | dmzbuf = lpfc_sli_get_buff(phba, pring, |
| 3486 | tag: irsp->unsli3.sli3Words[7]); |
| 3487 | lpfc_in_buf_free(phba, dmzbuf); |
| 3488 | } |
| 3489 | |
| 3490 | return 1; |
| 3491 | } |
| 3492 | |
| 3493 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
| 3494 | if (irsp->ulpBdeCount != 0) { |
| 3495 | saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring, |
| 3496 | tag: irsp->un.ulpWord[3]); |
| 3497 | if (!saveq->cmd_dmabuf) |
| 3498 | lpfc_printf_log(phba, |
| 3499 | KERN_ERR, |
| 3500 | LOG_SLI, |
| 3501 | "0341 Ring %d Cannot find buffer for " |
| 3502 | "an unsolicited iocb. tag 0x%x\n" , |
| 3503 | pring->ringno, |
| 3504 | irsp->un.ulpWord[3]); |
| 3505 | } |
| 3506 | if (irsp->ulpBdeCount == 2) { |
| 3507 | saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring, |
| 3508 | tag: irsp->unsli3.sli3Words[7]); |
| 3509 | if (!saveq->bpl_dmabuf) |
| 3510 | lpfc_printf_log(phba, |
| 3511 | KERN_ERR, |
| 3512 | LOG_SLI, |
| 3513 | "0342 Ring %d Cannot find buffer for an" |
| 3514 | " unsolicited iocb. tag 0x%x\n" , |
| 3515 | pring->ringno, |
| 3516 | irsp->unsli3.sli3Words[7]); |
| 3517 | } |
| 3518 | list_for_each_entry(iocbq, &saveq->list, list) { |
| 3519 | irsp = &iocbq->iocb; |
| 3520 | if (irsp->ulpBdeCount != 0) { |
| 3521 | iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba, |
| 3522 | pring, |
| 3523 | tag: irsp->un.ulpWord[3]); |
| 3524 | if (!iocbq->cmd_dmabuf) |
| 3525 | lpfc_printf_log(phba, |
| 3526 | KERN_ERR, |
| 3527 | LOG_SLI, |
| 3528 | "0343 Ring %d Cannot find " |
| 3529 | "buffer for an unsolicited iocb" |
| 3530 | ". tag 0x%x\n" , pring->ringno, |
| 3531 | irsp->un.ulpWord[3]); |
| 3532 | } |
| 3533 | if (irsp->ulpBdeCount == 2) { |
| 3534 | iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba, |
| 3535 | pring, |
| 3536 | tag: irsp->unsli3.sli3Words[7]); |
| 3537 | if (!iocbq->bpl_dmabuf) |
| 3538 | lpfc_printf_log(phba, |
| 3539 | KERN_ERR, |
| 3540 | LOG_SLI, |
| 3541 | "0344 Ring %d Cannot find " |
| 3542 | "buffer for an unsolicited " |
| 3543 | "iocb. tag 0x%x\n" , |
| 3544 | pring->ringno, |
| 3545 | irsp->unsli3.sli3Words[7]); |
| 3546 | } |
| 3547 | } |
| 3548 | } else { |
| 3549 | paddr = getPaddr(irsp->un.cont64[0].addrHigh, |
| 3550 | irsp->un.cont64[0].addrLow); |
| 3551 | saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, |
| 3552 | paddr); |
| 3553 | if (irsp->ulpBdeCount == 2) { |
| 3554 | paddr = getPaddr(irsp->un.cont64[1].addrHigh, |
| 3555 | irsp->un.cont64[1].addrLow); |
| 3556 | saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, |
| 3557 | pring, |
| 3558 | paddr); |
| 3559 | } |
| 3560 | } |
| 3561 | |
| 3562 | if (irsp->ulpBdeCount != 0 && |
| 3563 | (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || |
| 3564 | irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { |
| 3565 | int found = 0; |
| 3566 | |
| 3567 | /* search continue save q for same XRI */ |
| 3568 | list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { |
| 3569 | if (iocbq->iocb.unsli3.rcvsli3.ox_id == |
| 3570 | saveq->iocb.unsli3.rcvsli3.ox_id) { |
| 3571 | list_add_tail(new: &saveq->list, head: &iocbq->list); |
| 3572 | found = 1; |
| 3573 | break; |
| 3574 | } |
| 3575 | } |
| 3576 | if (!found) |
| 3577 | list_add_tail(new: &saveq->clist, |
| 3578 | head: &pring->iocb_continue_saveq); |
| 3579 | |
| 3580 | if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { |
| 3581 | list_del_init(entry: &iocbq->clist); |
| 3582 | saveq = iocbq; |
| 3583 | irsp = &saveq->iocb; |
| 3584 | } else { |
| 3585 | return 0; |
| 3586 | } |
| 3587 | } |
| 3588 | if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || |
| 3589 | (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || |
| 3590 | (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { |
| 3591 | Rctl = FC_RCTL_ELS_REQ; |
| 3592 | Type = FC_TYPE_ELS; |
| 3593 | } else { |
| 3594 | w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); |
| 3595 | Rctl = w5p->hcsw.Rctl; |
| 3596 | Type = w5p->hcsw.Type; |
| 3597 | |
| 3598 | /* Firmware Workaround */ |
| 3599 | if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && |
| 3600 | (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || |
| 3601 | irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { |
| 3602 | Rctl = FC_RCTL_ELS_REQ; |
| 3603 | Type = FC_TYPE_ELS; |
| 3604 | w5p->hcsw.Rctl = Rctl; |
| 3605 | w5p->hcsw.Type = Type; |
| 3606 | } |
| 3607 | } |
| 3608 | |
| 3609 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && |
| 3610 | (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX || |
| 3611 | irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { |
| 3612 | if (irsp->unsli3.rcvsli3.vpi == 0xffff) |
| 3613 | saveq->vport = phba->pport; |
| 3614 | else |
| 3615 | saveq->vport = lpfc_find_vport_by_vpid(phba, |
| 3616 | irsp->unsli3.rcvsli3.vpi); |
| 3617 | } |
| 3618 | |
| 3619 | /* Prepare WQE with Unsol frame */ |
| 3620 | lpfc_sli_prep_unsol_wqe(phba, saveq); |
| 3621 | |
| 3622 | if (!lpfc_complete_unsol_iocb(phba, pring, saveq, fch_r_ctl: Rctl, fch_type: Type)) |
| 3623 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 3624 | "0313 Ring %d handler: unexpected Rctl x%x " |
| 3625 | "Type x%x received\n" , |
| 3626 | pring->ringno, Rctl, Type); |
| 3627 | |
| 3628 | return 1; |
| 3629 | } |
| 3630 | |
| 3631 | /** |
| 3632 | * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb |
| 3633 | * @phba: Pointer to HBA context object. |
| 3634 | * @pring: Pointer to driver SLI ring object. |
| 3635 | * @prspiocb: Pointer to response iocb object. |
| 3636 | * |
| 3637 | * This function looks up the iocb_lookup table to get the command iocb |
| 3638 | * corresponding to the given response iocb using the iotag of the |
| 3639 | * response iocb. The driver calls this function with the hbalock held |
| 3640 | * for SLI3 ports or the ring lock held for SLI4 ports. |
| 3641 | * This function returns the command iocb object if it finds the command |
| 3642 | * iocb else returns NULL. |
| 3643 | **/ |
| 3644 | static struct lpfc_iocbq * |
| 3645 | lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, |
| 3646 | struct lpfc_sli_ring *pring, |
| 3647 | struct lpfc_iocbq *prspiocb) |
| 3648 | { |
| 3649 | struct lpfc_iocbq *cmd_iocb = NULL; |
| 3650 | u16 iotag; |
| 3651 | |
| 3652 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 3653 | iotag = get_wqe_reqtag(prspiocb); |
| 3654 | else |
| 3655 | iotag = prspiocb->iocb.ulpIoTag; |
| 3656 | |
| 3657 | if (iotag != 0 && iotag <= phba->sli.last_iotag) { |
| 3658 | cmd_iocb = phba->sli.iocbq_lookup[iotag]; |
| 3659 | if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { |
| 3660 | /* remove from txcmpl queue list */ |
| 3661 | list_del_init(entry: &cmd_iocb->list); |
| 3662 | cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; |
| 3663 | pring->txcmplq_cnt--; |
| 3664 | return cmd_iocb; |
| 3665 | } |
| 3666 | } |
| 3667 | |
| 3668 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 3669 | "0317 iotag x%x is out of " |
| 3670 | "range: max iotag x%x\n" , |
| 3671 | iotag, phba->sli.last_iotag); |
| 3672 | return NULL; |
| 3673 | } |
| 3674 | |
| 3675 | /** |
| 3676 | * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag |
| 3677 | * @phba: Pointer to HBA context object. |
| 3678 | * @pring: Pointer to driver SLI ring object. |
| 3679 | * @iotag: IOCB tag. |
| 3680 | * |
| 3681 | * This function looks up the iocb_lookup table to get the command iocb |
| 3682 | * corresponding to the given iotag. The driver calls this function with |
| 3683 | * the ring lock held because this function is an SLI4 port only helper. |
| 3684 | * This function returns the command iocb object if it finds the command |
| 3685 | * iocb else returns NULL. |
| 3686 | **/ |
| 3687 | static struct lpfc_iocbq * |
| 3688 | lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, |
| 3689 | struct lpfc_sli_ring *pring, uint16_t iotag) |
| 3690 | { |
| 3691 | struct lpfc_iocbq *cmd_iocb = NULL; |
| 3692 | |
| 3693 | if (iotag != 0 && iotag <= phba->sli.last_iotag) { |
| 3694 | cmd_iocb = phba->sli.iocbq_lookup[iotag]; |
| 3695 | if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { |
| 3696 | /* remove from txcmpl queue list */ |
| 3697 | list_del_init(entry: &cmd_iocb->list); |
| 3698 | cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; |
| 3699 | pring->txcmplq_cnt--; |
| 3700 | return cmd_iocb; |
| 3701 | } |
| 3702 | } |
| 3703 | |
| 3704 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 3705 | "0372 iotag x%x lookup error: max iotag (x%x) " |
| 3706 | "cmd_flag x%x\n" , |
| 3707 | iotag, phba->sli.last_iotag, |
| 3708 | cmd_iocb ? cmd_iocb->cmd_flag : 0xffff); |
| 3709 | return NULL; |
| 3710 | } |
| 3711 | |
| 3712 | /** |
| 3713 | * lpfc_sli_process_sol_iocb - process solicited iocb completion |
| 3714 | * @phba: Pointer to HBA context object. |
| 3715 | * @pring: Pointer to driver SLI ring object. |
| 3716 | * @saveq: Pointer to the response iocb to be processed. |
| 3717 | * |
| 3718 | * This function is called by the ring event handler for non-fcp |
| 3719 | * rings when there is a new response iocb in the response ring. |
| 3720 | * The caller is not required to hold any locks. This function |
| 3721 | * gets the command iocb associated with the response iocb and |
| 3722 | * calls the completion handler for the command iocb. If there |
| 3723 | * is no completion handler, the function will free the resources |
| 3724 | * associated with command iocb. If the response iocb is for |
| 3725 | * an already aborted command iocb, the status of the completion |
| 3726 | * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. |
| 3727 | * This function always returns 1. |
| 3728 | **/ |
| 3729 | static int |
| 3730 | lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 3731 | struct lpfc_iocbq *saveq) |
| 3732 | { |
| 3733 | struct lpfc_iocbq *cmdiocbp; |
| 3734 | unsigned long iflag; |
| 3735 | u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag; |
| 3736 | |
| 3737 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 3738 | spin_lock_irqsave(&pring->ring_lock, iflag); |
| 3739 | else |
| 3740 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 3741 | cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, prspiocb: saveq); |
| 3742 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 3743 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflag); |
| 3744 | else |
| 3745 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 3746 | |
| 3747 | ulp_command = get_job_cmnd(phba, iocbq: saveq); |
| 3748 | ulp_status = get_job_ulpstatus(phba, iocbq: saveq); |
| 3749 | ulp_word4 = get_job_word4(phba, iocbq: saveq); |
| 3750 | ulp_context = get_job_ulpcontext(phba, iocbq: saveq); |
| 3751 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 3752 | iotag = get_wqe_reqtag(saveq); |
| 3753 | else |
| 3754 | iotag = saveq->iocb.ulpIoTag; |
| 3755 | |
| 3756 | if (cmdiocbp) { |
| 3757 | ulp_command = get_job_cmnd(phba, iocbq: cmdiocbp); |
| 3758 | if (cmdiocbp->cmd_cmpl) { |
| 3759 | /* |
| 3760 | * If an ELS command failed send an event to mgmt |
| 3761 | * application. |
| 3762 | */ |
| 3763 | if (ulp_status && |
| 3764 | (pring->ringno == LPFC_ELS_RING) && |
| 3765 | (ulp_command == CMD_ELS_REQUEST64_CR)) |
| 3766 | lpfc_send_els_failure_event(phba, |
| 3767 | cmdiocbp, saveq); |
| 3768 | |
| 3769 | /* |
| 3770 | * Post all ELS completions to the worker thread. |
| 3771 | * All other are passed to the completion callback. |
| 3772 | */ |
| 3773 | if (pring->ringno == LPFC_ELS_RING) { |
| 3774 | if ((phba->sli_rev < LPFC_SLI_REV4) && |
| 3775 | (cmdiocbp->cmd_flag & |
| 3776 | LPFC_DRIVER_ABORTED)) { |
| 3777 | spin_lock_irqsave(&phba->hbalock, |
| 3778 | iflag); |
| 3779 | cmdiocbp->cmd_flag &= |
| 3780 | ~LPFC_DRIVER_ABORTED; |
| 3781 | spin_unlock_irqrestore(lock: &phba->hbalock, |
| 3782 | flags: iflag); |
| 3783 | saveq->iocb.ulpStatus = |
| 3784 | IOSTAT_LOCAL_REJECT; |
| 3785 | saveq->iocb.un.ulpWord[4] = |
| 3786 | IOERR_SLI_ABORTED; |
| 3787 | |
| 3788 | /* Firmware could still be in progress |
| 3789 | * of DMAing payload, so don't free data |
| 3790 | * buffer till after a hbeat. |
| 3791 | */ |
| 3792 | spin_lock_irqsave(&phba->hbalock, |
| 3793 | iflag); |
| 3794 | saveq->cmd_flag |= LPFC_DELAY_MEM_FREE; |
| 3795 | spin_unlock_irqrestore(lock: &phba->hbalock, |
| 3796 | flags: iflag); |
| 3797 | } |
| 3798 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 3799 | if (saveq->cmd_flag & |
| 3800 | LPFC_EXCHANGE_BUSY) { |
| 3801 | /* Set cmdiocb flag for the |
| 3802 | * exchange busy so sgl (xri) |
| 3803 | * will not be released until |
| 3804 | * the abort xri is received |
| 3805 | * from hba. |
| 3806 | */ |
| 3807 | spin_lock_irqsave( |
| 3808 | &phba->hbalock, iflag); |
| 3809 | cmdiocbp->cmd_flag |= |
| 3810 | LPFC_EXCHANGE_BUSY; |
| 3811 | spin_unlock_irqrestore( |
| 3812 | lock: &phba->hbalock, flags: iflag); |
| 3813 | } |
| 3814 | if (cmdiocbp->cmd_flag & |
| 3815 | LPFC_DRIVER_ABORTED) { |
| 3816 | /* |
| 3817 | * Clear LPFC_DRIVER_ABORTED |
| 3818 | * bit in case it was driver |
| 3819 | * initiated abort. |
| 3820 | */ |
| 3821 | spin_lock_irqsave( |
| 3822 | &phba->hbalock, iflag); |
| 3823 | cmdiocbp->cmd_flag &= |
| 3824 | ~LPFC_DRIVER_ABORTED; |
| 3825 | spin_unlock_irqrestore( |
| 3826 | lock: &phba->hbalock, flags: iflag); |
| 3827 | set_job_ulpstatus(cmdiocbp, |
| 3828 | IOSTAT_LOCAL_REJECT); |
| 3829 | set_job_ulpword4(cmdiocbp, |
| 3830 | IOERR_ABORT_REQUESTED); |
| 3831 | /* |
| 3832 | * For SLI4, irspiocb contains |
| 3833 | * NO_XRI in sli_xritag, it |
| 3834 | * shall not affect releasing |
| 3835 | * sgl (xri) process. |
| 3836 | */ |
| 3837 | set_job_ulpstatus(saveq, |
| 3838 | IOSTAT_LOCAL_REJECT); |
| 3839 | set_job_ulpword4(saveq, |
| 3840 | IOERR_SLI_ABORTED); |
| 3841 | spin_lock_irqsave( |
| 3842 | &phba->hbalock, iflag); |
| 3843 | saveq->cmd_flag |= |
| 3844 | LPFC_DELAY_MEM_FREE; |
| 3845 | spin_unlock_irqrestore( |
| 3846 | lock: &phba->hbalock, flags: iflag); |
| 3847 | } |
| 3848 | } |
| 3849 | } |
| 3850 | cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq); |
| 3851 | } else |
| 3852 | lpfc_sli_release_iocbq(phba, iocbq: cmdiocbp); |
| 3853 | } else { |
| 3854 | /* |
| 3855 | * Unknown initiating command based on the response iotag. |
| 3856 | * This could be the case on the ELS ring because of |
| 3857 | * lpfc_els_abort(). |
| 3858 | */ |
| 3859 | if (pring->ringno != LPFC_ELS_RING) { |
| 3860 | /* |
| 3861 | * Ring <ringno> handler: unexpected completion IoTag |
| 3862 | * <IoTag> |
| 3863 | */ |
| 3864 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 3865 | "0322 Ring %d handler: " |
| 3866 | "unexpected completion IoTag x%x " |
| 3867 | "Data: x%x x%x x%x x%x\n" , |
| 3868 | pring->ringno, iotag, ulp_status, |
| 3869 | ulp_word4, ulp_command, ulp_context); |
| 3870 | } |
| 3871 | } |
| 3872 | |
| 3873 | return 1; |
| 3874 | } |
| 3875 | |
| 3876 | /** |
| 3877 | * lpfc_sli_rsp_pointers_error - Response ring pointer error handler |
| 3878 | * @phba: Pointer to HBA context object. |
| 3879 | * @pring: Pointer to driver SLI ring object. |
| 3880 | * |
| 3881 | * This function is called from the iocb ring event handlers when |
| 3882 | * put pointer is ahead of the get pointer for a ring. This function signal |
| 3883 | * an error attention condition to the worker thread and the worker |
| 3884 | * thread will transition the HBA to offline state. |
| 3885 | **/ |
| 3886 | static void |
| 3887 | lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
| 3888 | { |
| 3889 | struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; |
| 3890 | /* |
| 3891 | * Ring <ringno> handler: portRspPut <portRspPut> is bigger than |
| 3892 | * rsp ring <portRspMax> |
| 3893 | */ |
| 3894 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 3895 | "0312 Ring %d handler: portRspPut %d " |
| 3896 | "is bigger than rsp ring %d\n" , |
| 3897 | pring->ringno, le32_to_cpu(pgp->rspPutInx), |
| 3898 | pring->sli.sli3.numRiocb); |
| 3899 | |
| 3900 | phba->link_state = LPFC_HBA_ERROR; |
| 3901 | |
| 3902 | /* |
| 3903 | * All error attention handlers are posted to |
| 3904 | * worker thread |
| 3905 | */ |
| 3906 | phba->work_ha |= HA_ERATT; |
| 3907 | phba->work_hs = HS_FFER3; |
| 3908 | |
| 3909 | lpfc_worker_wake_up(phba); |
| 3910 | |
| 3911 | return; |
| 3912 | } |
| 3913 | |
| 3914 | /** |
| 3915 | * lpfc_poll_eratt - Error attention polling timer timeout handler |
| 3916 | * @t: Context to fetch pointer to address of HBA context object from. |
| 3917 | * |
| 3918 | * This function is invoked by the Error Attention polling timer when the |
| 3919 | * timer times out. It will check the SLI Error Attention register for |
| 3920 | * possible attention events. If so, it will post an Error Attention event |
| 3921 | * and wake up worker thread to process it. Otherwise, it will set up the |
| 3922 | * Error Attention polling timer for the next poll. |
| 3923 | **/ |
| 3924 | void lpfc_poll_eratt(struct timer_list *t) |
| 3925 | { |
| 3926 | struct lpfc_hba *phba; |
| 3927 | uint32_t eratt = 0; |
| 3928 | uint64_t sli_intr, cnt; |
| 3929 | |
| 3930 | phba = timer_container_of(phba, t, eratt_poll); |
| 3931 | |
| 3932 | if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) |
| 3933 | return; |
| 3934 | |
| 3935 | if (phba->sli_rev == LPFC_SLI_REV4 && |
| 3936 | !test_bit(HBA_SETUP, &phba->hba_flag)) { |
| 3937 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 3938 | "0663 HBA still initializing 0x%lx, restart " |
| 3939 | "timer\n" , |
| 3940 | phba->hba_flag); |
| 3941 | goto restart_timer; |
| 3942 | } |
| 3943 | |
| 3944 | /* Here we will also keep track of interrupts per sec of the hba */ |
| 3945 | sli_intr = phba->sli.slistat.sli_intr; |
| 3946 | |
| 3947 | if (phba->sli.slistat.sli_prev_intr > sli_intr) |
| 3948 | cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + |
| 3949 | sli_intr); |
| 3950 | else |
| 3951 | cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); |
| 3952 | |
| 3953 | /* 64-bit integer division not supported on 32-bit x86 - use do_div */ |
| 3954 | do_div(cnt, phba->eratt_poll_interval); |
| 3955 | phba->sli.slistat.sli_ips = cnt; |
| 3956 | |
| 3957 | phba->sli.slistat.sli_prev_intr = sli_intr; |
| 3958 | |
| 3959 | /* Check chip HA register for error event */ |
| 3960 | eratt = lpfc_sli_check_eratt(phba); |
| 3961 | |
| 3962 | if (eratt) { |
| 3963 | /* Tell the worker thread there is work to do */ |
| 3964 | lpfc_worker_wake_up(phba); |
| 3965 | return; |
| 3966 | } |
| 3967 | |
| 3968 | restart_timer: |
| 3969 | /* Restart the timer for next eratt poll */ |
| 3970 | mod_timer(timer: &phba->eratt_poll, |
| 3971 | expires: jiffies + secs_to_jiffies(phba->eratt_poll_interval)); |
| 3972 | return; |
| 3973 | } |
| 3974 | |
| 3975 | |
| 3976 | /** |
| 3977 | * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring |
| 3978 | * @phba: Pointer to HBA context object. |
| 3979 | * @pring: Pointer to driver SLI ring object. |
| 3980 | * @mask: Host attention register mask for this ring. |
| 3981 | * |
| 3982 | * This function is called from the interrupt context when there is a ring |
| 3983 | * event for the fcp ring. The caller does not hold any lock. |
| 3984 | * The function processes each response iocb in the response ring until it |
| 3985 | * finds an iocb with LE bit set and chains all the iocbs up to the iocb with |
| 3986 | * LE bit set. The function will call the completion handler of the command iocb |
| 3987 | * if the response iocb indicates a completion for a command iocb or it is |
| 3988 | * an abort completion. The function will call lpfc_sli_process_unsol_iocb |
| 3989 | * function if this is an unsolicited iocb. |
| 3990 | * This routine presumes LPFC_FCP_RING handling and doesn't bother |
| 3991 | * to check it explicitly. |
| 3992 | */ |
| 3993 | int |
| 3994 | lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, |
| 3995 | struct lpfc_sli_ring *pring, uint32_t mask) |
| 3996 | { |
| 3997 | struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; |
| 3998 | IOCB_t *irsp = NULL; |
| 3999 | IOCB_t *entry = NULL; |
| 4000 | struct lpfc_iocbq *cmdiocbq = NULL; |
| 4001 | struct lpfc_iocbq rspiocbq; |
| 4002 | uint32_t status; |
| 4003 | uint32_t portRspPut, portRspMax; |
| 4004 | int rc = 1; |
| 4005 | lpfc_iocb_type type; |
| 4006 | unsigned long iflag; |
| 4007 | uint32_t rsp_cmpl = 0; |
| 4008 | |
| 4009 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4010 | pring->stats.iocb_event++; |
| 4011 | |
| 4012 | /* |
| 4013 | * The next available response entry should never exceed the maximum |
| 4014 | * entries. If it does, treat it as an adapter hardware error. |
| 4015 | */ |
| 4016 | portRspMax = pring->sli.sli3.numRiocb; |
| 4017 | portRspPut = le32_to_cpu(pgp->rspPutInx); |
| 4018 | if (unlikely(portRspPut >= portRspMax)) { |
| 4019 | lpfc_sli_rsp_pointers_error(phba, pring); |
| 4020 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4021 | return 1; |
| 4022 | } |
| 4023 | if (phba->fcp_ring_in_use) { |
| 4024 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4025 | return 1; |
| 4026 | } else |
| 4027 | phba->fcp_ring_in_use = 1; |
| 4028 | |
| 4029 | rmb(); |
| 4030 | while (pring->sli.sli3.rspidx != portRspPut) { |
| 4031 | /* |
| 4032 | * Fetch an entry off the ring and copy it into a local data |
| 4033 | * structure. The copy involves a byte-swap since the |
| 4034 | * network byte order and pci byte orders are different. |
| 4035 | */ |
| 4036 | entry = lpfc_resp_iocb(phba, pring); |
| 4037 | phba->last_completion_time = jiffies; |
| 4038 | |
| 4039 | if (++pring->sli.sli3.rspidx >= portRspMax) |
| 4040 | pring->sli.sli3.rspidx = 0; |
| 4041 | |
| 4042 | lpfc_sli_pcimem_bcopy((uint32_t *) entry, |
| 4043 | (uint32_t *) &rspiocbq.iocb, |
| 4044 | phba->iocb_rsp_size); |
| 4045 | INIT_LIST_HEAD(list: &(rspiocbq.list)); |
| 4046 | irsp = &rspiocbq.iocb; |
| 4047 | |
| 4048 | type = lpfc_sli_iocb_cmd_type(iocb_cmnd: irsp->ulpCommand & CMD_IOCB_MASK); |
| 4049 | pring->stats.iocb_rsp++; |
| 4050 | rsp_cmpl++; |
| 4051 | |
| 4052 | if (unlikely(irsp->ulpStatus)) { |
| 4053 | /* |
| 4054 | * If resource errors reported from HBA, reduce |
| 4055 | * queuedepths of the SCSI device. |
| 4056 | */ |
| 4057 | if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && |
| 4058 | ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == |
| 4059 | IOERR_NO_RESOURCES)) { |
| 4060 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4061 | phba->lpfc_rampdown_queue_depth(phba); |
| 4062 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4063 | } |
| 4064 | |
| 4065 | /* Rsp ring <ringno> error: IOCB */ |
| 4066 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 4067 | "0336 Rsp Ring %d error: IOCB Data: " |
| 4068 | "x%x x%x x%x x%x x%x x%x x%x x%x\n" , |
| 4069 | pring->ringno, |
| 4070 | irsp->un.ulpWord[0], |
| 4071 | irsp->un.ulpWord[1], |
| 4072 | irsp->un.ulpWord[2], |
| 4073 | irsp->un.ulpWord[3], |
| 4074 | irsp->un.ulpWord[4], |
| 4075 | irsp->un.ulpWord[5], |
| 4076 | *(uint32_t *)&irsp->un1, |
| 4077 | *((uint32_t *)&irsp->un1 + 1)); |
| 4078 | } |
| 4079 | |
| 4080 | switch (type) { |
| 4081 | case LPFC_ABORT_IOCB: |
| 4082 | case LPFC_SOL_IOCB: |
| 4083 | /* |
| 4084 | * Idle exchange closed via ABTS from port. No iocb |
| 4085 | * resources need to be recovered. |
| 4086 | */ |
| 4087 | if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { |
| 4088 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 4089 | "0333 IOCB cmd 0x%x" |
| 4090 | " processed. Skipping" |
| 4091 | " completion\n" , |
| 4092 | irsp->ulpCommand); |
| 4093 | break; |
| 4094 | } |
| 4095 | |
| 4096 | cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, |
| 4097 | prspiocb: &rspiocbq); |
| 4098 | if (unlikely(!cmdiocbq)) |
| 4099 | break; |
| 4100 | if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) |
| 4101 | cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; |
| 4102 | if (cmdiocbq->cmd_cmpl) { |
| 4103 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4104 | cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq); |
| 4105 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4106 | } |
| 4107 | break; |
| 4108 | case LPFC_UNSOL_IOCB: |
| 4109 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4110 | lpfc_sli_process_unsol_iocb(phba, pring, saveq: &rspiocbq); |
| 4111 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4112 | break; |
| 4113 | default: |
| 4114 | if (irsp->ulpCommand == CMD_ADAPTER_MSG) { |
| 4115 | char adaptermsg[LPFC_MAX_ADPTMSG]; |
| 4116 | memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); |
| 4117 | memcpy(&adaptermsg[0], (uint8_t *) irsp, |
| 4118 | MAX_MSG_DATA); |
| 4119 | dev_warn(&((phba->pcidev)->dev), |
| 4120 | "lpfc%d: %s\n" , |
| 4121 | phba->brd_no, adaptermsg); |
| 4122 | } else { |
| 4123 | /* Unknown IOCB command */ |
| 4124 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 4125 | "0334 Unknown IOCB command " |
| 4126 | "Data: x%x, x%x x%x x%x x%x\n" , |
| 4127 | type, irsp->ulpCommand, |
| 4128 | irsp->ulpStatus, |
| 4129 | irsp->ulpIoTag, |
| 4130 | irsp->ulpContext); |
| 4131 | } |
| 4132 | break; |
| 4133 | } |
| 4134 | |
| 4135 | /* |
| 4136 | * The response IOCB has been processed. Update the ring |
| 4137 | * pointer in SLIM. If the port response put pointer has not |
| 4138 | * been updated, sync the pgp->rspPutInx and fetch the new port |
| 4139 | * response put pointer. |
| 4140 | */ |
| 4141 | writel(val: pring->sli.sli3.rspidx, |
| 4142 | addr: &phba->host_gp[pring->ringno].rspGetInx); |
| 4143 | |
| 4144 | if (pring->sli.sli3.rspidx == portRspPut) |
| 4145 | portRspPut = le32_to_cpu(pgp->rspPutInx); |
| 4146 | } |
| 4147 | |
| 4148 | if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { |
| 4149 | pring->stats.iocb_rsp_full++; |
| 4150 | status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); |
| 4151 | writel(val: status, addr: phba->CAregaddr); |
| 4152 | readl(addr: phba->CAregaddr); |
| 4153 | } |
| 4154 | if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { |
| 4155 | pring->flag &= ~LPFC_CALL_RING_AVAILABLE; |
| 4156 | pring->stats.iocb_cmd_empty++; |
| 4157 | |
| 4158 | /* Force update of the local copy of cmdGetInx */ |
| 4159 | pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); |
| 4160 | lpfc_sli_resume_iocb(phba, pring); |
| 4161 | |
| 4162 | if ((pring->lpfc_sli_cmd_available)) |
| 4163 | (pring->lpfc_sli_cmd_available) (phba, pring); |
| 4164 | |
| 4165 | } |
| 4166 | |
| 4167 | phba->fcp_ring_in_use = 0; |
| 4168 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4169 | return rc; |
| 4170 | } |
| 4171 | |
| 4172 | /** |
| 4173 | * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb |
| 4174 | * @phba: Pointer to HBA context object. |
| 4175 | * @pring: Pointer to driver SLI ring object. |
| 4176 | * @rspiocbp: Pointer to driver response IOCB object. |
| 4177 | * |
| 4178 | * This function is called from the worker thread when there is a slow-path |
| 4179 | * response IOCB to process. This function chains all the response iocbs until |
| 4180 | * seeing the iocb with the LE bit set. The function will call |
| 4181 | * lpfc_sli_process_sol_iocb function if the response iocb indicates a |
| 4182 | * completion of a command iocb. The function will call the |
| 4183 | * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. |
| 4184 | * The function frees the resources or calls the completion handler if this |
| 4185 | * iocb is an abort completion. The function returns NULL when the response |
| 4186 | * iocb has the LE bit set and all the chained iocbs are processed, otherwise |
| 4187 | * this function shall chain the iocb on to the iocb_continueq and return the |
| 4188 | * response iocb passed in. |
| 4189 | **/ |
| 4190 | static struct lpfc_iocbq * |
| 4191 | lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 4192 | struct lpfc_iocbq *rspiocbp) |
| 4193 | { |
| 4194 | struct lpfc_iocbq *saveq; |
| 4195 | struct lpfc_iocbq *cmdiocb; |
| 4196 | struct lpfc_iocbq *next_iocb; |
| 4197 | IOCB_t *irsp; |
| 4198 | uint32_t free_saveq; |
| 4199 | u8 cmd_type; |
| 4200 | lpfc_iocb_type type; |
| 4201 | unsigned long iflag; |
| 4202 | u32 ulp_status = get_job_ulpstatus(phba, iocbq: rspiocbp); |
| 4203 | u32 ulp_word4 = get_job_word4(phba, iocbq: rspiocbp); |
| 4204 | u32 ulp_command = get_job_cmnd(phba, iocbq: rspiocbp); |
| 4205 | int rc; |
| 4206 | |
| 4207 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4208 | /* First add the response iocb to the countinueq list */ |
| 4209 | list_add_tail(new: &rspiocbp->list, head: &pring->iocb_continueq); |
| 4210 | pring->iocb_continueq_cnt++; |
| 4211 | |
| 4212 | /* |
| 4213 | * By default, the driver expects to free all resources |
| 4214 | * associated with this iocb completion. |
| 4215 | */ |
| 4216 | free_saveq = 1; |
| 4217 | saveq = list_get_first(&pring->iocb_continueq, |
| 4218 | struct lpfc_iocbq, list); |
| 4219 | list_del_init(entry: &pring->iocb_continueq); |
| 4220 | pring->iocb_continueq_cnt = 0; |
| 4221 | |
| 4222 | pring->stats.iocb_rsp++; |
| 4223 | |
| 4224 | /* |
| 4225 | * If resource errors reported from HBA, reduce |
| 4226 | * queuedepths of the SCSI device. |
| 4227 | */ |
| 4228 | if (ulp_status == IOSTAT_LOCAL_REJECT && |
| 4229 | ((ulp_word4 & IOERR_PARAM_MASK) == |
| 4230 | IOERR_NO_RESOURCES)) { |
| 4231 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4232 | phba->lpfc_rampdown_queue_depth(phba); |
| 4233 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4234 | } |
| 4235 | |
| 4236 | if (ulp_status) { |
| 4237 | /* Rsp ring <ringno> error: IOCB */ |
| 4238 | if (phba->sli_rev < LPFC_SLI_REV4) { |
| 4239 | irsp = &rspiocbp->iocb; |
| 4240 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 4241 | "0328 Rsp Ring %d error: ulp_status x%x " |
| 4242 | "IOCB Data: " |
| 4243 | "x%08x x%08x x%08x x%08x " |
| 4244 | "x%08x x%08x x%08x x%08x " |
| 4245 | "x%08x x%08x x%08x x%08x " |
| 4246 | "x%08x x%08x x%08x x%08x\n" , |
| 4247 | pring->ringno, ulp_status, |
| 4248 | get_job_ulpword(rspiocbp, 0), |
| 4249 | get_job_ulpword(rspiocbp, 1), |
| 4250 | get_job_ulpword(rspiocbp, 2), |
| 4251 | get_job_ulpword(rspiocbp, 3), |
| 4252 | get_job_ulpword(rspiocbp, 4), |
| 4253 | get_job_ulpword(rspiocbp, 5), |
| 4254 | *(((uint32_t *)irsp) + 6), |
| 4255 | *(((uint32_t *)irsp) + 7), |
| 4256 | *(((uint32_t *)irsp) + 8), |
| 4257 | *(((uint32_t *)irsp) + 9), |
| 4258 | *(((uint32_t *)irsp) + 10), |
| 4259 | *(((uint32_t *)irsp) + 11), |
| 4260 | *(((uint32_t *)irsp) + 12), |
| 4261 | *(((uint32_t *)irsp) + 13), |
| 4262 | *(((uint32_t *)irsp) + 14), |
| 4263 | *(((uint32_t *)irsp) + 15)); |
| 4264 | } else { |
| 4265 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 4266 | "0321 Rsp Ring %d error: " |
| 4267 | "IOCB Data: " |
| 4268 | "x%x x%x x%x x%x\n" , |
| 4269 | pring->ringno, |
| 4270 | rspiocbp->wcqe_cmpl.word0, |
| 4271 | rspiocbp->wcqe_cmpl.total_data_placed, |
| 4272 | rspiocbp->wcqe_cmpl.parameter, |
| 4273 | rspiocbp->wcqe_cmpl.word3); |
| 4274 | } |
| 4275 | } |
| 4276 | |
| 4277 | |
| 4278 | /* |
| 4279 | * Fetch the iocb command type and call the correct completion |
| 4280 | * routine. Solicited and Unsolicited IOCBs on the ELS ring |
| 4281 | * get freed back to the lpfc_iocb_list by the discovery |
| 4282 | * kernel thread. |
| 4283 | */ |
| 4284 | cmd_type = ulp_command & CMD_IOCB_MASK; |
| 4285 | type = lpfc_sli_iocb_cmd_type(iocb_cmnd: cmd_type); |
| 4286 | switch (type) { |
| 4287 | case LPFC_SOL_IOCB: |
| 4288 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4289 | rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); |
| 4290 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4291 | break; |
| 4292 | case LPFC_UNSOL_IOCB: |
| 4293 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4294 | rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); |
| 4295 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4296 | if (!rc) |
| 4297 | free_saveq = 0; |
| 4298 | break; |
| 4299 | case LPFC_ABORT_IOCB: |
| 4300 | cmdiocb = NULL; |
| 4301 | if (ulp_command != CMD_XRI_ABORTED_CX) |
| 4302 | cmdiocb = lpfc_sli_iocbq_lookup(phba, pring, |
| 4303 | prspiocb: saveq); |
| 4304 | if (cmdiocb) { |
| 4305 | /* Call the specified completion routine */ |
| 4306 | if (cmdiocb->cmd_cmpl) { |
| 4307 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4308 | cmdiocb->cmd_cmpl(phba, cmdiocb, saveq); |
| 4309 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4310 | } else { |
| 4311 | __lpfc_sli_release_iocbq(phba, iocbq: cmdiocb); |
| 4312 | } |
| 4313 | } |
| 4314 | break; |
| 4315 | case LPFC_UNKNOWN_IOCB: |
| 4316 | if (ulp_command == CMD_ADAPTER_MSG) { |
| 4317 | char adaptermsg[LPFC_MAX_ADPTMSG]; |
| 4318 | |
| 4319 | memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); |
| 4320 | memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe, |
| 4321 | MAX_MSG_DATA); |
| 4322 | dev_warn(&((phba->pcidev)->dev), |
| 4323 | "lpfc%d: %s\n" , |
| 4324 | phba->brd_no, adaptermsg); |
| 4325 | } else { |
| 4326 | /* Unknown command */ |
| 4327 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 4328 | "0335 Unknown IOCB " |
| 4329 | "command Data: x%x " |
| 4330 | "x%x x%x x%x\n" , |
| 4331 | ulp_command, |
| 4332 | ulp_status, |
| 4333 | get_wqe_reqtag(rspiocbp), |
| 4334 | get_job_ulpcontext(phba, rspiocbp)); |
| 4335 | } |
| 4336 | break; |
| 4337 | } |
| 4338 | |
| 4339 | if (free_saveq) { |
| 4340 | list_for_each_entry_safe(rspiocbp, next_iocb, |
| 4341 | &saveq->list, list) { |
| 4342 | list_del_init(entry: &rspiocbp->list); |
| 4343 | __lpfc_sli_release_iocbq(phba, iocbq: rspiocbp); |
| 4344 | } |
| 4345 | __lpfc_sli_release_iocbq(phba, iocbq: saveq); |
| 4346 | } |
| 4347 | rspiocbp = NULL; |
| 4348 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4349 | return rspiocbp; |
| 4350 | } |
| 4351 | |
| 4352 | /** |
| 4353 | * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs |
| 4354 | * @phba: Pointer to HBA context object. |
| 4355 | * @pring: Pointer to driver SLI ring object. |
| 4356 | * @mask: Host attention register mask for this ring. |
| 4357 | * |
| 4358 | * This routine wraps the actual slow_ring event process routine from the |
| 4359 | * API jump table function pointer from the lpfc_hba struct. |
| 4360 | **/ |
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	/* Dispatch through the per-HBA API jump table; the pointer selects
	 * the SLI3 (_s3) or SLI4 (_s4) slow-ring handler installed at setup.
	 */
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
| 4367 | |
| 4368 | /** |
| 4369 | * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings |
| 4370 | * @phba: Pointer to HBA context object. |
| 4371 | * @pring: Pointer to driver SLI ring object. |
| 4372 | * @mask: Host attention register mask for this ring. |
| 4373 | * |
| 4374 | * This function is called from the worker thread when there is a ring event |
| 4375 | * for non-fcp rings. The caller does not hold any lock. The function will |
| 4376 | * remove each response iocb in the response ring and calls the handle |
| 4377 | * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. |
| 4378 | **/ |
| 4379 | static void |
| 4380 | lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, |
| 4381 | struct lpfc_sli_ring *pring, uint32_t mask) |
| 4382 | { |
| 4383 | struct lpfc_pgp *pgp; |
| 4384 | IOCB_t *entry; |
| 4385 | IOCB_t *irsp = NULL; |
| 4386 | struct lpfc_iocbq *rspiocbp = NULL; |
| 4387 | uint32_t portRspPut, portRspMax; |
| 4388 | unsigned long iflag; |
| 4389 | uint32_t status; |
| 4390 | |
| 4391 | pgp = &phba->port_gp[pring->ringno]; |
| 4392 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4393 | pring->stats.iocb_event++; |
| 4394 | |
| 4395 | /* |
| 4396 | * The next available response entry should never exceed the maximum |
| 4397 | * entries. If it does, treat it as an adapter hardware error. |
| 4398 | */ |
| 4399 | portRspMax = pring->sli.sli3.numRiocb; |
| 4400 | portRspPut = le32_to_cpu(pgp->rspPutInx); |
| 4401 | if (portRspPut >= portRspMax) { |
| 4402 | /* |
| 4403 | * Ring <ringno> handler: portRspPut <portRspPut> is bigger than |
| 4404 | * rsp ring <portRspMax> |
| 4405 | */ |
| 4406 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 4407 | "0303 Ring %d handler: portRspPut %d " |
| 4408 | "is bigger than rsp ring %d\n" , |
| 4409 | pring->ringno, portRspPut, portRspMax); |
| 4410 | |
| 4411 | phba->link_state = LPFC_HBA_ERROR; |
| 4412 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4413 | |
| 4414 | phba->work_hs = HS_FFER3; |
| 4415 | lpfc_handle_eratt(phba); |
| 4416 | |
| 4417 | return; |
| 4418 | } |
| 4419 | |
| 4420 | rmb(); |
| 4421 | while (pring->sli.sli3.rspidx != portRspPut) { |
| 4422 | /* |
| 4423 | * Build a completion list and call the appropriate handler. |
| 4424 | * The process is to get the next available response iocb, get |
| 4425 | * a free iocb from the list, copy the response data into the |
| 4426 | * free iocb, insert to the continuation list, and update the |
| 4427 | * next response index to slim. This process makes response |
| 4428 | * iocb's in the ring available to DMA as fast as possible but |
| 4429 | * pays a penalty for a copy operation. Since the iocb is |
| 4430 | * only 32 bytes, this penalty is considered small relative to |
| 4431 | * the PCI reads for register values and a slim write. When |
| 4432 | * the ulpLe field is set, the entire Command has been |
| 4433 | * received. |
| 4434 | */ |
| 4435 | entry = lpfc_resp_iocb(phba, pring); |
| 4436 | |
| 4437 | phba->last_completion_time = jiffies; |
| 4438 | rspiocbp = __lpfc_sli_get_iocbq(phba); |
| 4439 | if (rspiocbp == NULL) { |
| 4440 | printk(KERN_ERR "%s: out of buffers! Failing " |
| 4441 | "completion.\n" , __func__); |
| 4442 | break; |
| 4443 | } |
| 4444 | |
| 4445 | lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, |
| 4446 | phba->iocb_rsp_size); |
| 4447 | irsp = &rspiocbp->iocb; |
| 4448 | |
| 4449 | if (++pring->sli.sli3.rspidx >= portRspMax) |
| 4450 | pring->sli.sli3.rspidx = 0; |
| 4451 | |
| 4452 | if (pring->ringno == LPFC_ELS_RING) { |
| 4453 | lpfc_debugfs_slow_ring_trc(phba, |
| 4454 | "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x" , |
| 4455 | *(((uint32_t *) irsp) + 4), |
| 4456 | *(((uint32_t *) irsp) + 6), |
| 4457 | *(((uint32_t *) irsp) + 7)); |
| 4458 | } |
| 4459 | |
| 4460 | writel(val: pring->sli.sli3.rspidx, |
| 4461 | addr: &phba->host_gp[pring->ringno].rspGetInx); |
| 4462 | |
| 4463 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4464 | /* Handle the response IOCB */ |
| 4465 | rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); |
| 4466 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4467 | |
| 4468 | /* |
| 4469 | * If the port response put pointer has not been updated, sync |
| 4470 | * the pgp->rspPutInx in the MAILBOX_tand fetch the new port |
| 4471 | * response put pointer. |
| 4472 | */ |
| 4473 | if (pring->sli.sli3.rspidx == portRspPut) { |
| 4474 | portRspPut = le32_to_cpu(pgp->rspPutInx); |
| 4475 | } |
| 4476 | } /* while (pring->sli.sli3.rspidx != portRspPut) */ |
| 4477 | |
| 4478 | if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { |
| 4479 | /* At least one response entry has been freed */ |
| 4480 | pring->stats.iocb_rsp_full++; |
| 4481 | /* SET RxRE_RSP in Chip Att register */ |
| 4482 | status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); |
| 4483 | writel(val: status, addr: phba->CAregaddr); |
| 4484 | readl(addr: phba->CAregaddr); /* flush */ |
| 4485 | } |
| 4486 | if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { |
| 4487 | pring->flag &= ~LPFC_CALL_RING_AVAILABLE; |
| 4488 | pring->stats.iocb_cmd_empty++; |
| 4489 | |
| 4490 | /* Force update of the local copy of cmdGetInx */ |
| 4491 | pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); |
| 4492 | lpfc_sli_resume_iocb(phba, pring); |
| 4493 | |
| 4494 | if ((pring->lpfc_sli_cmd_available)) |
| 4495 | (pring->lpfc_sli_cmd_available) (phba, pring); |
| 4496 | |
| 4497 | } |
| 4498 | |
| 4499 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4500 | return; |
| 4501 | } |
| 4502 | |
| 4503 | /** |
| 4504 | * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events |
| 4505 | * @phba: Pointer to HBA context object. |
| 4506 | * @pring: Pointer to driver SLI ring object. |
| 4507 | * @mask: Host attention register mask for this ring. |
| 4508 | * |
| 4509 | * This function is called from the worker thread when there is a pending |
| 4510 | * ELS response iocb on the driver internal slow-path response iocb worker |
| 4511 | * queue. The caller does not hold any lock. The function will remove each |
| 4512 | * response iocb from the response worker queue and calls the handle |
| 4513 | * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. |
| 4514 | **/ |
| 4515 | static void |
| 4516 | lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, |
| 4517 | struct lpfc_sli_ring *pring, uint32_t mask) |
| 4518 | { |
| 4519 | struct lpfc_iocbq *irspiocbq; |
| 4520 | struct hbq_dmabuf *dmabuf; |
| 4521 | struct lpfc_cq_event *cq_event; |
| 4522 | unsigned long iflag; |
| 4523 | int count = 0; |
| 4524 | |
| 4525 | clear_bit(nr: HBA_SP_QUEUE_EVT, addr: &phba->hba_flag); |
| 4526 | while (!list_empty(head: &phba->sli4_hba.sp_queue_event)) { |
| 4527 | /* Get the response iocb from the head of work queue */ |
| 4528 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 4529 | list_remove_head(&phba->sli4_hba.sp_queue_event, |
| 4530 | cq_event, struct lpfc_cq_event, list); |
| 4531 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 4532 | |
| 4533 | switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { |
| 4534 | case CQE_CODE_COMPL_WQE: |
| 4535 | irspiocbq = container_of(cq_event, struct lpfc_iocbq, |
| 4536 | cq_event); |
| 4537 | /* Translate ELS WCQE to response IOCBQ */ |
| 4538 | irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba, |
| 4539 | rspiocbq: irspiocbq); |
| 4540 | if (irspiocbq) |
| 4541 | lpfc_sli_sp_handle_rspiocb(phba, pring, |
| 4542 | rspiocbp: irspiocbq); |
| 4543 | count++; |
| 4544 | break; |
| 4545 | case CQE_CODE_RECEIVE: |
| 4546 | case CQE_CODE_RECEIVE_V1: |
| 4547 | dmabuf = container_of(cq_event, struct hbq_dmabuf, |
| 4548 | cq_event); |
| 4549 | lpfc_sli4_handle_received_buffer(phba, dmabuf); |
| 4550 | count++; |
| 4551 | break; |
| 4552 | default: |
| 4553 | break; |
| 4554 | } |
| 4555 | |
| 4556 | /* Limit the number of events to 64 to avoid soft lockups */ |
| 4557 | if (count == 64) |
| 4558 | break; |
| 4559 | } |
| 4560 | } |
| 4561 | |
| 4562 | /** |
| 4563 | * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring |
| 4564 | * @phba: Pointer to HBA context object. |
| 4565 | * @pring: Pointer to driver SLI ring object. |
| 4566 | * |
| 4567 | * This function aborts all iocbs in the given ring and frees all the iocb |
| 4568 | * objects in txq. This function issues an abort iocb for all the iocb commands |
| 4569 | * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before |
| 4570 | * the return of this function. The caller is not required to hold any locks. |
| 4571 | **/ |
| 4572 | void |
| 4573 | lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
| 4574 | { |
| 4575 | LIST_HEAD(tx_completions); |
| 4576 | LIST_HEAD(txcmplq_completions); |
| 4577 | struct lpfc_iocbq *iocb, *next_iocb; |
| 4578 | int offline; |
| 4579 | |
| 4580 | if (pring->ringno == LPFC_ELS_RING) { |
| 4581 | lpfc_fabric_abort_hba(phba); |
| 4582 | } |
| 4583 | offline = pci_channel_offline(pdev: phba->pcidev); |
| 4584 | |
| 4585 | /* Error everything on txq and txcmplq |
| 4586 | * First do the txq. |
| 4587 | */ |
| 4588 | if (phba->sli_rev >= LPFC_SLI_REV4) { |
| 4589 | spin_lock_irq(lock: &pring->ring_lock); |
| 4590 | list_splice_init(list: &pring->txq, head: &tx_completions); |
| 4591 | pring->txq_cnt = 0; |
| 4592 | |
| 4593 | if (offline) { |
| 4594 | list_splice_init(list: &pring->txcmplq, |
| 4595 | head: &txcmplq_completions); |
| 4596 | } else { |
| 4597 | /* Next issue ABTS for everything on the txcmplq */ |
| 4598 | list_for_each_entry_safe(iocb, next_iocb, |
| 4599 | &pring->txcmplq, list) |
| 4600 | lpfc_sli_issue_abort_iotag(phba, pring, |
| 4601 | iocb, NULL); |
| 4602 | } |
| 4603 | spin_unlock_irq(lock: &pring->ring_lock); |
| 4604 | } else { |
| 4605 | spin_lock_irq(lock: &phba->hbalock); |
| 4606 | list_splice_init(list: &pring->txq, head: &tx_completions); |
| 4607 | pring->txq_cnt = 0; |
| 4608 | |
| 4609 | if (offline) { |
| 4610 | list_splice_init(list: &pring->txcmplq, head: &txcmplq_completions); |
| 4611 | } else { |
| 4612 | /* Next issue ABTS for everything on the txcmplq */ |
| 4613 | list_for_each_entry_safe(iocb, next_iocb, |
| 4614 | &pring->txcmplq, list) |
| 4615 | lpfc_sli_issue_abort_iotag(phba, pring, |
| 4616 | iocb, NULL); |
| 4617 | } |
| 4618 | spin_unlock_irq(lock: &phba->hbalock); |
| 4619 | } |
| 4620 | |
| 4621 | if (offline) { |
| 4622 | /* Cancel all the IOCBs from the completions list */ |
| 4623 | lpfc_sli_cancel_iocbs(phba, iocblist: &txcmplq_completions, |
| 4624 | IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); |
| 4625 | } else { |
| 4626 | /* Make sure HBA is alive */ |
| 4627 | lpfc_issue_hb_tmo(phba); |
| 4628 | } |
| 4629 | /* Cancel all the IOCBs from the completions list */ |
| 4630 | lpfc_sli_cancel_iocbs(phba, iocblist: &tx_completions, IOSTAT_LOCAL_REJECT, |
| 4631 | IOERR_SLI_ABORTED); |
| 4632 | } |
| 4633 | |
| 4634 | /** |
| 4635 | * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings |
| 4636 | * @phba: Pointer to HBA context object. |
| 4637 | * |
| 4638 | * This function aborts all iocbs in FCP rings and frees all the iocb |
| 4639 | * objects in txq. This function issues an abort iocb for all the iocb commands |
| 4640 | * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before |
| 4641 | * the return of this function. The caller is not required to hold any locks. |
| 4642 | **/ |
| 4643 | void |
| 4644 | lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) |
| 4645 | { |
| 4646 | struct lpfc_sli *psli = &phba->sli; |
| 4647 | struct lpfc_sli_ring *pring; |
| 4648 | uint32_t i; |
| 4649 | |
| 4650 | /* Look on all the FCP Rings for the iotag */ |
| 4651 | if (phba->sli_rev >= LPFC_SLI_REV4) { |
| 4652 | for (i = 0; i < phba->cfg_hdw_queue; i++) { |
| 4653 | pring = phba->sli4_hba.hdwq[i].io_wq->pring; |
| 4654 | lpfc_sli_abort_iocb_ring(phba, pring); |
| 4655 | } |
| 4656 | } else { |
| 4657 | pring = &psli->sli3_ring[LPFC_FCP_RING]; |
| 4658 | lpfc_sli_abort_iocb_ring(phba, pring); |
| 4659 | } |
| 4660 | } |
| 4661 | |
| 4662 | /** |
| 4663 | * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring |
| 4664 | * @phba: Pointer to HBA context object. |
| 4665 | * |
| 4666 | * This function flushes all iocbs in the IO ring and frees all the iocb |
| 4667 | * objects in txq and txcmplq. This function will not issue abort iocbs |
| 4668 | * for all the iocb commands in txcmplq, they will just be returned with |
| 4669 | * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI |
| 4670 | * slot has been permanently disabled. |
| 4671 | **/ |
| 4672 | void |
| 4673 | lpfc_sli_flush_io_rings(struct lpfc_hba *phba) |
| 4674 | { |
| 4675 | LIST_HEAD(txq); |
| 4676 | LIST_HEAD(txcmplq); |
| 4677 | struct lpfc_sli *psli = &phba->sli; |
| 4678 | struct lpfc_sli_ring *pring; |
| 4679 | uint32_t i; |
| 4680 | struct lpfc_iocbq *piocb, *next_iocb; |
| 4681 | |
| 4682 | /* Indicate the I/O queues are flushed */ |
| 4683 | set_bit(nr: HBA_IOQ_FLUSH, addr: &phba->hba_flag); |
| 4684 | |
| 4685 | /* Look on all the FCP Rings for the iotag */ |
| 4686 | if (phba->sli_rev >= LPFC_SLI_REV4) { |
| 4687 | for (i = 0; i < phba->cfg_hdw_queue; i++) { |
| 4688 | if (!phba->sli4_hba.hdwq || |
| 4689 | !phba->sli4_hba.hdwq[i].io_wq) { |
| 4690 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 4691 | "7777 hdwq's deleted %lx " |
| 4692 | "%lx %x %x\n" , |
| 4693 | phba->pport->load_flag, |
| 4694 | phba->hba_flag, |
| 4695 | phba->link_state, |
| 4696 | phba->sli.sli_flag); |
| 4697 | return; |
| 4698 | } |
| 4699 | pring = phba->sli4_hba.hdwq[i].io_wq->pring; |
| 4700 | |
| 4701 | spin_lock_irq(lock: &pring->ring_lock); |
| 4702 | /* Retrieve everything on txq */ |
| 4703 | list_splice_init(list: &pring->txq, head: &txq); |
| 4704 | list_for_each_entry_safe(piocb, next_iocb, |
| 4705 | &pring->txcmplq, list) |
| 4706 | piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; |
| 4707 | /* Retrieve everything on the txcmplq */ |
| 4708 | list_splice_init(list: &pring->txcmplq, head: &txcmplq); |
| 4709 | pring->txq_cnt = 0; |
| 4710 | pring->txcmplq_cnt = 0; |
| 4711 | spin_unlock_irq(lock: &pring->ring_lock); |
| 4712 | |
| 4713 | /* Flush the txq */ |
| 4714 | lpfc_sli_cancel_iocbs(phba, iocblist: &txq, |
| 4715 | IOSTAT_LOCAL_REJECT, |
| 4716 | IOERR_SLI_DOWN); |
| 4717 | /* Flush the txcmplq */ |
| 4718 | lpfc_sli_cancel_iocbs(phba, iocblist: &txcmplq, |
| 4719 | IOSTAT_LOCAL_REJECT, |
| 4720 | IOERR_SLI_DOWN); |
| 4721 | if (unlikely(pci_channel_offline(phba->pcidev))) |
| 4722 | lpfc_sli4_io_xri_aborted(phba, NULL, idx: 0); |
| 4723 | } |
| 4724 | } else { |
| 4725 | pring = &psli->sli3_ring[LPFC_FCP_RING]; |
| 4726 | |
| 4727 | spin_lock_irq(lock: &phba->hbalock); |
| 4728 | /* Retrieve everything on txq */ |
| 4729 | list_splice_init(list: &pring->txq, head: &txq); |
| 4730 | list_for_each_entry_safe(piocb, next_iocb, |
| 4731 | &pring->txcmplq, list) |
| 4732 | piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; |
| 4733 | /* Retrieve everything on the txcmplq */ |
| 4734 | list_splice_init(list: &pring->txcmplq, head: &txcmplq); |
| 4735 | pring->txq_cnt = 0; |
| 4736 | pring->txcmplq_cnt = 0; |
| 4737 | spin_unlock_irq(lock: &phba->hbalock); |
| 4738 | |
| 4739 | /* Flush the txq */ |
| 4740 | lpfc_sli_cancel_iocbs(phba, iocblist: &txq, IOSTAT_LOCAL_REJECT, |
| 4741 | IOERR_SLI_DOWN); |
| 4742 | /* Flush the txcmpq */ |
| 4743 | lpfc_sli_cancel_iocbs(phba, iocblist: &txcmplq, IOSTAT_LOCAL_REJECT, |
| 4744 | IOERR_SLI_DOWN); |
| 4745 | } |
| 4746 | } |
| 4747 | |
| 4748 | /** |
| 4749 | * lpfc_sli_brdready_s3 - Check for sli3 host ready status |
| 4750 | * @phba: Pointer to HBA context object. |
| 4751 | * @mask: Bit mask to be checked. |
| 4752 | * |
| 4753 | * This function reads the host status register and compares |
| 4754 | * with the provided bit mask to check if HBA completed |
| 4755 | * the restart. This function will wait in a loop for the |
| 4756 | * HBA to complete restart. If the HBA does not restart within |
| 4757 | * 15 iterations, the function will reset the HBA again. The |
 * function returns 1 if the HBA fails to restart; otherwise it returns
| 4759 | * zero. |
| 4760 | **/ |
| 4761 | static int |
| 4762 | lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) |
| 4763 | { |
| 4764 | uint32_t status; |
| 4765 | int i = 0; |
| 4766 | int retval = 0; |
| 4767 | |
| 4768 | /* Read the HBA Host Status Register */ |
| 4769 | if (lpfc_readl(addr: phba->HSregaddr, data: &status)) |
| 4770 | return 1; |
| 4771 | |
| 4772 | set_bit(nr: HBA_NEEDS_CFG_PORT, addr: &phba->hba_flag); |
| 4773 | |
| 4774 | /* |
| 4775 | * Check status register every 100ms for 5 retries, then every |
| 4776 | * 500ms for 5, then every 2.5 sec for 5, then reset board and |
| 4777 | * every 2.5 sec for 4. |
| 4778 | * Break our of the loop if errors occurred during init. |
| 4779 | */ |
| 4780 | while (((status & mask) != mask) && |
| 4781 | !(status & HS_FFERM) && |
| 4782 | i++ < 20) { |
| 4783 | |
| 4784 | if (i <= 5) |
| 4785 | msleep(msecs: 10); |
| 4786 | else if (i <= 10) |
| 4787 | msleep(msecs: 500); |
| 4788 | else |
| 4789 | msleep(msecs: 2500); |
| 4790 | |
| 4791 | if (i == 15) { |
| 4792 | /* Do post */ |
| 4793 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; |
| 4794 | lpfc_sli_brdrestart(phba); |
| 4795 | } |
| 4796 | /* Read the HBA Host Status Register */ |
| 4797 | if (lpfc_readl(addr: phba->HSregaddr, data: &status)) { |
| 4798 | retval = 1; |
| 4799 | break; |
| 4800 | } |
| 4801 | } |
| 4802 | |
| 4803 | /* Check to see if any errors occurred during init */ |
| 4804 | if ((status & HS_FFERM) || (i >= 20)) { |
| 4805 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 4806 | "2751 Adapter failed to restart, " |
| 4807 | "status reg x%x, FW Data: A8 x%x AC x%x\n" , |
| 4808 | status, |
| 4809 | readl(phba->MBslimaddr + 0xa8), |
| 4810 | readl(phba->MBslimaddr + 0xac)); |
| 4811 | phba->link_state = LPFC_HBA_ERROR; |
| 4812 | retval = 1; |
| 4813 | } |
| 4814 | |
| 4815 | return retval; |
| 4816 | } |
| 4817 | |
| 4818 | /** |
| 4819 | * lpfc_sli_brdready_s4 - Check for sli4 host ready status |
| 4820 | * @phba: Pointer to HBA context object. |
| 4821 | * @mask: Bit mask to be checked. |
| 4822 | * |
| 4823 | * This function checks the host status register to check if HBA is |
| 4824 | * ready. This function will wait in a loop for the HBA to be ready |
| 4825 | * If the HBA is not ready , the function will will reset the HBA PCI |
| 4826 | * function again. The function returns 1 when HBA fail to be ready |
| 4827 | * otherwise returns zero. |
| 4828 | **/ |
| 4829 | static int |
| 4830 | lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) |
| 4831 | { |
| 4832 | uint32_t status; |
| 4833 | int retval = 0; |
| 4834 | |
| 4835 | /* Read the HBA Host Status Register */ |
| 4836 | status = lpfc_sli4_post_status_check(phba); |
| 4837 | |
| 4838 | if (status) { |
| 4839 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; |
| 4840 | lpfc_sli_brdrestart(phba); |
| 4841 | status = lpfc_sli4_post_status_check(phba); |
| 4842 | } |
| 4843 | |
| 4844 | /* Check to see if any errors occurred during init */ |
| 4845 | if (status) { |
| 4846 | phba->link_state = LPFC_HBA_ERROR; |
| 4847 | retval = 1; |
| 4848 | } else |
| 4849 | phba->sli4_hba.intr_enable = 0; |
| 4850 | |
| 4851 | clear_bit(nr: HBA_SETUP, addr: &phba->hba_flag); |
| 4852 | return retval; |
| 4853 | } |
| 4854 | |
| 4855 | /** |
| 4856 | * lpfc_sli_brdready - Wrapper func for checking the hba readyness |
| 4857 | * @phba: Pointer to HBA context object. |
| 4858 | * @mask: Bit mask to be checked. |
| 4859 | * |
| 4860 | * This routine wraps the actual SLI3 or SLI4 hba readyness check routine |
| 4861 | * from the API jump table function pointer from the lpfc_hba struct. |
| 4862 | **/ |
| 4863 | int |
| 4864 | lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) |
| 4865 | { |
| 4866 | return phba->lpfc_sli_brdready(phba, mask); |
| 4867 | } |
| 4868 | |
| 4869 | #define BARRIER_TEST_PATTERN (0xdeadbeef) |
| 4870 | |
| 4871 | /** |
| 4872 | * lpfc_reset_barrier - Make HBA ready for HBA reset |
| 4873 | * @phba: Pointer to HBA context object. |
| 4874 | * |
| 4875 | * This function is called before resetting an HBA. This function is called |
| 4876 | * with hbalock held and requests HBA to quiesce DMAs before a reset. |
| 4877 | **/ |
| 4878 | void lpfc_reset_barrier(struct lpfc_hba *phba) |
| 4879 | { |
| 4880 | uint32_t __iomem *resp_buf; |
| 4881 | uint32_t __iomem *mbox_buf; |
| 4882 | volatile struct MAILBOX_word0 mbox; |
| 4883 | uint32_t hc_copy, ha_copy, resp_data; |
| 4884 | int i; |
| 4885 | uint8_t hdrtype; |
| 4886 | |
| 4887 | lockdep_assert_held(&phba->hbalock); |
| 4888 | |
| 4889 | pci_read_config_byte(dev: phba->pcidev, PCI_HEADER_TYPE, val: &hdrtype); |
| 4890 | if (hdrtype != PCI_HEADER_TYPE_MFD || |
| 4891 | (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && |
| 4892 | FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) |
| 4893 | return; |
| 4894 | |
| 4895 | /* |
| 4896 | * Tell the other part of the chip to suspend temporarily all |
| 4897 | * its DMA activity. |
| 4898 | */ |
| 4899 | resp_buf = phba->MBslimaddr; |
| 4900 | |
| 4901 | /* Disable the error attention */ |
| 4902 | if (lpfc_readl(addr: phba->HCregaddr, data: &hc_copy)) |
| 4903 | return; |
| 4904 | writel(val: (hc_copy & ~HC_ERINT_ENA), addr: phba->HCregaddr); |
| 4905 | readl(addr: phba->HCregaddr); /* flush */ |
| 4906 | phba->link_flag |= LS_IGNORE_ERATT; |
| 4907 | |
| 4908 | if (lpfc_readl(addr: phba->HAregaddr, data: &ha_copy)) |
| 4909 | return; |
| 4910 | if (ha_copy & HA_ERATT) { |
| 4911 | /* Clear Chip error bit */ |
| 4912 | writel(HA_ERATT, addr: phba->HAregaddr); |
| 4913 | phba->pport->stopped = 1; |
| 4914 | } |
| 4915 | |
| 4916 | mbox.word0 = 0; |
| 4917 | mbox.mbxCommand = MBX_KILL_BOARD; |
| 4918 | mbox.mbxOwner = OWN_CHIP; |
| 4919 | |
| 4920 | writel(BARRIER_TEST_PATTERN, addr: (resp_buf + 1)); |
| 4921 | mbox_buf = phba->MBslimaddr; |
| 4922 | writel(val: mbox.word0, addr: mbox_buf); |
| 4923 | |
| 4924 | for (i = 0; i < 50; i++) { |
| 4925 | if (lpfc_readl(addr: (resp_buf + 1), data: &resp_data)) |
| 4926 | return; |
| 4927 | if (resp_data != ~(BARRIER_TEST_PATTERN)) |
| 4928 | mdelay(1); |
| 4929 | else |
| 4930 | break; |
| 4931 | } |
| 4932 | resp_data = 0; |
| 4933 | if (lpfc_readl(addr: (resp_buf + 1), data: &resp_data)) |
| 4934 | return; |
| 4935 | if (resp_data != ~(BARRIER_TEST_PATTERN)) { |
| 4936 | if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || |
| 4937 | phba->pport->stopped) |
| 4938 | goto restore_hc; |
| 4939 | else |
| 4940 | goto clear_errat; |
| 4941 | } |
| 4942 | |
| 4943 | mbox.mbxOwner = OWN_HOST; |
| 4944 | resp_data = 0; |
| 4945 | for (i = 0; i < 500; i++) { |
| 4946 | if (lpfc_readl(addr: resp_buf, data: &resp_data)) |
| 4947 | return; |
| 4948 | if (resp_data != mbox.word0) |
| 4949 | mdelay(1); |
| 4950 | else |
| 4951 | break; |
| 4952 | } |
| 4953 | |
| 4954 | clear_errat: |
| 4955 | |
| 4956 | while (++i < 500) { |
| 4957 | if (lpfc_readl(addr: phba->HAregaddr, data: &ha_copy)) |
| 4958 | return; |
| 4959 | if (!(ha_copy & HA_ERATT)) |
| 4960 | mdelay(1); |
| 4961 | else |
| 4962 | break; |
| 4963 | } |
| 4964 | |
| 4965 | if (readl(addr: phba->HAregaddr) & HA_ERATT) { |
| 4966 | writel(HA_ERATT, addr: phba->HAregaddr); |
| 4967 | phba->pport->stopped = 1; |
| 4968 | } |
| 4969 | |
| 4970 | restore_hc: |
| 4971 | phba->link_flag &= ~LS_IGNORE_ERATT; |
| 4972 | writel(val: hc_copy, addr: phba->HCregaddr); |
| 4973 | readl(addr: phba->HCregaddr); /* flush */ |
| 4974 | } |
| 4975 | |
| 4976 | /** |
| 4977 | * lpfc_sli_brdkill - Issue a kill_board mailbox command |
| 4978 | * @phba: Pointer to HBA context object. |
| 4979 | * |
| 4980 | * This function issues a kill_board mailbox command and waits for |
| 4981 | * the error attention interrupt. This function is called for stopping |
| 4982 | * the firmware processing. The caller is not required to hold any |
| 4983 | * locks. This function calls lpfc_hba_down_post function to free |
| 4984 | * any pending commands after the kill. The function will return 1 when it |
| 4985 | * fails to kill the board else will return 0. |
| 4986 | **/ |
| 4987 | int |
| 4988 | lpfc_sli_brdkill(struct lpfc_hba *phba) |
| 4989 | { |
| 4990 | struct lpfc_sli *psli; |
| 4991 | LPFC_MBOXQ_t *pmb; |
| 4992 | uint32_t status; |
| 4993 | uint32_t ha_copy; |
| 4994 | int retval; |
| 4995 | int i = 0; |
| 4996 | |
| 4997 | psli = &phba->sli; |
| 4998 | |
| 4999 | /* Kill HBA */ |
| 5000 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 5001 | "0329 Kill HBA Data: x%x x%x\n" , |
| 5002 | phba->pport->port_state, psli->sli_flag); |
| 5003 | |
| 5004 | pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 5005 | if (!pmb) |
| 5006 | return 1; |
| 5007 | |
| 5008 | /* Disable the error attention */ |
| 5009 | spin_lock_irq(lock: &phba->hbalock); |
| 5010 | if (lpfc_readl(addr: phba->HCregaddr, data: &status)) { |
| 5011 | spin_unlock_irq(lock: &phba->hbalock); |
| 5012 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
| 5013 | return 1; |
| 5014 | } |
| 5015 | status &= ~HC_ERINT_ENA; |
| 5016 | writel(val: status, addr: phba->HCregaddr); |
| 5017 | readl(addr: phba->HCregaddr); /* flush */ |
| 5018 | phba->link_flag |= LS_IGNORE_ERATT; |
| 5019 | spin_unlock_irq(lock: &phba->hbalock); |
| 5020 | |
| 5021 | lpfc_kill_board(phba, pmb); |
| 5022 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 5023 | retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); |
| 5024 | |
| 5025 | if (retval != MBX_SUCCESS) { |
| 5026 | if (retval != MBX_BUSY) |
| 5027 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
| 5028 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 5029 | "2752 KILL_BOARD command failed retval %d\n" , |
| 5030 | retval); |
| 5031 | spin_lock_irq(lock: &phba->hbalock); |
| 5032 | phba->link_flag &= ~LS_IGNORE_ERATT; |
| 5033 | spin_unlock_irq(lock: &phba->hbalock); |
| 5034 | return 1; |
| 5035 | } |
| 5036 | |
| 5037 | spin_lock_irq(lock: &phba->hbalock); |
| 5038 | psli->sli_flag &= ~LPFC_SLI_ACTIVE; |
| 5039 | spin_unlock_irq(lock: &phba->hbalock); |
| 5040 | |
| 5041 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
| 5042 | |
| 5043 | /* There is no completion for a KILL_BOARD mbox cmd. Check for an error |
| 5044 | * attention every 100ms for 3 seconds. If we don't get ERATT after |
| 5045 | * 3 seconds we still set HBA_ERROR state because the status of the |
| 5046 | * board is now undefined. |
| 5047 | */ |
| 5048 | if (lpfc_readl(addr: phba->HAregaddr, data: &ha_copy)) |
| 5049 | return 1; |
| 5050 | while ((i++ < 30) && !(ha_copy & HA_ERATT)) { |
| 5051 | mdelay(100); |
| 5052 | if (lpfc_readl(addr: phba->HAregaddr, data: &ha_copy)) |
| 5053 | return 1; |
| 5054 | } |
| 5055 | |
| 5056 | timer_delete_sync(timer: &psli->mbox_tmo); |
| 5057 | if (ha_copy & HA_ERATT) { |
| 5058 | writel(HA_ERATT, addr: phba->HAregaddr); |
| 5059 | phba->pport->stopped = 1; |
| 5060 | } |
| 5061 | spin_lock_irq(lock: &phba->hbalock); |
| 5062 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
| 5063 | psli->mbox_active = NULL; |
| 5064 | phba->link_flag &= ~LS_IGNORE_ERATT; |
| 5065 | spin_unlock_irq(lock: &phba->hbalock); |
| 5066 | |
| 5067 | lpfc_hba_down_post(phba); |
| 5068 | phba->link_state = LPFC_HBA_ERROR; |
| 5069 | |
| 5070 | return ha_copy & HA_ERATT ? 0 : 1; |
| 5071 | } |
| 5072 | |
| 5073 | /** |
| 5074 | * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA |
| 5075 | * @phba: Pointer to HBA context object. |
| 5076 | * |
| 5077 | * This function resets the HBA by writing HC_INITFF to the control |
| 5078 | * register. After the HBA resets, this function resets all the iocb ring |
| 5079 | * indices. This function disables PCI layer parity checking during |
| 5080 | * the reset. |
| 5081 | * This function returns 0 always. |
| 5082 | * The caller is not required to hold any locks. |
| 5083 | **/ |
| 5084 | int |
| 5085 | lpfc_sli_brdreset(struct lpfc_hba *phba) |
| 5086 | { |
| 5087 | struct lpfc_sli *psli; |
| 5088 | struct lpfc_sli_ring *pring; |
| 5089 | uint16_t cfg_value; |
| 5090 | int i; |
| 5091 | |
| 5092 | psli = &phba->sli; |
| 5093 | |
| 5094 | /* Reset HBA */ |
| 5095 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 5096 | "0325 Reset HBA Data: x%x x%x\n" , |
| 5097 | (phba->pport) ? phba->pport->port_state : 0, |
| 5098 | psli->sli_flag); |
| 5099 | |
| 5100 | /* perform board reset */ |
| 5101 | phba->fc_eventTag = 0; |
| 5102 | phba->link_events = 0; |
| 5103 | set_bit(nr: HBA_NEEDS_CFG_PORT, addr: &phba->hba_flag); |
| 5104 | if (phba->pport) { |
| 5105 | phba->pport->fc_myDID = 0; |
| 5106 | phba->pport->fc_prevDID = 0; |
| 5107 | } |
| 5108 | |
| 5109 | /* Turn off parity checking and serr during the physical reset */ |
| 5110 | if (pci_read_config_word(dev: phba->pcidev, PCI_COMMAND, val: &cfg_value)) |
| 5111 | return -EIO; |
| 5112 | |
| 5113 | pci_write_config_word(dev: phba->pcidev, PCI_COMMAND, |
| 5114 | val: (cfg_value & |
| 5115 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); |
| 5116 | |
| 5117 | psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); |
| 5118 | |
| 5119 | /* Now toggle INITFF bit in the Host Control Register */ |
| 5120 | writel(HC_INITFF, addr: phba->HCregaddr); |
| 5121 | mdelay(1); |
| 5122 | readl(addr: phba->HCregaddr); /* flush */ |
| 5123 | writel(val: 0, addr: phba->HCregaddr); |
| 5124 | readl(addr: phba->HCregaddr); /* flush */ |
| 5125 | |
| 5126 | /* Restore PCI cmd register */ |
| 5127 | pci_write_config_word(dev: phba->pcidev, PCI_COMMAND, val: cfg_value); |
| 5128 | |
| 5129 | /* Initialize relevant SLI info */ |
| 5130 | for (i = 0; i < psli->num_rings; i++) { |
| 5131 | pring = &psli->sli3_ring[i]; |
| 5132 | pring->flag = 0; |
| 5133 | pring->sli.sli3.rspidx = 0; |
| 5134 | pring->sli.sli3.next_cmdidx = 0; |
| 5135 | pring->sli.sli3.local_getidx = 0; |
| 5136 | pring->sli.sli3.cmdidx = 0; |
| 5137 | pring->missbufcnt = 0; |
| 5138 | } |
| 5139 | |
| 5140 | phba->link_state = LPFC_WARM_START; |
| 5141 | return 0; |
| 5142 | } |
| 5143 | |
| 5144 | /** |
| 5145 | * lpfc_sli4_brdreset - Reset a sli-4 HBA |
| 5146 | * @phba: Pointer to HBA context object. |
| 5147 | * |
| 5148 | * This function resets a SLI4 HBA. This function disables PCI layer parity |
| 5149 | * checking during resets the device. The caller is not required to hold |
| 5150 | * any locks. |
| 5151 | * |
| 5152 | * This function returns 0 on success else returns negative error code. |
| 5153 | **/ |
| 5154 | int |
| 5155 | lpfc_sli4_brdreset(struct lpfc_hba *phba) |
| 5156 | { |
| 5157 | struct lpfc_sli *psli = &phba->sli; |
| 5158 | uint16_t cfg_value; |
| 5159 | int rc = 0; |
| 5160 | |
| 5161 | /* Reset HBA */ |
| 5162 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 5163 | "0295 Reset HBA Data: x%x x%x x%lx\n" , |
| 5164 | phba->pport->port_state, psli->sli_flag, |
| 5165 | phba->hba_flag); |
| 5166 | |
| 5167 | /* perform board reset */ |
| 5168 | phba->fc_eventTag = 0; |
| 5169 | phba->link_events = 0; |
| 5170 | phba->pport->fc_myDID = 0; |
| 5171 | phba->pport->fc_prevDID = 0; |
| 5172 | |
| 5173 | spin_lock_irq(lock: &phba->hbalock); |
| 5174 | psli->sli_flag &= ~(LPFC_PROCESS_LA); |
| 5175 | phba->fcf.fcf_flag = 0; |
| 5176 | spin_unlock_irq(lock: &phba->hbalock); |
| 5177 | |
| 5178 | /* Now physically reset the device */ |
| 5179 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 5180 | "0389 Performing PCI function reset!\n" ); |
| 5181 | |
| 5182 | /* Turn off parity checking and serr during the physical reset */ |
| 5183 | if (pci_read_config_word(dev: phba->pcidev, PCI_COMMAND, val: &cfg_value)) { |
| 5184 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 5185 | "3205 PCI read Config failed\n" ); |
| 5186 | return -EIO; |
| 5187 | } |
| 5188 | |
| 5189 | pci_write_config_word(dev: phba->pcidev, PCI_COMMAND, val: (cfg_value & |
| 5190 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); |
| 5191 | |
| 5192 | /* Perform FCoE PCI function reset before freeing queue memory */ |
| 5193 | rc = lpfc_pci_function_reset(phba); |
| 5194 | |
| 5195 | /* Restore PCI cmd register */ |
| 5196 | pci_write_config_word(dev: phba->pcidev, PCI_COMMAND, val: cfg_value); |
| 5197 | |
| 5198 | return rc; |
| 5199 | } |
| 5200 | |
| 5201 | /** |
| 5202 | * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba |
| 5203 | * @phba: Pointer to HBA context object. |
| 5204 | * |
| 5205 | * This function is called in the SLI initialization code path to |
| 5206 | * restart the HBA. The caller is not required to hold any lock. |
| 5207 | * This function writes MBX_RESTART mailbox command to the SLIM and |
| 5208 | * resets the HBA. At the end of the function, it calls lpfc_hba_down_post |
| 5209 | * function to free any pending commands. The function enables |
| 5210 | * POST only during the first initialization. The function returns zero. |
| 5211 | * The function does not guarantee completion of MBX_RESTART mailbox |
| 5212 | * command before the return of this function. |
| 5213 | **/ |
| 5214 | static int |
| 5215 | lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) |
| 5216 | { |
| 5217 | volatile struct MAILBOX_word0 mb; |
| 5218 | struct lpfc_sli *psli; |
| 5219 | void __iomem *to_slim; |
| 5220 | |
| 5221 | spin_lock_irq(lock: &phba->hbalock); |
| 5222 | |
| 5223 | psli = &phba->sli; |
| 5224 | |
| 5225 | /* Restart HBA */ |
| 5226 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 5227 | "0337 Restart HBA Data: x%x x%x\n" , |
| 5228 | (phba->pport) ? phba->pport->port_state : 0, |
| 5229 | psli->sli_flag); |
| 5230 | |
| 5231 | mb.word0 = 0; |
| 5232 | mb.mbxCommand = MBX_RESTART; |
| 5233 | mb.mbxHc = 1; |
| 5234 | |
| 5235 | lpfc_reset_barrier(phba); |
| 5236 | |
| 5237 | to_slim = phba->MBslimaddr; |
| 5238 | writel(val: mb.word0, addr: to_slim); |
| 5239 | readl(addr: to_slim); /* flush */ |
| 5240 | |
| 5241 | /* Only skip post after fc_ffinit is completed */ |
| 5242 | if (phba->pport && phba->pport->port_state) |
| 5243 | mb.word0 = 1; /* This is really setting up word1 */ |
| 5244 | else |
| 5245 | mb.word0 = 0; /* This is really setting up word1 */ |
| 5246 | to_slim = phba->MBslimaddr + sizeof (uint32_t); |
| 5247 | writel(val: mb.word0, addr: to_slim); |
| 5248 | readl(addr: to_slim); /* flush */ |
| 5249 | |
| 5250 | lpfc_sli_brdreset(phba); |
| 5251 | if (phba->pport) |
| 5252 | phba->pport->stopped = 0; |
| 5253 | phba->link_state = LPFC_INIT_START; |
| 5254 | phba->hba_flag = 0; |
| 5255 | spin_unlock_irq(lock: &phba->hbalock); |
| 5256 | |
| 5257 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); |
| 5258 | psli->stats_start = ktime_get_seconds(); |
| 5259 | |
| 5260 | /* Give the INITFF and Post time to settle. */ |
| 5261 | mdelay(100); |
| 5262 | |
| 5263 | lpfc_hba_down_post(phba); |
| 5264 | |
| 5265 | return 0; |
| 5266 | } |
| 5267 | |
| 5268 | /** |
| 5269 | * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba |
| 5270 | * @phba: Pointer to HBA context object. |
| 5271 | * |
| 5272 | * This function is called in the SLI initialization code path to restart |
| 5273 | * a SLI4 HBA. The caller is not required to hold any lock. |
| 5274 | * At the end of the function, it calls lpfc_hba_down_post function to |
| 5275 | * free any pending commands. |
| 5276 | **/ |
| 5277 | static int |
| 5278 | lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) |
| 5279 | { |
| 5280 | struct lpfc_sli *psli = &phba->sli; |
| 5281 | int rc; |
| 5282 | |
| 5283 | /* Restart HBA */ |
| 5284 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 5285 | "0296 Restart HBA Data: x%x x%x\n" , |
| 5286 | phba->pport->port_state, psli->sli_flag); |
| 5287 | |
| 5288 | clear_bit(nr: HBA_SETUP, addr: &phba->hba_flag); |
| 5289 | lpfc_sli4_queue_unset(phba); |
| 5290 | |
| 5291 | rc = lpfc_sli4_brdreset(phba); |
| 5292 | if (rc) { |
| 5293 | phba->link_state = LPFC_HBA_ERROR; |
| 5294 | goto hba_down_queue; |
| 5295 | } |
| 5296 | |
| 5297 | spin_lock_irq(lock: &phba->hbalock); |
| 5298 | phba->pport->stopped = 0; |
| 5299 | phba->link_state = LPFC_INIT_START; |
| 5300 | phba->hba_flag = 0; |
| 5301 | /* Preserve FA-PWWN expectation */ |
| 5302 | phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC; |
| 5303 | spin_unlock_irq(lock: &phba->hbalock); |
| 5304 | |
| 5305 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); |
| 5306 | psli->stats_start = ktime_get_seconds(); |
| 5307 | |
| 5308 | hba_down_queue: |
| 5309 | lpfc_hba_down_post(phba); |
| 5310 | lpfc_sli4_queue_destroy(phba); |
| 5311 | |
| 5312 | return rc; |
| 5313 | } |
| 5314 | |
| 5315 | /** |
| 5316 | * lpfc_sli_brdrestart - Wrapper func for restarting hba |
| 5317 | * @phba: Pointer to HBA context object. |
| 5318 | * |
| 5319 | * This routine wraps the actual SLI3 or SLI4 hba restart routine from the |
| 5320 | * API jump table function pointer from the lpfc_hba struct. |
| 5321 | **/ |
| 5322 | int |
| 5323 | lpfc_sli_brdrestart(struct lpfc_hba *phba) |
| 5324 | { |
| 5325 | return phba->lpfc_sli_brdrestart(phba); |
| 5326 | } |
| 5327 | |
| 5328 | /** |
| 5329 | * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart |
| 5330 | * @phba: Pointer to HBA context object. |
| 5331 | * |
| 5332 | * This function is called after a HBA restart to wait for successful |
| 5333 | * restart of the HBA. Successful restart of the HBA is indicated by |
| 5334 | * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 |
| 5335 | * iteration, the function will restart the HBA again. The function returns |
| 5336 | * zero if HBA successfully restarted else returns negative error code. |
| 5337 | **/ |
| 5338 | int |
| 5339 | lpfc_sli_chipset_init(struct lpfc_hba *phba) |
| 5340 | { |
| 5341 | uint32_t status, i = 0; |
| 5342 | |
| 5343 | /* Read the HBA Host Status Register */ |
| 5344 | if (lpfc_readl(addr: phba->HSregaddr, data: &status)) |
| 5345 | return -EIO; |
| 5346 | |
| 5347 | /* Check status register to see what current state is */ |
| 5348 | i = 0; |
| 5349 | while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { |
| 5350 | |
| 5351 | /* Check every 10ms for 10 retries, then every 100ms for 90 |
| 5352 | * retries, then every 1 sec for 50 retires for a total of |
| 5353 | * ~60 seconds before reset the board again and check every |
| 5354 | * 1 sec for 50 retries. The up to 60 seconds before the |
| 5355 | * board ready is required by the Falcon FIPS zeroization |
| 5356 | * complete, and any reset the board in between shall cause |
| 5357 | * restart of zeroization, further delay the board ready. |
| 5358 | */ |
| 5359 | if (i++ >= 200) { |
| 5360 | /* Adapter failed to init, timeout, status reg |
| 5361 | <status> */ |
| 5362 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 5363 | "0436 Adapter failed to init, " |
| 5364 | "timeout, status reg x%x, " |
| 5365 | "FW Data: A8 x%x AC x%x\n" , status, |
| 5366 | readl(phba->MBslimaddr + 0xa8), |
| 5367 | readl(phba->MBslimaddr + 0xac)); |
| 5368 | phba->link_state = LPFC_HBA_ERROR; |
| 5369 | return -ETIMEDOUT; |
| 5370 | } |
| 5371 | |
| 5372 | /* Check to see if any errors occurred during init */ |
| 5373 | if (status & HS_FFERM) { |
| 5374 | /* ERROR: During chipset initialization */ |
| 5375 | /* Adapter failed to init, chipset, status reg |
| 5376 | <status> */ |
| 5377 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 5378 | "0437 Adapter failed to init, " |
| 5379 | "chipset, status reg x%x, " |
| 5380 | "FW Data: A8 x%x AC x%x\n" , status, |
| 5381 | readl(phba->MBslimaddr + 0xa8), |
| 5382 | readl(phba->MBslimaddr + 0xac)); |
| 5383 | phba->link_state = LPFC_HBA_ERROR; |
| 5384 | return -EIO; |
| 5385 | } |
| 5386 | |
| 5387 | if (i <= 10) |
| 5388 | msleep(msecs: 10); |
| 5389 | else if (i <= 100) |
| 5390 | msleep(msecs: 100); |
| 5391 | else |
| 5392 | msleep(msecs: 1000); |
| 5393 | |
| 5394 | if (i == 150) { |
| 5395 | /* Do post */ |
| 5396 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; |
| 5397 | lpfc_sli_brdrestart(phba); |
| 5398 | } |
| 5399 | /* Read the HBA Host Status Register */ |
| 5400 | if (lpfc_readl(addr: phba->HSregaddr, data: &status)) |
| 5401 | return -EIO; |
| 5402 | } |
| 5403 | |
| 5404 | /* Check to see if any errors occurred during init */ |
| 5405 | if (status & HS_FFERM) { |
| 5406 | /* ERROR: During chipset initialization */ |
| 5407 | /* Adapter failed to init, chipset, status reg <status> */ |
| 5408 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 5409 | "0438 Adapter failed to init, chipset, " |
| 5410 | "status reg x%x, " |
| 5411 | "FW Data: A8 x%x AC x%x\n" , status, |
| 5412 | readl(phba->MBslimaddr + 0xa8), |
| 5413 | readl(phba->MBslimaddr + 0xac)); |
| 5414 | phba->link_state = LPFC_HBA_ERROR; |
| 5415 | return -EIO; |
| 5416 | } |
| 5417 | |
| 5418 | set_bit(nr: HBA_NEEDS_CFG_PORT, addr: &phba->hba_flag); |
| 5419 | |
| 5420 | /* Clear all interrupt enable conditions */ |
| 5421 | writel(val: 0, addr: phba->HCregaddr); |
| 5422 | readl(addr: phba->HCregaddr); /* flush */ |
| 5423 | |
| 5424 | /* setup host attn register */ |
| 5425 | writel(val: 0xffffffff, addr: phba->HAregaddr); |
| 5426 | readl(addr: phba->HAregaddr); /* flush */ |
| 5427 | return 0; |
| 5428 | } |
| 5429 | |
| 5430 | /** |
| 5431 | * lpfc_sli_hbq_count - Get the number of HBQs to be configured |
| 5432 | * |
| 5433 | * This function calculates and returns the number of HBQs required to be |
| 5434 | * configured. |
| 5435 | **/ |
| 5436 | int |
| 5437 | lpfc_sli_hbq_count(void) |
| 5438 | { |
| 5439 | return ARRAY_SIZE(lpfc_hbq_defs); |
| 5440 | } |
| 5441 | |
| 5442 | /** |
| 5443 | * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries |
| 5444 | * |
| 5445 | * This function adds the number of hbq entries in every HBQ to get |
| 5446 | * the total number of hbq entries required for the HBA and returns |
| 5447 | * the total count. |
| 5448 | **/ |
| 5449 | static int |
| 5450 | lpfc_sli_hbq_entry_count(void) |
| 5451 | { |
| 5452 | int hbq_count = lpfc_sli_hbq_count(); |
| 5453 | int count = 0; |
| 5454 | int i; |
| 5455 | |
| 5456 | for (i = 0; i < hbq_count; ++i) |
| 5457 | count += lpfc_hbq_defs[i]->entry_count; |
| 5458 | return count; |
| 5459 | } |
| 5460 | |
| 5461 | /** |
| 5462 | * lpfc_sli_hbq_size - Calculate memory required for all hbq entries |
| 5463 | * |
| 5464 | * This function calculates amount of memory required for all hbq entries |
| 5465 | * to be configured and returns the total memory required. |
| 5466 | **/ |
| 5467 | int |
| 5468 | lpfc_sli_hbq_size(void) |
| 5469 | { |
| 5470 | return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); |
| 5471 | } |
| 5472 | |
| 5473 | /** |
| 5474 | * lpfc_sli_hbq_setup - configure and initialize HBQs |
| 5475 | * @phba: Pointer to HBA context object. |
| 5476 | * |
| 5477 | * This function is called during the SLI initialization to configure |
| 5478 | * all the HBQs and post buffers to the HBQ. The caller is not |
| 5479 | * required to hold any locks. This function will return zero if successful |
| 5480 | * else it will return negative error code. |
| 5481 | **/ |
| 5482 | static int |
| 5483 | lpfc_sli_hbq_setup(struct lpfc_hba *phba) |
| 5484 | { |
| 5485 | int hbq_count = lpfc_sli_hbq_count(); |
| 5486 | LPFC_MBOXQ_t *pmb; |
| 5487 | MAILBOX_t *pmbox; |
| 5488 | uint32_t hbqno; |
| 5489 | uint32_t hbq_entry_index; |
| 5490 | |
| 5491 | /* Get a Mailbox buffer to setup mailbox |
| 5492 | * commands for HBA initialization |
| 5493 | */ |
| 5494 | pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 5495 | |
| 5496 | if (!pmb) |
| 5497 | return -ENOMEM; |
| 5498 | |
| 5499 | pmbox = &pmb->u.mb; |
| 5500 | |
| 5501 | /* Initialize the struct lpfc_sli_hbq structure for each hbq */ |
| 5502 | phba->link_state = LPFC_INIT_MBX_CMDS; |
| 5503 | phba->hbq_in_use = 1; |
| 5504 | |
| 5505 | hbq_entry_index = 0; |
| 5506 | for (hbqno = 0; hbqno < hbq_count; ++hbqno) { |
| 5507 | phba->hbqs[hbqno].next_hbqPutIdx = 0; |
| 5508 | phba->hbqs[hbqno].hbqPutIdx = 0; |
| 5509 | phba->hbqs[hbqno].local_hbqGetIdx = 0; |
| 5510 | phba->hbqs[hbqno].entry_count = |
| 5511 | lpfc_hbq_defs[hbqno]->entry_count; |
| 5512 | lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], |
| 5513 | hbq_entry_index, pmb); |
| 5514 | hbq_entry_index += phba->hbqs[hbqno].entry_count; |
| 5515 | |
| 5516 | if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { |
| 5517 | /* Adapter failed to init, mbxCmd <cmd> CFG_RING, |
| 5518 | mbxStatus <status>, ring <num> */ |
| 5519 | |
| 5520 | lpfc_printf_log(phba, KERN_ERR, |
| 5521 | LOG_SLI | LOG_VPORT, |
| 5522 | "1805 Adapter failed to init. " |
| 5523 | "Data: x%x x%x x%x\n" , |
| 5524 | pmbox->mbxCommand, |
| 5525 | pmbox->mbxStatus, hbqno); |
| 5526 | |
| 5527 | phba->link_state = LPFC_HBA_ERROR; |
| 5528 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
| 5529 | return -ENXIO; |
| 5530 | } |
| 5531 | } |
| 5532 | phba->hbq_count = hbq_count; |
| 5533 | |
| 5534 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
| 5535 | |
| 5536 | /* Initially populate or replenish the HBQs */ |
| 5537 | for (hbqno = 0; hbqno < hbq_count; ++hbqno) |
| 5538 | lpfc_sli_hbqbuf_init_hbqs(phba, qno: hbqno); |
| 5539 | return 0; |
| 5540 | } |
| 5541 | |
| 5542 | /** |
| 5543 | * lpfc_sli4_rb_setup - Initialize and post RBs to HBA |
| 5544 | * @phba: Pointer to HBA context object. |
| 5545 | * |
| 5546 | * This function is called during the SLI initialization to configure |
| 5547 | * all the HBQs and post buffers to the HBQ. The caller is not |
| 5548 | * required to hold any locks. This function will return zero if successful |
| 5549 | * else it will return negative error code. |
| 5550 | **/ |
| 5551 | static int |
| 5552 | lpfc_sli4_rb_setup(struct lpfc_hba *phba) |
| 5553 | { |
| 5554 | phba->hbq_in_use = 1; |
| 5555 | /** |
| 5556 | * Specific case when the MDS diagnostics is enabled and supported. |
| 5557 | * The receive buffer count is truncated to manage the incoming |
| 5558 | * traffic. |
| 5559 | **/ |
| 5560 | if (phba->cfg_enable_mds_diags && phba->mds_diags_support) |
| 5561 | phba->hbqs[LPFC_ELS_HBQ].entry_count = |
| 5562 | lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1; |
| 5563 | else |
| 5564 | phba->hbqs[LPFC_ELS_HBQ].entry_count = |
| 5565 | lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; |
| 5566 | phba->hbq_count = 1; |
| 5567 | lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); |
| 5568 | /* Initially populate or replenish the HBQs */ |
| 5569 | return 0; |
| 5570 | } |
| 5571 | |
| 5572 | /** |
| 5573 | * lpfc_sli_config_port - Issue config port mailbox command |
| 5574 | * @phba: Pointer to HBA context object. |
| 5575 | * @sli_mode: sli mode - 2/3 |
| 5576 | * |
| 5577 | * This function is called by the sli initialization code path |
| 5578 | * to issue config_port mailbox command. This function restarts the |
| 5579 | * HBA firmware and issues a config_port mailbox command to configure |
| 5580 | * the SLI interface in the sli mode specified by sli_mode |
| 5581 | * variable. The caller is not required to hold any locks. |
| 5582 | * The function returns 0 if successful, else returns negative error |
| 5583 | * code. |
| 5584 | **/ |
| 5585 | int |
| 5586 | lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) |
| 5587 | { |
| 5588 | LPFC_MBOXQ_t *pmb; |
| 5589 | uint32_t resetcount = 0, rc = 0, done = 0; |
| 5590 | |
| 5591 | pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 5592 | if (!pmb) { |
| 5593 | phba->link_state = LPFC_HBA_ERROR; |
| 5594 | return -ENOMEM; |
| 5595 | } |
| 5596 | |
| 5597 | phba->sli_rev = sli_mode; |
| 5598 | while (resetcount < 2 && !done) { |
| 5599 | spin_lock_irq(lock: &phba->hbalock); |
| 5600 | phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; |
| 5601 | spin_unlock_irq(lock: &phba->hbalock); |
| 5602 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; |
| 5603 | lpfc_sli_brdrestart(phba); |
| 5604 | rc = lpfc_sli_chipset_init(phba); |
| 5605 | if (rc) |
| 5606 | break; |
| 5607 | |
| 5608 | spin_lock_irq(lock: &phba->hbalock); |
| 5609 | phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
| 5610 | spin_unlock_irq(lock: &phba->hbalock); |
| 5611 | resetcount++; |
| 5612 | |
| 5613 | /* Call pre CONFIG_PORT mailbox command initialization. A |
| 5614 | * value of 0 means the call was successful. Any other |
| 5615 | * nonzero value is a failure, but if ERESTART is returned, |
| 5616 | * the driver may reset the HBA and try again. |
| 5617 | */ |
| 5618 | rc = lpfc_config_port_prep(phba); |
| 5619 | if (rc == -ERESTART) { |
| 5620 | phba->link_state = LPFC_LINK_UNKNOWN; |
| 5621 | continue; |
| 5622 | } else if (rc) |
| 5623 | break; |
| 5624 | |
| 5625 | phba->link_state = LPFC_INIT_MBX_CMDS; |
| 5626 | lpfc_config_port(phba, pmb); |
| 5627 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); |
| 5628 | phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | |
| 5629 | LPFC_SLI3_HBQ_ENABLED | |
| 5630 | LPFC_SLI3_CRP_ENABLED | |
| 5631 | LPFC_SLI3_DSS_ENABLED); |
| 5632 | if (rc != MBX_SUCCESS) { |
| 5633 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 5634 | "0442 Adapter failed to init, mbxCmd x%x " |
| 5635 | "CONFIG_PORT, mbxStatus x%x Data: x%x\n" , |
| 5636 | pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); |
| 5637 | spin_lock_irq(lock: &phba->hbalock); |
| 5638 | phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; |
| 5639 | spin_unlock_irq(lock: &phba->hbalock); |
| 5640 | rc = -ENXIO; |
| 5641 | } else { |
| 5642 | /* Allow asynchronous mailbox command to go through */ |
| 5643 | spin_lock_irq(lock: &phba->hbalock); |
| 5644 | phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; |
| 5645 | spin_unlock_irq(lock: &phba->hbalock); |
| 5646 | done = 1; |
| 5647 | |
| 5648 | if ((pmb->u.mb.un.varCfgPort.casabt == 1) && |
| 5649 | (pmb->u.mb.un.varCfgPort.gasabt == 0)) |
| 5650 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
| 5651 | "3110 Port did not grant ASABT\n" ); |
| 5652 | } |
| 5653 | } |
| 5654 | if (!done) { |
| 5655 | rc = -EINVAL; |
| 5656 | goto do_prep_failed; |
| 5657 | } |
| 5658 | if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { |
| 5659 | if (!pmb->u.mb.un.varCfgPort.cMA) { |
| 5660 | rc = -ENXIO; |
| 5661 | goto do_prep_failed; |
| 5662 | } |
| 5663 | if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { |
| 5664 | phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; |
| 5665 | phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; |
| 5666 | phba->max_vports = (phba->max_vpi > phba->max_vports) ? |
| 5667 | phba->max_vpi : phba->max_vports; |
| 5668 | |
| 5669 | } else |
| 5670 | phba->max_vpi = 0; |
| 5671 | if (pmb->u.mb.un.varCfgPort.gerbm) |
| 5672 | phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; |
| 5673 | if (pmb->u.mb.un.varCfgPort.gcrp) |
| 5674 | phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; |
| 5675 | |
| 5676 | phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; |
| 5677 | phba->port_gp = phba->mbox->us.s3_pgp.port; |
| 5678 | |
| 5679 | if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { |
| 5680 | if (pmb->u.mb.un.varCfgPort.gbg == 0) { |
| 5681 | phba->cfg_enable_bg = 0; |
| 5682 | phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; |
| 5683 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 5684 | "0443 Adapter did not grant " |
| 5685 | "BlockGuard\n" ); |
| 5686 | } |
| 5687 | } |
| 5688 | } else { |
| 5689 | phba->hbq_get = NULL; |
| 5690 | phba->port_gp = phba->mbox->us.s2.port; |
| 5691 | phba->max_vpi = 0; |
| 5692 | } |
| 5693 | do_prep_failed: |
| 5694 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
| 5695 | return rc; |
| 5696 | } |
| 5697 | |
| 5698 | |
| 5699 | /** |
| 5700 | * lpfc_sli_hba_setup - SLI initialization function |
| 5701 | * @phba: Pointer to HBA context object. |
| 5702 | * |
| 5703 | * This function is the main SLI initialization function. This function |
| 5704 | * is called by the HBA initialization code, HBA reset code and HBA |
| 5705 | * error attention handler code. Caller is not required to hold any |
| 5706 | * locks. This function issues config_port mailbox command to configure |
| 5707 | * the SLI, setup iocb rings and HBQ rings. In the end the function |
| 5708 | * calls the config_port_post function to issue init_link mailbox |
| 5709 | * command and to start the discovery. The function will return zero |
| 5710 | * if successful, else it will return negative error code. |
| 5711 | **/ |
| 5712 | int |
| 5713 | lpfc_sli_hba_setup(struct lpfc_hba *phba) |
| 5714 | { |
| 5715 | uint32_t rc; |
| 5716 | int i; |
| 5717 | int longs; |
| 5718 | |
| 5719 | /* Enable ISR already does config_port because of config_msi mbx */ |
| 5720 | if (test_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag)) { |
| 5721 | rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3); |
| 5722 | if (rc) |
| 5723 | return -EIO; |
| 5724 | clear_bit(nr: HBA_NEEDS_CFG_PORT, addr: &phba->hba_flag); |
| 5725 | } |
| 5726 | phba->fcp_embed_io = 0; /* SLI4 FC support only */ |
| 5727 | |
| 5728 | if (phba->sli_rev == 3) { |
| 5729 | phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; |
| 5730 | phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; |
| 5731 | } else { |
| 5732 | phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; |
| 5733 | phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; |
| 5734 | phba->sli3_options = 0; |
| 5735 | } |
| 5736 | |
| 5737 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 5738 | "0444 Firmware in SLI %x mode. Max_vpi %d\n" , |
| 5739 | phba->sli_rev, phba->max_vpi); |
| 5740 | rc = lpfc_sli_ring_map(phba); |
| 5741 | |
| 5742 | if (rc) |
| 5743 | goto lpfc_sli_hba_setup_error; |
| 5744 | |
| 5745 | /* Initialize VPIs. */ |
| 5746 | if (phba->sli_rev == LPFC_SLI_REV3) { |
| 5747 | /* |
| 5748 | * The VPI bitmask and physical ID array are allocated |
| 5749 | * and initialized once only - at driver load. A port |
| 5750 | * reset doesn't need to reinitialize this memory. |
| 5751 | */ |
| 5752 | if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { |
| 5753 | longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; |
| 5754 | phba->vpi_bmask = kcalloc(longs, |
| 5755 | sizeof(unsigned long), |
| 5756 | GFP_KERNEL); |
| 5757 | if (!phba->vpi_bmask) { |
| 5758 | rc = -ENOMEM; |
| 5759 | goto lpfc_sli_hba_setup_error; |
| 5760 | } |
| 5761 | |
| 5762 | phba->vpi_ids = kcalloc(phba->max_vpi + 1, |
| 5763 | sizeof(uint16_t), |
| 5764 | GFP_KERNEL); |
| 5765 | if (!phba->vpi_ids) { |
| 5766 | kfree(objp: phba->vpi_bmask); |
| 5767 | rc = -ENOMEM; |
| 5768 | goto lpfc_sli_hba_setup_error; |
| 5769 | } |
| 5770 | for (i = 0; i < phba->max_vpi; i++) |
| 5771 | phba->vpi_ids[i] = i; |
| 5772 | } |
| 5773 | } |
| 5774 | |
| 5775 | /* Init HBQs */ |
| 5776 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
| 5777 | rc = lpfc_sli_hbq_setup(phba); |
| 5778 | if (rc) |
| 5779 | goto lpfc_sli_hba_setup_error; |
| 5780 | } |
| 5781 | spin_lock_irq(lock: &phba->hbalock); |
| 5782 | phba->sli.sli_flag |= LPFC_PROCESS_LA; |
| 5783 | spin_unlock_irq(lock: &phba->hbalock); |
| 5784 | |
| 5785 | rc = lpfc_config_port_post(phba); |
| 5786 | if (rc) |
| 5787 | goto lpfc_sli_hba_setup_error; |
| 5788 | |
| 5789 | return rc; |
| 5790 | |
| 5791 | lpfc_sli_hba_setup_error: |
| 5792 | phba->link_state = LPFC_HBA_ERROR; |
| 5793 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 5794 | "0445 Firmware initialization failed\n" ); |
| 5795 | return rc; |
| 5796 | } |
| 5797 | |
| 5798 | /** |
| 5799 | * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region |
| 5800 | * @phba: Pointer to HBA context object. |
| 5801 | * |
| 5802 | * This function issue a dump mailbox command to read config region |
| 5803 | * 23 and parse the records in the region and populate driver |
| 5804 | * data structure. |
| 5805 | **/ |
| 5806 | static int |
| 5807 | lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) |
| 5808 | { |
| 5809 | LPFC_MBOXQ_t *mboxq; |
| 5810 | struct lpfc_dmabuf *mp; |
| 5811 | struct lpfc_mqe *mqe; |
| 5812 | uint32_t data_length; |
| 5813 | int rc; |
| 5814 | |
| 5815 | /* Program the default value of vlan_id and fc_map */ |
| 5816 | phba->valid_vlan = 0; |
| 5817 | phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; |
| 5818 | phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; |
| 5819 | phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; |
| 5820 | |
| 5821 | mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 5822 | if (!mboxq) |
| 5823 | return -ENOMEM; |
| 5824 | |
| 5825 | mqe = &mboxq->u.mqe; |
| 5826 | if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { |
| 5827 | rc = -ENOMEM; |
| 5828 | goto out_free_mboxq; |
| 5829 | } |
| 5830 | |
| 5831 | mp = mboxq->ctx_buf; |
| 5832 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 5833 | |
| 5834 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, |
| 5835 | "(%d):2571 Mailbox cmd x%x Status x%x " |
| 5836 | "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " |
| 5837 | "x%x x%x x%x x%x x%x x%x x%x x%x x%x " |
| 5838 | "CQ: x%x x%x x%x x%x\n" , |
| 5839 | mboxq->vport ? mboxq->vport->vpi : 0, |
| 5840 | bf_get(lpfc_mqe_command, mqe), |
| 5841 | bf_get(lpfc_mqe_status, mqe), |
| 5842 | mqe->un.mb_words[0], mqe->un.mb_words[1], |
| 5843 | mqe->un.mb_words[2], mqe->un.mb_words[3], |
| 5844 | mqe->un.mb_words[4], mqe->un.mb_words[5], |
| 5845 | mqe->un.mb_words[6], mqe->un.mb_words[7], |
| 5846 | mqe->un.mb_words[8], mqe->un.mb_words[9], |
| 5847 | mqe->un.mb_words[10], mqe->un.mb_words[11], |
| 5848 | mqe->un.mb_words[12], mqe->un.mb_words[13], |
| 5849 | mqe->un.mb_words[14], mqe->un.mb_words[15], |
| 5850 | mqe->un.mb_words[16], mqe->un.mb_words[50], |
| 5851 | mboxq->mcqe.word0, |
| 5852 | mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, |
| 5853 | mboxq->mcqe.trailer); |
| 5854 | |
| 5855 | if (rc) { |
| 5856 | rc = -EIO; |
| 5857 | goto out_free_mboxq; |
| 5858 | } |
| 5859 | data_length = mqe->un.mb_words[5]; |
| 5860 | if (data_length > DMP_RGN23_SIZE) { |
| 5861 | rc = -EIO; |
| 5862 | goto out_free_mboxq; |
| 5863 | } |
| 5864 | |
| 5865 | lpfc_parse_fcoe_conf(phba, mp->virt, data_length); |
| 5866 | rc = 0; |
| 5867 | |
| 5868 | out_free_mboxq: |
| 5869 | lpfc_mbox_rsrc_cleanup(phba, mbox: mboxq, locked: MBOX_THD_UNLOCKED); |
| 5870 | return rc; |
| 5871 | } |
| 5872 | |
| 5873 | /** |
| 5874 | * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data |
| 5875 | * @phba: pointer to lpfc hba data structure. |
| 5876 | * @mboxq: pointer to the LPFC_MBOXQ_t structure. |
| 5877 | * @vpd: pointer to the memory to hold resulting port vpd data. |
| 5878 | * @vpd_size: On input, the number of bytes allocated to @vpd. |
| 5879 | * On output, the number of data bytes in @vpd. |
| 5880 | * |
| 5881 | * This routine executes a READ_REV SLI4 mailbox command. In |
| 5882 | * addition, this routine gets the port vpd data. |
| 5883 | * |
| 5884 | * Return codes |
| 5885 | * 0 - successful |
| 5886 | * -ENOMEM - could not allocated memory. |
| 5887 | **/ |
| 5888 | static int |
| 5889 | lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, |
| 5890 | uint8_t *vpd, uint32_t *vpd_size) |
| 5891 | { |
| 5892 | int rc = 0; |
| 5893 | uint32_t dma_size; |
| 5894 | struct lpfc_dmabuf *dmabuf; |
| 5895 | struct lpfc_mqe *mqe; |
| 5896 | |
| 5897 | dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
| 5898 | if (!dmabuf) |
| 5899 | return -ENOMEM; |
| 5900 | |
| 5901 | /* |
| 5902 | * Get a DMA buffer for the vpd data resulting from the READ_REV |
| 5903 | * mailbox command. |
| 5904 | */ |
| 5905 | dma_size = *vpd_size; |
| 5906 | dmabuf->virt = dma_alloc_coherent(dev: &phba->pcidev->dev, size: dma_size, |
| 5907 | dma_handle: &dmabuf->phys, GFP_KERNEL); |
| 5908 | if (!dmabuf->virt) { |
| 5909 | kfree(objp: dmabuf); |
| 5910 | return -ENOMEM; |
| 5911 | } |
| 5912 | |
| 5913 | /* |
| 5914 | * The SLI4 implementation of READ_REV conflicts at word1, |
| 5915 | * bits 31:16 and SLI4 adds vpd functionality not present |
| 5916 | * in SLI3. This code corrects the conflicts. |
| 5917 | */ |
| 5918 | lpfc_read_rev(phba, mboxq); |
| 5919 | mqe = &mboxq->u.mqe; |
| 5920 | mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); |
| 5921 | mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); |
| 5922 | mqe->un.read_rev.word1 &= 0x0000FFFF; |
| 5923 | bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); |
| 5924 | bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); |
| 5925 | |
| 5926 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 5927 | if (rc) { |
| 5928 | dma_free_coherent(dev: &phba->pcidev->dev, size: dma_size, |
| 5929 | cpu_addr: dmabuf->virt, dma_handle: dmabuf->phys); |
| 5930 | kfree(objp: dmabuf); |
| 5931 | return -EIO; |
| 5932 | } |
| 5933 | |
| 5934 | /* |
| 5935 | * The available vpd length cannot be bigger than the |
| 5936 | * DMA buffer passed to the port. Catch the less than |
| 5937 | * case and update the caller's size. |
| 5938 | */ |
| 5939 | if (mqe->un.read_rev.avail_vpd_len < *vpd_size) |
| 5940 | *vpd_size = mqe->un.read_rev.avail_vpd_len; |
| 5941 | |
| 5942 | memcpy(vpd, dmabuf->virt, *vpd_size); |
| 5943 | |
| 5944 | dma_free_coherent(dev: &phba->pcidev->dev, size: dma_size, |
| 5945 | cpu_addr: dmabuf->virt, dma_handle: dmabuf->phys); |
| 5946 | kfree(objp: dmabuf); |
| 5947 | return 0; |
| 5948 | } |
| 5949 | |
| 5950 | /** |
| 5951 | * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes |
| 5952 | * @phba: pointer to lpfc hba data structure. |
| 5953 | * |
| 5954 | * This routine retrieves SLI4 device physical port name this PCI function |
| 5955 | * is attached to. |
| 5956 | * |
| 5957 | * Return codes |
| 5958 | * 0 - successful |
| 5959 | * otherwise - failed to retrieve controller attributes |
| 5960 | **/ |
| 5961 | static int |
| 5962 | lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) |
| 5963 | { |
| 5964 | LPFC_MBOXQ_t *mboxq; |
| 5965 | struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; |
| 5966 | struct lpfc_controller_attribute *cntl_attr; |
| 5967 | void *virtaddr = NULL; |
| 5968 | uint32_t alloclen, reqlen; |
| 5969 | uint32_t shdr_status, shdr_add_status; |
| 5970 | union lpfc_sli4_cfg_shdr *shdr; |
| 5971 | int rc; |
| 5972 | |
| 5973 | mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 5974 | if (!mboxq) |
| 5975 | return -ENOMEM; |
| 5976 | |
| 5977 | /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */ |
| 5978 | reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); |
| 5979 | alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 5980 | LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, |
| 5981 | LPFC_SLI4_MBX_NEMBED); |
| 5982 | |
| 5983 | if (alloclen < reqlen) { |
| 5984 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 5985 | "3084 Allocated DMA memory size (%d) is " |
| 5986 | "less than the requested DMA memory size " |
| 5987 | "(%d)\n" , alloclen, reqlen); |
| 5988 | rc = -ENOMEM; |
| 5989 | goto out_free_mboxq; |
| 5990 | } |
| 5991 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 5992 | virtaddr = mboxq->sge_array->addr[0]; |
| 5993 | mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; |
| 5994 | shdr = &mbx_cntl_attr->cfg_shdr; |
| 5995 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 5996 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 5997 | if (shdr_status || shdr_add_status || rc) { |
| 5998 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 5999 | "3085 Mailbox x%x (x%x/x%x) failed, " |
| 6000 | "rc:x%x, status:x%x, add_status:x%x\n" , |
| 6001 | bf_get(lpfc_mqe_command, &mboxq->u.mqe), |
| 6002 | lpfc_sli_config_mbox_subsys_get(phba, mboxq), |
| 6003 | lpfc_sli_config_mbox_opcode_get(phba, mboxq), |
| 6004 | rc, shdr_status, shdr_add_status); |
| 6005 | rc = -ENXIO; |
| 6006 | goto out_free_mboxq; |
| 6007 | } |
| 6008 | |
| 6009 | cntl_attr = &mbx_cntl_attr->cntl_attr; |
| 6010 | phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; |
| 6011 | phba->sli4_hba.lnk_info.lnk_tp = |
| 6012 | bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); |
| 6013 | phba->sli4_hba.lnk_info.lnk_no = |
| 6014 | bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); |
| 6015 | phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); |
| 6016 | phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); |
| 6017 | |
| 6018 | memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str, |
| 6019 | sizeof(phba->BIOSVersion)); |
| 6020 | phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0'; |
| 6021 | |
| 6022 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 6023 | "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " |
| 6024 | "flash_id: x%02x, asic_rev: x%02x\n" , |
| 6025 | phba->sli4_hba.lnk_info.lnk_tp, |
| 6026 | phba->sli4_hba.lnk_info.lnk_no, |
| 6027 | phba->BIOSVersion, phba->sli4_hba.flash_id, |
| 6028 | phba->sli4_hba.asic_rev); |
| 6029 | out_free_mboxq: |
| 6030 | if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) |
| 6031 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
| 6032 | else |
| 6033 | mempool_free(element: mboxq, pool: phba->mbox_mem_pool); |
| 6034 | return rc; |
| 6035 | } |
| 6036 | |
| 6037 | /** |
| 6038 | * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name |
| 6039 | * @phba: pointer to lpfc hba data structure. |
| 6040 | * |
| 6041 | * This routine retrieves SLI4 device physical port name this PCI function |
| 6042 | * is attached to. |
| 6043 | * |
| 6044 | * Return codes |
| 6045 | * 0 - successful |
| 6046 | * otherwise - failed to retrieve physical port name |
| 6047 | **/ |
| 6048 | static int |
| 6049 | lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) |
| 6050 | { |
| 6051 | LPFC_MBOXQ_t *mboxq; |
| 6052 | struct lpfc_mbx_get_port_name *get_port_name; |
| 6053 | uint32_t shdr_status, shdr_add_status; |
| 6054 | union lpfc_sli4_cfg_shdr *shdr; |
| 6055 | char cport_name = 0; |
| 6056 | int rc; |
| 6057 | |
| 6058 | /* We assume nothing at this point */ |
| 6059 | phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; |
| 6060 | phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; |
| 6061 | |
| 6062 | mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 6063 | if (!mboxq) |
| 6064 | return -ENOMEM; |
| 6065 | /* obtain link type and link number via READ_CONFIG */ |
| 6066 | phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; |
| 6067 | lpfc_sli4_read_config(phba); |
| 6068 | |
| 6069 | if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) |
| 6070 | phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; |
| 6071 | |
| 6072 | if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) |
| 6073 | goto retrieve_ppname; |
| 6074 | |
| 6075 | /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ |
| 6076 | rc = lpfc_sli4_get_ctl_attr(phba); |
| 6077 | if (rc) |
| 6078 | goto out_free_mboxq; |
| 6079 | |
| 6080 | retrieve_ppname: |
| 6081 | lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 6082 | LPFC_MBOX_OPCODE_GET_PORT_NAME, |
| 6083 | sizeof(struct lpfc_mbx_get_port_name) - |
| 6084 | sizeof(struct lpfc_sli4_cfg_mhdr), |
| 6085 | LPFC_SLI4_MBX_EMBED); |
| 6086 | get_port_name = &mboxq->u.mqe.un.get_port_name; |
| 6087 | shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; |
| 6088 | bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); |
| 6089 | bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, |
| 6090 | phba->sli4_hba.lnk_info.lnk_tp); |
| 6091 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 6092 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 6093 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 6094 | if (shdr_status || shdr_add_status || rc) { |
| 6095 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 6096 | "3087 Mailbox x%x (x%x/x%x) failed: " |
| 6097 | "rc:x%x, status:x%x, add_status:x%x\n" , |
| 6098 | bf_get(lpfc_mqe_command, &mboxq->u.mqe), |
| 6099 | lpfc_sli_config_mbox_subsys_get(phba, mboxq), |
| 6100 | lpfc_sli_config_mbox_opcode_get(phba, mboxq), |
| 6101 | rc, shdr_status, shdr_add_status); |
| 6102 | rc = -ENXIO; |
| 6103 | goto out_free_mboxq; |
| 6104 | } |
| 6105 | switch (phba->sli4_hba.lnk_info.lnk_no) { |
| 6106 | case LPFC_LINK_NUMBER_0: |
| 6107 | cport_name = bf_get(lpfc_mbx_get_port_name_name0, |
| 6108 | &get_port_name->u.response); |
| 6109 | phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; |
| 6110 | break; |
| 6111 | case LPFC_LINK_NUMBER_1: |
| 6112 | cport_name = bf_get(lpfc_mbx_get_port_name_name1, |
| 6113 | &get_port_name->u.response); |
| 6114 | phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; |
| 6115 | break; |
| 6116 | case LPFC_LINK_NUMBER_2: |
| 6117 | cport_name = bf_get(lpfc_mbx_get_port_name_name2, |
| 6118 | &get_port_name->u.response); |
| 6119 | phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; |
| 6120 | break; |
| 6121 | case LPFC_LINK_NUMBER_3: |
| 6122 | cport_name = bf_get(lpfc_mbx_get_port_name_name3, |
| 6123 | &get_port_name->u.response); |
| 6124 | phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; |
| 6125 | break; |
| 6126 | default: |
| 6127 | break; |
| 6128 | } |
| 6129 | |
| 6130 | if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { |
| 6131 | phba->Port[0] = cport_name; |
| 6132 | phba->Port[1] = '\0'; |
| 6133 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 6134 | "3091 SLI get port name: %s\n" , phba->Port); |
| 6135 | } |
| 6136 | |
| 6137 | out_free_mboxq: |
| 6138 | if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) |
| 6139 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
| 6140 | else |
| 6141 | mempool_free(element: mboxq, pool: phba->mbox_mem_pool); |
| 6142 | return rc; |
| 6143 | } |
| 6144 | |
| 6145 | /** |
| 6146 | * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues |
| 6147 | * @phba: pointer to lpfc hba data structure. |
| 6148 | * |
| 6149 | * This routine is called to explicitly arm the SLI4 device's completion and |
| 6150 | * event queues |
| 6151 | **/ |
| 6152 | static void |
| 6153 | lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) |
| 6154 | { |
| 6155 | int qidx; |
| 6156 | struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; |
| 6157 | struct lpfc_sli4_hdw_queue *qp; |
| 6158 | struct lpfc_queue *eq; |
| 6159 | |
| 6160 | sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM); |
| 6161 | sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM); |
| 6162 | if (sli4_hba->nvmels_cq) |
| 6163 | sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0, |
| 6164 | LPFC_QUEUE_REARM); |
| 6165 | |
| 6166 | if (sli4_hba->hdwq) { |
| 6167 | /* Loop thru all Hardware Queues */ |
| 6168 | for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { |
| 6169 | qp = &sli4_hba->hdwq[qidx]; |
| 6170 | /* ARM the corresponding CQ */ |
| 6171 | sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0, |
| 6172 | LPFC_QUEUE_REARM); |
| 6173 | } |
| 6174 | |
| 6175 | /* Loop thru all IRQ vectors */ |
| 6176 | for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { |
| 6177 | eq = sli4_hba->hba_eq_hdl[qidx].eq; |
| 6178 | /* ARM the corresponding EQ */ |
| 6179 | sli4_hba->sli4_write_eq_db(phba, eq, |
| 6180 | 0, LPFC_QUEUE_REARM); |
| 6181 | } |
| 6182 | } |
| 6183 | |
| 6184 | if (phba->nvmet_support) { |
| 6185 | for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { |
| 6186 | sli4_hba->sli4_write_cq_db(phba, |
| 6187 | sli4_hba->nvmet_cqset[qidx], 0, |
| 6188 | LPFC_QUEUE_REARM); |
| 6189 | } |
| 6190 | } |
| 6191 | } |
| 6192 | |
| 6193 | /** |
| 6194 | * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. |
| 6195 | * @phba: Pointer to HBA context object. |
| 6196 | * @type: The resource extent type. |
| 6197 | * @extnt_count: buffer to hold port available extent count. |
| 6198 | * @extnt_size: buffer to hold element count per extent. |
| 6199 | * |
| 6200 | * This function calls the port and retrievs the number of available |
| 6201 | * extents and their size for a particular extent type. |
| 6202 | * |
| 6203 | * Returns: 0 if successful. Nonzero otherwise. |
| 6204 | **/ |
| 6205 | int |
| 6206 | lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, |
| 6207 | uint16_t *extnt_count, uint16_t *extnt_size) |
| 6208 | { |
| 6209 | int rc = 0; |
| 6210 | uint32_t length; |
| 6211 | uint32_t mbox_tmo; |
| 6212 | struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; |
| 6213 | LPFC_MBOXQ_t *mbox; |
| 6214 | |
| 6215 | *extnt_count = 0; |
| 6216 | *extnt_size = 0; |
| 6217 | |
| 6218 | mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 6219 | if (!mbox) |
| 6220 | return -ENOMEM; |
| 6221 | |
| 6222 | /* Find out how many extents are available for this resource type */ |
| 6223 | length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - |
| 6224 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 6225 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 6226 | LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, |
| 6227 | length, LPFC_SLI4_MBX_EMBED); |
| 6228 | |
| 6229 | /* Send an extents count of 0 - the GET doesn't use it. */ |
| 6230 | rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, |
| 6231 | LPFC_SLI4_MBX_EMBED); |
| 6232 | if (unlikely(rc)) { |
| 6233 | rc = -EIO; |
| 6234 | goto err_exit; |
| 6235 | } |
| 6236 | |
| 6237 | if (!phba->sli4_hba.intr_enable) |
| 6238 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
| 6239 | else { |
| 6240 | mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); |
| 6241 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); |
| 6242 | } |
| 6243 | if (unlikely(rc)) { |
| 6244 | rc = -EIO; |
| 6245 | goto err_exit; |
| 6246 | } |
| 6247 | |
| 6248 | rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; |
| 6249 | if (bf_get(lpfc_mbox_hdr_status, |
| 6250 | &rsrc_info->header.cfg_shdr.response)) { |
| 6251 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 6252 | "2930 Failed to get resource extents " |
| 6253 | "Status 0x%x Add'l Status 0x%x\n" , |
| 6254 | bf_get(lpfc_mbox_hdr_status, |
| 6255 | &rsrc_info->header.cfg_shdr.response), |
| 6256 | bf_get(lpfc_mbox_hdr_add_status, |
| 6257 | &rsrc_info->header.cfg_shdr.response)); |
| 6258 | rc = -EIO; |
| 6259 | goto err_exit; |
| 6260 | } |
| 6261 | |
| 6262 | *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, |
| 6263 | &rsrc_info->u.rsp); |
| 6264 | *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, |
| 6265 | &rsrc_info->u.rsp); |
| 6266 | |
| 6267 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 6268 | "3162 Retrieved extents type-%d from port: count:%d, " |
| 6269 | "size:%d\n" , type, *extnt_count, *extnt_size); |
| 6270 | |
| 6271 | err_exit: |
| 6272 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 6273 | return rc; |
| 6274 | } |
| 6275 | |
| 6276 | /** |
| 6277 | * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. |
| 6278 | * @phba: Pointer to HBA context object. |
| 6279 | * @type: The extent type to check. |
| 6280 | * |
| 6281 | * This function reads the current available extents from the port and checks |
| 6282 | * if the extent count or extent size has changed since the last access. |
| 6283 | * Callers use this routine post port reset to understand if there is a |
| 6284 | * extent reprovisioning requirement. |
| 6285 | * |
| 6286 | * Returns: |
| 6287 | * -Error: error indicates problem. |
| 6288 | * 1: Extent count or size has changed. |
| 6289 | * 0: No changes. |
| 6290 | **/ |
| 6291 | static int |
| 6292 | lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) |
| 6293 | { |
| 6294 | uint16_t curr_ext_cnt, rsrc_ext_cnt; |
| 6295 | uint16_t size_diff, rsrc_ext_size; |
| 6296 | int rc = 0; |
| 6297 | struct lpfc_rsrc_blks *rsrc_entry; |
| 6298 | struct list_head *rsrc_blk_list = NULL; |
| 6299 | |
| 6300 | size_diff = 0; |
| 6301 | curr_ext_cnt = 0; |
| 6302 | rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, |
| 6303 | extnt_count: &rsrc_ext_cnt, |
| 6304 | extnt_size: &rsrc_ext_size); |
| 6305 | if (unlikely(rc)) |
| 6306 | return -EIO; |
| 6307 | |
| 6308 | switch (type) { |
| 6309 | case LPFC_RSC_TYPE_FCOE_RPI: |
| 6310 | rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; |
| 6311 | break; |
| 6312 | case LPFC_RSC_TYPE_FCOE_VPI: |
| 6313 | rsrc_blk_list = &phba->lpfc_vpi_blk_list; |
| 6314 | break; |
| 6315 | case LPFC_RSC_TYPE_FCOE_XRI: |
| 6316 | rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; |
| 6317 | break; |
| 6318 | case LPFC_RSC_TYPE_FCOE_VFI: |
| 6319 | rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; |
| 6320 | break; |
| 6321 | default: |
| 6322 | break; |
| 6323 | } |
| 6324 | |
| 6325 | list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { |
| 6326 | curr_ext_cnt++; |
| 6327 | if (rsrc_entry->rsrc_size != rsrc_ext_size) |
| 6328 | size_diff++; |
| 6329 | } |
| 6330 | |
| 6331 | if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) |
| 6332 | rc = 1; |
| 6333 | |
| 6334 | return rc; |
| 6335 | } |
| 6336 | |
| 6337 | /** |
| 6338 | * lpfc_sli4_cfg_post_extnts - |
| 6339 | * @phba: Pointer to HBA context object. |
| 6340 | * @extnt_cnt: number of available extents. |
| 6341 | * @type: the extent type (rpi, xri, vfi, vpi). |
| 6342 | * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation. |
| 6343 | * @mbox: pointer to the caller's allocated mailbox structure. |
| 6344 | * |
| 6345 | * This function executes the extents allocation request. It also |
| 6346 | * takes care of the amount of memory needed to allocate or get the |
| 6347 | * allocated extents. It is the caller's responsibility to evaluate |
| 6348 | * the response. |
| 6349 | * |
| 6350 | * Returns: |
| 6351 | * -Error: Error value describes the condition found. |
| 6352 | * 0: if successful |
| 6353 | **/ |
| 6354 | static int |
| 6355 | lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, |
| 6356 | uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) |
| 6357 | { |
| 6358 | int rc = 0; |
| 6359 | uint32_t req_len; |
| 6360 | uint32_t emb_len; |
| 6361 | uint32_t alloc_len, mbox_tmo; |
| 6362 | |
| 6363 | /* Calculate the total requested length of the dma memory */ |
| 6364 | req_len = extnt_cnt * sizeof(uint16_t); |
| 6365 | |
| 6366 | /* |
| 6367 | * Calculate the size of an embedded mailbox. The uint32_t |
| 6368 | * accounts for extents-specific word. |
| 6369 | */ |
| 6370 | emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - |
| 6371 | sizeof(uint32_t); |
| 6372 | |
| 6373 | /* |
| 6374 | * Presume the allocation and response will fit into an embedded |
| 6375 | * mailbox. If not true, reconfigure to a non-embedded mailbox. |
| 6376 | */ |
| 6377 | *emb = LPFC_SLI4_MBX_EMBED; |
| 6378 | if (req_len > emb_len) { |
| 6379 | req_len = extnt_cnt * sizeof(uint16_t) + |
| 6380 | sizeof(union lpfc_sli4_cfg_shdr) + |
| 6381 | sizeof(uint32_t); |
| 6382 | *emb = LPFC_SLI4_MBX_NEMBED; |
| 6383 | } |
| 6384 | |
| 6385 | alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 6386 | LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, |
| 6387 | req_len, *emb); |
| 6388 | if (alloc_len < req_len) { |
| 6389 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 6390 | "2982 Allocated DMA memory size (x%x) is " |
| 6391 | "less than the requested DMA memory " |
| 6392 | "size (x%x)\n" , alloc_len, req_len); |
| 6393 | return -ENOMEM; |
| 6394 | } |
| 6395 | rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); |
| 6396 | if (unlikely(rc)) |
| 6397 | return -EIO; |
| 6398 | |
| 6399 | if (!phba->sli4_hba.intr_enable) |
| 6400 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
| 6401 | else { |
| 6402 | mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); |
| 6403 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); |
| 6404 | } |
| 6405 | |
| 6406 | if (unlikely(rc)) |
| 6407 | rc = -EIO; |
| 6408 | return rc; |
| 6409 | } |
| 6410 | |
| 6411 | /** |
| 6412 | * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. |
| 6413 | * @phba: Pointer to HBA context object. |
| 6414 | * @type: The resource extent type to allocate. |
| 6415 | * |
| 6416 | * This function allocates the number of elements for the specified |
| 6417 | * resource type. |
| 6418 | **/ |
| 6419 | static int |
| 6420 | lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) |
| 6421 | { |
| 6422 | bool emb = false; |
| 6423 | uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; |
| 6424 | uint16_t rsrc_id, rsrc_start, j, k; |
| 6425 | uint16_t *ids; |
| 6426 | int i, rc; |
| 6427 | unsigned long longs; |
| 6428 | unsigned long *bmask; |
| 6429 | struct lpfc_rsrc_blks *rsrc_blks; |
| 6430 | LPFC_MBOXQ_t *mbox; |
| 6431 | uint32_t length; |
| 6432 | struct lpfc_id_range *id_array = NULL; |
| 6433 | void *virtaddr = NULL; |
| 6434 | struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; |
| 6435 | struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; |
| 6436 | struct list_head *ext_blk_list; |
| 6437 | |
| 6438 | rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, |
| 6439 | extnt_count: &rsrc_cnt, |
| 6440 | extnt_size: &rsrc_size); |
| 6441 | if (unlikely(rc)) |
| 6442 | return -EIO; |
| 6443 | |
| 6444 | if ((rsrc_cnt == 0) || (rsrc_size == 0)) { |
| 6445 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 6446 | "3009 No available Resource Extents " |
| 6447 | "for resource type 0x%x: Count: 0x%x, " |
| 6448 | "Size 0x%x\n" , type, rsrc_cnt, |
| 6449 | rsrc_size); |
| 6450 | return -ENOMEM; |
| 6451 | } |
| 6452 | |
| 6453 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, |
| 6454 | "2903 Post resource extents type-0x%x: " |
| 6455 | "count:%d, size %d\n" , type, rsrc_cnt, rsrc_size); |
| 6456 | |
| 6457 | mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 6458 | if (!mbox) |
| 6459 | return -ENOMEM; |
| 6460 | |
| 6461 | rc = lpfc_sli4_cfg_post_extnts(phba, extnt_cnt: rsrc_cnt, type, emb: &emb, mbox); |
| 6462 | if (unlikely(rc)) { |
| 6463 | rc = -EIO; |
| 6464 | goto err_exit; |
| 6465 | } |
| 6466 | |
| 6467 | /* |
| 6468 | * Figure out where the response is located. Then get local pointers |
| 6469 | * to the response data. The port does not guarantee to respond to |
| 6470 | * all extents counts request so update the local variable with the |
| 6471 | * allocated count from the port. |
| 6472 | */ |
| 6473 | if (emb == LPFC_SLI4_MBX_EMBED) { |
| 6474 | rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; |
| 6475 | id_array = &rsrc_ext->u.rsp.id[0]; |
| 6476 | rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); |
| 6477 | } else { |
| 6478 | virtaddr = mbox->sge_array->addr[0]; |
| 6479 | n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; |
| 6480 | rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); |
| 6481 | id_array = &n_rsrc->id; |
| 6482 | } |
| 6483 | |
| 6484 | longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; |
| 6485 | rsrc_id_cnt = rsrc_cnt * rsrc_size; |
| 6486 | |
| 6487 | /* |
| 6488 | * Based on the resource size and count, correct the base and max |
| 6489 | * resource values. |
| 6490 | */ |
| 6491 | length = sizeof(struct lpfc_rsrc_blks); |
| 6492 | switch (type) { |
| 6493 | case LPFC_RSC_TYPE_FCOE_RPI: |
| 6494 | phba->sli4_hba.rpi_bmask = kcalloc(longs, |
| 6495 | sizeof(unsigned long), |
| 6496 | GFP_KERNEL); |
| 6497 | if (unlikely(!phba->sli4_hba.rpi_bmask)) { |
| 6498 | rc = -ENOMEM; |
| 6499 | goto err_exit; |
| 6500 | } |
| 6501 | phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, |
| 6502 | sizeof(uint16_t), |
| 6503 | GFP_KERNEL); |
| 6504 | if (unlikely(!phba->sli4_hba.rpi_ids)) { |
| 6505 | kfree(objp: phba->sli4_hba.rpi_bmask); |
| 6506 | rc = -ENOMEM; |
| 6507 | goto err_exit; |
| 6508 | } |
| 6509 | |
| 6510 | /* |
| 6511 | * The next_rpi was initialized with the maximum available |
| 6512 | * count but the port may allocate a smaller number. Catch |
| 6513 | * that case and update the next_rpi. |
| 6514 | */ |
| 6515 | phba->sli4_hba.next_rpi = rsrc_id_cnt; |
| 6516 | |
| 6517 | /* Initialize local ptrs for common extent processing later. */ |
| 6518 | bmask = phba->sli4_hba.rpi_bmask; |
| 6519 | ids = phba->sli4_hba.rpi_ids; |
| 6520 | ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; |
| 6521 | break; |
| 6522 | case LPFC_RSC_TYPE_FCOE_VPI: |
| 6523 | phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), |
| 6524 | GFP_KERNEL); |
| 6525 | if (unlikely(!phba->vpi_bmask)) { |
| 6526 | rc = -ENOMEM; |
| 6527 | goto err_exit; |
| 6528 | } |
| 6529 | phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), |
| 6530 | GFP_KERNEL); |
| 6531 | if (unlikely(!phba->vpi_ids)) { |
| 6532 | kfree(objp: phba->vpi_bmask); |
| 6533 | rc = -ENOMEM; |
| 6534 | goto err_exit; |
| 6535 | } |
| 6536 | |
| 6537 | /* Initialize local ptrs for common extent processing later. */ |
| 6538 | bmask = phba->vpi_bmask; |
| 6539 | ids = phba->vpi_ids; |
| 6540 | ext_blk_list = &phba->lpfc_vpi_blk_list; |
| 6541 | break; |
| 6542 | case LPFC_RSC_TYPE_FCOE_XRI: |
| 6543 | phba->sli4_hba.xri_bmask = kcalloc(longs, |
| 6544 | sizeof(unsigned long), |
| 6545 | GFP_KERNEL); |
| 6546 | if (unlikely(!phba->sli4_hba.xri_bmask)) { |
| 6547 | rc = -ENOMEM; |
| 6548 | goto err_exit; |
| 6549 | } |
| 6550 | phba->sli4_hba.max_cfg_param.xri_used = 0; |
| 6551 | phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, |
| 6552 | sizeof(uint16_t), |
| 6553 | GFP_KERNEL); |
| 6554 | if (unlikely(!phba->sli4_hba.xri_ids)) { |
| 6555 | kfree(objp: phba->sli4_hba.xri_bmask); |
| 6556 | rc = -ENOMEM; |
| 6557 | goto err_exit; |
| 6558 | } |
| 6559 | |
| 6560 | /* Initialize local ptrs for common extent processing later. */ |
| 6561 | bmask = phba->sli4_hba.xri_bmask; |
| 6562 | ids = phba->sli4_hba.xri_ids; |
| 6563 | ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; |
| 6564 | break; |
| 6565 | case LPFC_RSC_TYPE_FCOE_VFI: |
| 6566 | phba->sli4_hba.vfi_bmask = kcalloc(longs, |
| 6567 | sizeof(unsigned long), |
| 6568 | GFP_KERNEL); |
| 6569 | if (unlikely(!phba->sli4_hba.vfi_bmask)) { |
| 6570 | rc = -ENOMEM; |
| 6571 | goto err_exit; |
| 6572 | } |
| 6573 | phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, |
| 6574 | sizeof(uint16_t), |
| 6575 | GFP_KERNEL); |
| 6576 | if (unlikely(!phba->sli4_hba.vfi_ids)) { |
| 6577 | kfree(objp: phba->sli4_hba.vfi_bmask); |
| 6578 | rc = -ENOMEM; |
| 6579 | goto err_exit; |
| 6580 | } |
| 6581 | |
| 6582 | /* Initialize local ptrs for common extent processing later. */ |
| 6583 | bmask = phba->sli4_hba.vfi_bmask; |
| 6584 | ids = phba->sli4_hba.vfi_ids; |
| 6585 | ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; |
| 6586 | break; |
| 6587 | default: |
| 6588 | /* Unsupported Opcode. Fail call. */ |
| 6589 | id_array = NULL; |
| 6590 | bmask = NULL; |
| 6591 | ids = NULL; |
| 6592 | ext_blk_list = NULL; |
| 6593 | goto err_exit; |
| 6594 | } |
| 6595 | |
| 6596 | /* |
| 6597 | * Complete initializing the extent configuration with the |
| 6598 | * allocated ids assigned to this function. The bitmask serves |
| 6599 | * as an index into the array and manages the available ids. The |
| 6600 | * array just stores the ids communicated to the port via the wqes. |
| 6601 | */ |
| 6602 | for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { |
| 6603 | if ((i % 2) == 0) |
| 6604 | rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, |
| 6605 | &id_array[k]); |
| 6606 | else |
| 6607 | rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, |
| 6608 | &id_array[k]); |
| 6609 | |
| 6610 | rsrc_blks = kzalloc(length, GFP_KERNEL); |
| 6611 | if (unlikely(!rsrc_blks)) { |
| 6612 | rc = -ENOMEM; |
| 6613 | kfree(objp: bmask); |
| 6614 | kfree(objp: ids); |
| 6615 | goto err_exit; |
| 6616 | } |
| 6617 | rsrc_blks->rsrc_start = rsrc_id; |
| 6618 | rsrc_blks->rsrc_size = rsrc_size; |
| 6619 | list_add_tail(new: &rsrc_blks->list, head: ext_blk_list); |
| 6620 | rsrc_start = rsrc_id; |
| 6621 | if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { |
| 6622 | phba->sli4_hba.io_xri_start = rsrc_start + |
| 6623 | lpfc_sli4_get_iocb_cnt(phba); |
| 6624 | } |
| 6625 | |
| 6626 | while (rsrc_id < (rsrc_start + rsrc_size)) { |
| 6627 | ids[j] = rsrc_id; |
| 6628 | rsrc_id++; |
| 6629 | j++; |
| 6630 | } |
| 6631 | /* Entire word processed. Get next word.*/ |
| 6632 | if ((i % 2) == 1) |
| 6633 | k++; |
| 6634 | } |
| 6635 | err_exit: |
| 6636 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 6637 | return rc; |
| 6638 | } |
| 6639 | |
| 6640 | |
| 6641 | |
| 6642 | /** |
| 6643 | * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. |
| 6644 | * @phba: Pointer to HBA context object. |
| 6645 | * @type: the extent's type. |
| 6646 | * |
| 6647 | * This function deallocates all extents of a particular resource type. |
| 6648 | * SLI4 does not allow for deallocating a particular extent range. It |
| 6649 | * is the caller's responsibility to release all kernel memory resources. |
| 6650 | **/ |
| 6651 | static int |
| 6652 | lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) |
| 6653 | { |
| 6654 | int rc; |
| 6655 | uint32_t length, mbox_tmo = 0; |
| 6656 | LPFC_MBOXQ_t *mbox; |
| 6657 | struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; |
| 6658 | struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; |
| 6659 | |
| 6660 | mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 6661 | if (!mbox) |
| 6662 | return -ENOMEM; |
| 6663 | |
| 6664 | /* |
| 6665 | * This function sends an embedded mailbox because it only sends the |
| 6666 | * the resource type. All extents of this type are released by the |
| 6667 | * port. |
| 6668 | */ |
| 6669 | length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - |
| 6670 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 6671 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 6672 | LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, |
| 6673 | length, LPFC_SLI4_MBX_EMBED); |
| 6674 | |
| 6675 | /* Send an extents count of 0 - the dealloc doesn't use it. */ |
| 6676 | rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, |
| 6677 | LPFC_SLI4_MBX_EMBED); |
| 6678 | if (unlikely(rc)) { |
| 6679 | rc = -EIO; |
| 6680 | goto out_free_mbox; |
| 6681 | } |
| 6682 | if (!phba->sli4_hba.intr_enable) |
| 6683 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
| 6684 | else { |
| 6685 | mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); |
| 6686 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); |
| 6687 | } |
| 6688 | if (unlikely(rc)) { |
| 6689 | rc = -EIO; |
| 6690 | goto out_free_mbox; |
| 6691 | } |
| 6692 | |
| 6693 | dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; |
| 6694 | if (bf_get(lpfc_mbox_hdr_status, |
| 6695 | &dealloc_rsrc->header.cfg_shdr.response)) { |
| 6696 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 6697 | "2919 Failed to release resource extents " |
| 6698 | "for type %d - Status 0x%x Add'l Status 0x%x. " |
| 6699 | "Resource memory not released.\n" , |
| 6700 | type, |
| 6701 | bf_get(lpfc_mbox_hdr_status, |
| 6702 | &dealloc_rsrc->header.cfg_shdr.response), |
| 6703 | bf_get(lpfc_mbox_hdr_add_status, |
| 6704 | &dealloc_rsrc->header.cfg_shdr.response)); |
| 6705 | rc = -EIO; |
| 6706 | goto out_free_mbox; |
| 6707 | } |
| 6708 | |
| 6709 | /* Release kernel memory resources for the specific type. */ |
| 6710 | switch (type) { |
| 6711 | case LPFC_RSC_TYPE_FCOE_VPI: |
| 6712 | kfree(objp: phba->vpi_bmask); |
| 6713 | kfree(objp: phba->vpi_ids); |
| 6714 | bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); |
| 6715 | list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, |
| 6716 | &phba->lpfc_vpi_blk_list, list) { |
| 6717 | list_del_init(entry: &rsrc_blk->list); |
| 6718 | kfree(objp: rsrc_blk); |
| 6719 | } |
| 6720 | phba->sli4_hba.max_cfg_param.vpi_used = 0; |
| 6721 | break; |
| 6722 | case LPFC_RSC_TYPE_FCOE_XRI: |
| 6723 | kfree(objp: phba->sli4_hba.xri_bmask); |
| 6724 | kfree(objp: phba->sli4_hba.xri_ids); |
| 6725 | list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, |
| 6726 | &phba->sli4_hba.lpfc_xri_blk_list, list) { |
| 6727 | list_del_init(entry: &rsrc_blk->list); |
| 6728 | kfree(objp: rsrc_blk); |
| 6729 | } |
| 6730 | break; |
| 6731 | case LPFC_RSC_TYPE_FCOE_VFI: |
| 6732 | kfree(objp: phba->sli4_hba.vfi_bmask); |
| 6733 | kfree(objp: phba->sli4_hba.vfi_ids); |
| 6734 | bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); |
| 6735 | list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, |
| 6736 | &phba->sli4_hba.lpfc_vfi_blk_list, list) { |
| 6737 | list_del_init(entry: &rsrc_blk->list); |
| 6738 | kfree(objp: rsrc_blk); |
| 6739 | } |
| 6740 | break; |
| 6741 | case LPFC_RSC_TYPE_FCOE_RPI: |
| 6742 | /* RPI bitmask and physical id array are cleaned up earlier. */ |
| 6743 | list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, |
| 6744 | &phba->sli4_hba.lpfc_rpi_blk_list, list) { |
| 6745 | list_del_init(entry: &rsrc_blk->list); |
| 6746 | kfree(objp: rsrc_blk); |
| 6747 | } |
| 6748 | break; |
| 6749 | default: |
| 6750 | break; |
| 6751 | } |
| 6752 | |
| 6753 | bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); |
| 6754 | |
| 6755 | out_free_mbox: |
| 6756 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 6757 | return rc; |
| 6758 | } |
| 6759 | |
| 6760 | static void |
| 6761 | lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, |
| 6762 | uint32_t feature) |
| 6763 | { |
| 6764 | uint32_t len; |
| 6765 | u32 sig_freq = 0; |
| 6766 | |
| 6767 | len = sizeof(struct lpfc_mbx_set_feature) - |
| 6768 | sizeof(struct lpfc_sli4_cfg_mhdr); |
| 6769 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 6770 | LPFC_MBOX_OPCODE_SET_FEATURES, len, |
| 6771 | LPFC_SLI4_MBX_EMBED); |
| 6772 | |
| 6773 | switch (feature) { |
| 6774 | case LPFC_SET_UE_RECOVERY: |
| 6775 | bf_set(lpfc_mbx_set_feature_UER, |
| 6776 | &mbox->u.mqe.un.set_feature, 1); |
| 6777 | mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; |
| 6778 | mbox->u.mqe.un.set_feature.param_len = 8; |
| 6779 | break; |
| 6780 | case LPFC_SET_MDS_DIAGS: |
| 6781 | bf_set(lpfc_mbx_set_feature_mds, |
| 6782 | &mbox->u.mqe.un.set_feature, 1); |
| 6783 | bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, |
| 6784 | &mbox->u.mqe.un.set_feature, 1); |
| 6785 | mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; |
| 6786 | mbox->u.mqe.un.set_feature.param_len = 8; |
| 6787 | break; |
| 6788 | case LPFC_SET_CGN_SIGNAL: |
| 6789 | if (phba->cmf_active_mode == LPFC_CFG_OFF) |
| 6790 | sig_freq = 0; |
| 6791 | else |
| 6792 | sig_freq = phba->cgn_sig_freq; |
| 6793 | |
| 6794 | if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { |
| 6795 | bf_set(lpfc_mbx_set_feature_CGN_alarm_freq, |
| 6796 | &mbox->u.mqe.un.set_feature, sig_freq); |
| 6797 | bf_set(lpfc_mbx_set_feature_CGN_warn_freq, |
| 6798 | &mbox->u.mqe.un.set_feature, sig_freq); |
| 6799 | } |
| 6800 | |
| 6801 | if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY) |
| 6802 | bf_set(lpfc_mbx_set_feature_CGN_warn_freq, |
| 6803 | &mbox->u.mqe.un.set_feature, sig_freq); |
| 6804 | |
| 6805 | if (phba->cmf_active_mode == LPFC_CFG_OFF || |
| 6806 | phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED) |
| 6807 | sig_freq = 0; |
| 6808 | else |
| 6809 | sig_freq = lpfc_acqe_cgn_frequency; |
| 6810 | |
| 6811 | bf_set(lpfc_mbx_set_feature_CGN_acqe_freq, |
| 6812 | &mbox->u.mqe.un.set_feature, sig_freq); |
| 6813 | |
| 6814 | mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL; |
| 6815 | mbox->u.mqe.un.set_feature.param_len = 12; |
| 6816 | break; |
| 6817 | case LPFC_SET_DUAL_DUMP: |
| 6818 | bf_set(lpfc_mbx_set_feature_dd, |
| 6819 | &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP); |
| 6820 | bf_set(lpfc_mbx_set_feature_ddquery, |
| 6821 | &mbox->u.mqe.un.set_feature, 0); |
| 6822 | mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP; |
| 6823 | mbox->u.mqe.un.set_feature.param_len = 4; |
| 6824 | break; |
| 6825 | case LPFC_SET_ENABLE_MI: |
| 6826 | mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI; |
| 6827 | mbox->u.mqe.un.set_feature.param_len = 4; |
| 6828 | bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature, |
| 6829 | phba->pport->cfg_lun_queue_depth); |
| 6830 | bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature, |
| 6831 | phba->sli4_hba.pc_sli4_params.mi_ver); |
| 6832 | break; |
| 6833 | case LPFC_SET_LD_SIGNAL: |
| 6834 | mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL; |
| 6835 | mbox->u.mqe.un.set_feature.param_len = 16; |
| 6836 | bf_set(lpfc_mbx_set_feature_lds_qry, |
| 6837 | &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP); |
| 6838 | break; |
| 6839 | case LPFC_SET_ENABLE_CMF: |
| 6840 | mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF; |
| 6841 | mbox->u.mqe.un.set_feature.param_len = 4; |
| 6842 | bf_set(lpfc_mbx_set_feature_cmf, |
| 6843 | &mbox->u.mqe.un.set_feature, 1); |
| 6844 | break; |
| 6845 | } |
| 6846 | return; |
| 6847 | } |
| 6848 | |
| 6849 | /** |
| 6850 | * lpfc_ras_stop_fwlog: Disable FW logging by the adapter |
| 6851 | * @phba: Pointer to HBA context object. |
| 6852 | * |
| 6853 | * Disable FW logging into host memory on the adapter. To |
| 6854 | * be done before reading logs from the host memory. |
| 6855 | **/ |
| 6856 | void |
| 6857 | lpfc_ras_stop_fwlog(struct lpfc_hba *phba) |
| 6858 | { |
| 6859 | struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; |
| 6860 | |
| 6861 | spin_lock_irq(lock: &phba->ras_fwlog_lock); |
| 6862 | ras_fwlog->state = INACTIVE; |
| 6863 | spin_unlock_irq(lock: &phba->ras_fwlog_lock); |
| 6864 | |
| 6865 | /* Disable FW logging to host memory */ |
| 6866 | writel(LPFC_CTL_PDEV_CTL_DDL_RAS, |
| 6867 | addr: phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); |
| 6868 | |
| 6869 | /* Wait 10ms for firmware to stop using DMA buffer */ |
| 6870 | usleep_range(min: 10 * 1000, max: 20 * 1000); |
| 6871 | } |
| 6872 | |
| 6873 | /** |
| 6874 | * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging. |
| 6875 | * @phba: Pointer to HBA context object. |
| 6876 | * |
| 6877 | * This function is called to free memory allocated for RAS FW logging |
| 6878 | * support in the driver. |
| 6879 | **/ |
| 6880 | void |
| 6881 | lpfc_sli4_ras_dma_free(struct lpfc_hba *phba) |
| 6882 | { |
| 6883 | struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; |
| 6884 | struct lpfc_dmabuf *dmabuf, *next; |
| 6885 | |
| 6886 | if (!list_empty(head: &ras_fwlog->fwlog_buff_list)) { |
| 6887 | list_for_each_entry_safe(dmabuf, next, |
| 6888 | &ras_fwlog->fwlog_buff_list, |
| 6889 | list) { |
| 6890 | list_del(entry: &dmabuf->list); |
| 6891 | dma_free_coherent(dev: &phba->pcidev->dev, |
| 6892 | LPFC_RAS_MAX_ENTRY_SIZE, |
| 6893 | cpu_addr: dmabuf->virt, dma_handle: dmabuf->phys); |
| 6894 | kfree(objp: dmabuf); |
| 6895 | } |
| 6896 | } |
| 6897 | |
| 6898 | if (ras_fwlog->lwpd.virt) { |
| 6899 | dma_free_coherent(dev: &phba->pcidev->dev, |
| 6900 | size: sizeof(uint32_t) * 2, |
| 6901 | cpu_addr: ras_fwlog->lwpd.virt, |
| 6902 | dma_handle: ras_fwlog->lwpd.phys); |
| 6903 | ras_fwlog->lwpd.virt = NULL; |
| 6904 | } |
| 6905 | |
| 6906 | spin_lock_irq(lock: &phba->ras_fwlog_lock); |
| 6907 | ras_fwlog->state = INACTIVE; |
| 6908 | spin_unlock_irq(lock: &phba->ras_fwlog_lock); |
| 6909 | } |
| 6910 | |
| 6911 | /** |
| 6912 | * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support |
| 6913 | * @phba: Pointer to HBA context object. |
| 6914 | * @fwlog_buff_count: Count of buffers to be created. |
| 6915 | * |
| 6916 | * This routine DMA memory for Log Write Position Data[LPWD] and buffer |
| 6917 | * to update FW log is posted to the adapter. |
| 6918 | * Buffer count is calculated based on module param ras_fwlog_buffsize |
| 6919 | * Size of each buffer posted to FW is 64K. |
| 6920 | **/ |
| 6921 | |
| 6922 | static int |
| 6923 | lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, |
| 6924 | uint32_t fwlog_buff_count) |
| 6925 | { |
| 6926 | struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; |
| 6927 | struct lpfc_dmabuf *dmabuf; |
| 6928 | int rc = 0, i = 0; |
| 6929 | |
| 6930 | /* Initialize List */ |
| 6931 | INIT_LIST_HEAD(list: &ras_fwlog->fwlog_buff_list); |
| 6932 | |
| 6933 | /* Allocate memory for the LWPD */ |
| 6934 | ras_fwlog->lwpd.virt = dma_alloc_coherent(dev: &phba->pcidev->dev, |
| 6935 | size: sizeof(uint32_t) * 2, |
| 6936 | dma_handle: &ras_fwlog->lwpd.phys, |
| 6937 | GFP_KERNEL); |
| 6938 | if (!ras_fwlog->lwpd.virt) { |
| 6939 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 6940 | "6185 LWPD Memory Alloc Failed\n" ); |
| 6941 | |
| 6942 | return -ENOMEM; |
| 6943 | } |
| 6944 | |
| 6945 | ras_fwlog->fw_buffcount = fwlog_buff_count; |
| 6946 | for (i = 0; i < ras_fwlog->fw_buffcount; i++) { |
| 6947 | dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), |
| 6948 | GFP_KERNEL); |
| 6949 | if (!dmabuf) { |
| 6950 | rc = -ENOMEM; |
| 6951 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
| 6952 | "6186 Memory Alloc failed FW logging" ); |
| 6953 | goto free_mem; |
| 6954 | } |
| 6955 | |
| 6956 | dmabuf->virt = dma_alloc_coherent(dev: &phba->pcidev->dev, |
| 6957 | LPFC_RAS_MAX_ENTRY_SIZE, |
| 6958 | dma_handle: &dmabuf->phys, GFP_KERNEL); |
| 6959 | if (!dmabuf->virt) { |
| 6960 | kfree(objp: dmabuf); |
| 6961 | rc = -ENOMEM; |
| 6962 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
| 6963 | "6187 DMA Alloc Failed FW logging" ); |
| 6964 | goto free_mem; |
| 6965 | } |
| 6966 | dmabuf->buffer_tag = i; |
| 6967 | list_add_tail(new: &dmabuf->list, head: &ras_fwlog->fwlog_buff_list); |
| 6968 | } |
| 6969 | |
| 6970 | free_mem: |
| 6971 | if (rc) |
| 6972 | lpfc_sli4_ras_dma_free(phba); |
| 6973 | |
| 6974 | return rc; |
| 6975 | } |
| 6976 | |
| 6977 | /** |
| 6978 | * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command |
| 6979 | * @phba: pointer to lpfc hba data structure. |
| 6980 | * @pmb: pointer to the driver internal queue element for mailbox command. |
| 6981 | * |
| 6982 | * Completion handler for driver's RAS MBX command to the device. |
| 6983 | **/ |
| 6984 | static void |
| 6985 | lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
| 6986 | { |
| 6987 | MAILBOX_t *mb; |
| 6988 | union lpfc_sli4_cfg_shdr *shdr; |
| 6989 | uint32_t shdr_status, shdr_add_status; |
| 6990 | struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; |
| 6991 | |
| 6992 | mb = &pmb->u.mb; |
| 6993 | |
| 6994 | shdr = (union lpfc_sli4_cfg_shdr *) |
| 6995 | &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; |
| 6996 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 6997 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 6998 | |
| 6999 | if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { |
| 7000 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 7001 | "6188 FW LOG mailbox " |
| 7002 | "completed with status x%x add_status x%x," |
| 7003 | " mbx status x%x\n" , |
| 7004 | shdr_status, shdr_add_status, mb->mbxStatus); |
| 7005 | |
| 7006 | ras_fwlog->ras_hwsupport = false; |
| 7007 | goto disable_ras; |
| 7008 | } |
| 7009 | |
| 7010 | spin_lock_irq(lock: &phba->ras_fwlog_lock); |
| 7011 | ras_fwlog->state = ACTIVE; |
| 7012 | spin_unlock_irq(lock: &phba->ras_fwlog_lock); |
| 7013 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
| 7014 | |
| 7015 | return; |
| 7016 | |
| 7017 | disable_ras: |
| 7018 | /* Free RAS DMA memory */ |
| 7019 | lpfc_sli4_ras_dma_free(phba); |
| 7020 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
| 7021 | } |
| 7022 | |
| 7023 | /** |
| 7024 | * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command |
| 7025 | * @phba: pointer to lpfc hba data structure. |
| 7026 | * @fwlog_level: Logging verbosity level. |
| 7027 | * @fwlog_enable: Enable/Disable logging. |
| 7028 | * |
| 7029 | * Initialize memory and post mailbox command to enable FW logging in host |
| 7030 | * memory. |
| 7031 | **/ |
| 7032 | int |
| 7033 | lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, |
| 7034 | uint32_t fwlog_level, |
| 7035 | uint32_t fwlog_enable) |
| 7036 | { |
| 7037 | struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; |
| 7038 | struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; |
| 7039 | struct lpfc_dmabuf *dmabuf; |
| 7040 | LPFC_MBOXQ_t *mbox; |
| 7041 | uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; |
| 7042 | int rc = 0; |
| 7043 | |
| 7044 | spin_lock_irq(lock: &phba->ras_fwlog_lock); |
| 7045 | ras_fwlog->state = INACTIVE; |
| 7046 | spin_unlock_irq(lock: &phba->ras_fwlog_lock); |
| 7047 | |
| 7048 | fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * |
| 7049 | phba->cfg_ras_fwlog_buffsize); |
| 7050 | fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); |
| 7051 | |
| 7052 | /* |
| 7053 | * If re-enabling FW logging support use earlier allocated |
| 7054 | * DMA buffers while posting MBX command. |
| 7055 | **/ |
| 7056 | if (!ras_fwlog->lwpd.virt) { |
| 7057 | rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_buff_count: fwlog_entry_count); |
| 7058 | if (rc) { |
| 7059 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
| 7060 | "6189 FW Log Memory Allocation Failed" ); |
| 7061 | return rc; |
| 7062 | } |
| 7063 | } |
| 7064 | |
| 7065 | /* Setup Mailbox command */ |
| 7066 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 7067 | if (!mbox) { |
| 7068 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 7069 | "6190 RAS MBX Alloc Failed" ); |
| 7070 | rc = -ENOMEM; |
| 7071 | goto mem_free; |
| 7072 | } |
| 7073 | |
| 7074 | ras_fwlog->fw_loglevel = fwlog_level; |
| 7075 | len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - |
| 7076 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 7077 | |
| 7078 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, |
| 7079 | LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, |
| 7080 | len, LPFC_SLI4_MBX_EMBED); |
| 7081 | |
| 7082 | mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; |
| 7083 | bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, |
| 7084 | fwlog_enable); |
| 7085 | bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, |
| 7086 | ras_fwlog->fw_loglevel); |
| 7087 | bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, |
| 7088 | ras_fwlog->fw_buffcount); |
| 7089 | bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, |
| 7090 | LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); |
| 7091 | |
| 7092 | /* Update DMA buffer address */ |
| 7093 | list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { |
| 7094 | memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); |
| 7095 | |
| 7096 | mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = |
| 7097 | putPaddrLow(dmabuf->phys); |
| 7098 | |
| 7099 | mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = |
| 7100 | putPaddrHigh(dmabuf->phys); |
| 7101 | } |
| 7102 | |
| 7103 | /* Update LPWD address */ |
| 7104 | mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); |
| 7105 | mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); |
| 7106 | |
| 7107 | spin_lock_irq(lock: &phba->ras_fwlog_lock); |
| 7108 | ras_fwlog->state = REG_INPROGRESS; |
| 7109 | spin_unlock_irq(lock: &phba->ras_fwlog_lock); |
| 7110 | mbox->vport = phba->pport; |
| 7111 | mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; |
| 7112 | |
| 7113 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
| 7114 | |
| 7115 | if (rc == MBX_NOT_FINISHED) { |
| 7116 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 7117 | "6191 FW-Log Mailbox failed. " |
| 7118 | "status %d mbxStatus : x%x" , rc, |
| 7119 | bf_get(lpfc_mqe_status, &mbox->u.mqe)); |
| 7120 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 7121 | rc = -EIO; |
| 7122 | goto mem_free; |
| 7123 | } else |
| 7124 | rc = 0; |
| 7125 | mem_free: |
| 7126 | if (rc) |
| 7127 | lpfc_sli4_ras_dma_free(phba); |
| 7128 | |
| 7129 | return rc; |
| 7130 | } |
| 7131 | |
| 7132 | /** |
| 7133 | * lpfc_sli4_ras_setup - Check if RAS supported on the adapter |
| 7134 | * @phba: Pointer to HBA context object. |
| 7135 | * |
| 7136 | * Check if RAS is supported on the adapter and initialize it. |
| 7137 | **/ |
| 7138 | void |
| 7139 | lpfc_sli4_ras_setup(struct lpfc_hba *phba) |
| 7140 | { |
| 7141 | /* Check RAS FW Log needs to be enabled or not */ |
| 7142 | if (lpfc_check_fwlog_support(phba)) |
| 7143 | return; |
| 7144 | |
| 7145 | lpfc_sli4_ras_fwlog_init(phba, fwlog_level: phba->cfg_ras_fwlog_level, |
| 7146 | LPFC_RAS_ENABLE_LOGGING); |
| 7147 | } |
| 7148 | |
| 7149 | /** |
| 7150 | * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. |
| 7151 | * @phba: Pointer to HBA context object. |
| 7152 | * |
| 7153 | * This function allocates all SLI4 resource identifiers. |
| 7154 | **/ |
| 7155 | int |
| 7156 | lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) |
| 7157 | { |
| 7158 | int i, rc, error = 0; |
| 7159 | uint16_t count, base; |
| 7160 | unsigned long longs; |
| 7161 | |
| 7162 | if (!phba->sli4_hba.rpi_hdrs_in_use) |
| 7163 | phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; |
| 7164 | if (phba->sli4_hba.extents_in_use) { |
| 7165 | /* |
| 7166 | * The port supports resource extents. The XRI, VPI, VFI, RPI |
| 7167 | * resource extent count must be read and allocated before |
| 7168 | * provisioning the resource id arrays. |
| 7169 | */ |
| 7170 | if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == |
| 7171 | LPFC_IDX_RSRC_RDY) { |
| 7172 | /* |
| 7173 | * Extent-based resources are set - the driver could |
| 7174 | * be in a port reset. Figure out if any corrective |
| 7175 | * actions need to be taken. |
| 7176 | */ |
| 7177 | rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, |
| 7178 | LPFC_RSC_TYPE_FCOE_VFI); |
| 7179 | if (rc != 0) |
| 7180 | error++; |
| 7181 | rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, |
| 7182 | LPFC_RSC_TYPE_FCOE_VPI); |
| 7183 | if (rc != 0) |
| 7184 | error++; |
| 7185 | rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, |
| 7186 | LPFC_RSC_TYPE_FCOE_XRI); |
| 7187 | if (rc != 0) |
| 7188 | error++; |
| 7189 | rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, |
| 7190 | LPFC_RSC_TYPE_FCOE_RPI); |
| 7191 | if (rc != 0) |
| 7192 | error++; |
| 7193 | |
| 7194 | /* |
| 7195 | * It's possible that the number of resources |
| 7196 | * provided to this port instance changed between |
| 7197 | * resets. Detect this condition and reallocate |
| 7198 | * resources. Otherwise, there is no action. |
| 7199 | */ |
| 7200 | if (error) { |
| 7201 | lpfc_printf_log(phba, KERN_INFO, |
| 7202 | LOG_MBOX | LOG_INIT, |
| 7203 | "2931 Detected extent resource " |
| 7204 | "change. Reallocating all " |
| 7205 | "extents.\n" ); |
| 7206 | rc = lpfc_sli4_dealloc_extent(phba, |
| 7207 | LPFC_RSC_TYPE_FCOE_VFI); |
| 7208 | rc = lpfc_sli4_dealloc_extent(phba, |
| 7209 | LPFC_RSC_TYPE_FCOE_VPI); |
| 7210 | rc = lpfc_sli4_dealloc_extent(phba, |
| 7211 | LPFC_RSC_TYPE_FCOE_XRI); |
| 7212 | rc = lpfc_sli4_dealloc_extent(phba, |
| 7213 | LPFC_RSC_TYPE_FCOE_RPI); |
| 7214 | } else |
| 7215 | return 0; |
| 7216 | } |
| 7217 | |
| 7218 | rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); |
| 7219 | if (unlikely(rc)) |
| 7220 | goto err_exit; |
| 7221 | |
| 7222 | rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); |
| 7223 | if (unlikely(rc)) |
| 7224 | goto err_exit; |
| 7225 | |
| 7226 | rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); |
| 7227 | if (unlikely(rc)) |
| 7228 | goto err_exit; |
| 7229 | |
| 7230 | rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); |
| 7231 | if (unlikely(rc)) |
| 7232 | goto err_exit; |
| 7233 | bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, |
| 7234 | LPFC_IDX_RSRC_RDY); |
| 7235 | return rc; |
| 7236 | } else { |
| 7237 | /* |
| 7238 | * The port does not support resource extents. The XRI, VPI, |
| 7239 | * VFI, RPI resource ids were determined from READ_CONFIG. |
| 7240 | * Just allocate the bitmasks and provision the resource id |
| 7241 | * arrays. If a port reset is active, the resources don't |
| 7242 | * need any action - just exit. |
| 7243 | */ |
| 7244 | if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == |
| 7245 | LPFC_IDX_RSRC_RDY) { |
| 7246 | lpfc_sli4_dealloc_resource_identifiers(phba); |
| 7247 | lpfc_sli4_remove_rpis(phba); |
| 7248 | } |
| 7249 | /* RPIs. */ |
| 7250 | count = phba->sli4_hba.max_cfg_param.max_rpi; |
| 7251 | if (count <= 0) { |
| 7252 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 7253 | "3279 Invalid provisioning of " |
| 7254 | "rpi:%d\n" , count); |
| 7255 | rc = -EINVAL; |
| 7256 | goto err_exit; |
| 7257 | } |
| 7258 | base = phba->sli4_hba.max_cfg_param.rpi_base; |
| 7259 | longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; |
| 7260 | phba->sli4_hba.rpi_bmask = kcalloc(longs, |
| 7261 | sizeof(unsigned long), |
| 7262 | GFP_KERNEL); |
| 7263 | if (unlikely(!phba->sli4_hba.rpi_bmask)) { |
| 7264 | rc = -ENOMEM; |
| 7265 | goto err_exit; |
| 7266 | } |
| 7267 | phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), |
| 7268 | GFP_KERNEL); |
| 7269 | if (unlikely(!phba->sli4_hba.rpi_ids)) { |
| 7270 | rc = -ENOMEM; |
| 7271 | goto free_rpi_bmask; |
| 7272 | } |
| 7273 | |
| 7274 | for (i = 0; i < count; i++) |
| 7275 | phba->sli4_hba.rpi_ids[i] = base + i; |
| 7276 | |
| 7277 | /* VPIs. */ |
| 7278 | count = phba->sli4_hba.max_cfg_param.max_vpi; |
| 7279 | if (count <= 0) { |
| 7280 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 7281 | "3280 Invalid provisioning of " |
| 7282 | "vpi:%d\n" , count); |
| 7283 | rc = -EINVAL; |
| 7284 | goto free_rpi_ids; |
| 7285 | } |
| 7286 | base = phba->sli4_hba.max_cfg_param.vpi_base; |
| 7287 | longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; |
| 7288 | phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), |
| 7289 | GFP_KERNEL); |
| 7290 | if (unlikely(!phba->vpi_bmask)) { |
| 7291 | rc = -ENOMEM; |
| 7292 | goto free_rpi_ids; |
| 7293 | } |
| 7294 | phba->vpi_ids = kcalloc(count, sizeof(uint16_t), |
| 7295 | GFP_KERNEL); |
| 7296 | if (unlikely(!phba->vpi_ids)) { |
| 7297 | rc = -ENOMEM; |
| 7298 | goto free_vpi_bmask; |
| 7299 | } |
| 7300 | |
| 7301 | for (i = 0; i < count; i++) |
| 7302 | phba->vpi_ids[i] = base + i; |
| 7303 | |
| 7304 | /* XRIs. */ |
| 7305 | count = phba->sli4_hba.max_cfg_param.max_xri; |
| 7306 | if (count <= 0) { |
| 7307 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 7308 | "3281 Invalid provisioning of " |
| 7309 | "xri:%d\n" , count); |
| 7310 | rc = -EINVAL; |
| 7311 | goto free_vpi_ids; |
| 7312 | } |
| 7313 | base = phba->sli4_hba.max_cfg_param.xri_base; |
| 7314 | longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; |
| 7315 | phba->sli4_hba.xri_bmask = kcalloc(longs, |
| 7316 | sizeof(unsigned long), |
| 7317 | GFP_KERNEL); |
| 7318 | if (unlikely(!phba->sli4_hba.xri_bmask)) { |
| 7319 | rc = -ENOMEM; |
| 7320 | goto free_vpi_ids; |
| 7321 | } |
| 7322 | phba->sli4_hba.max_cfg_param.xri_used = 0; |
| 7323 | phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), |
| 7324 | GFP_KERNEL); |
| 7325 | if (unlikely(!phba->sli4_hba.xri_ids)) { |
| 7326 | rc = -ENOMEM; |
| 7327 | goto free_xri_bmask; |
| 7328 | } |
| 7329 | |
| 7330 | for (i = 0; i < count; i++) |
| 7331 | phba->sli4_hba.xri_ids[i] = base + i; |
| 7332 | |
| 7333 | /* VFIs. */ |
| 7334 | count = phba->sli4_hba.max_cfg_param.max_vfi; |
| 7335 | if (count <= 0) { |
| 7336 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 7337 | "3282 Invalid provisioning of " |
| 7338 | "vfi:%d\n" , count); |
| 7339 | rc = -EINVAL; |
| 7340 | goto free_xri_ids; |
| 7341 | } |
| 7342 | base = phba->sli4_hba.max_cfg_param.vfi_base; |
| 7343 | longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; |
| 7344 | phba->sli4_hba.vfi_bmask = kcalloc(longs, |
| 7345 | sizeof(unsigned long), |
| 7346 | GFP_KERNEL); |
| 7347 | if (unlikely(!phba->sli4_hba.vfi_bmask)) { |
| 7348 | rc = -ENOMEM; |
| 7349 | goto free_xri_ids; |
| 7350 | } |
| 7351 | phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), |
| 7352 | GFP_KERNEL); |
| 7353 | if (unlikely(!phba->sli4_hba.vfi_ids)) { |
| 7354 | rc = -ENOMEM; |
| 7355 | goto free_vfi_bmask; |
| 7356 | } |
| 7357 | |
| 7358 | for (i = 0; i < count; i++) |
| 7359 | phba->sli4_hba.vfi_ids[i] = base + i; |
| 7360 | |
| 7361 | /* |
| 7362 | * Mark all resources ready. An HBA reset doesn't need |
| 7363 | * to reset the initialization. |
| 7364 | */ |
| 7365 | bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, |
| 7366 | LPFC_IDX_RSRC_RDY); |
| 7367 | return 0; |
| 7368 | } |
| 7369 | |
| 7370 | free_vfi_bmask: |
| 7371 | kfree(objp: phba->sli4_hba.vfi_bmask); |
| 7372 | phba->sli4_hba.vfi_bmask = NULL; |
| 7373 | free_xri_ids: |
| 7374 | kfree(objp: phba->sli4_hba.xri_ids); |
| 7375 | phba->sli4_hba.xri_ids = NULL; |
| 7376 | free_xri_bmask: |
| 7377 | kfree(objp: phba->sli4_hba.xri_bmask); |
| 7378 | phba->sli4_hba.xri_bmask = NULL; |
| 7379 | free_vpi_ids: |
| 7380 | kfree(objp: phba->vpi_ids); |
| 7381 | phba->vpi_ids = NULL; |
| 7382 | free_vpi_bmask: |
| 7383 | kfree(objp: phba->vpi_bmask); |
| 7384 | phba->vpi_bmask = NULL; |
| 7385 | free_rpi_ids: |
| 7386 | kfree(objp: phba->sli4_hba.rpi_ids); |
| 7387 | phba->sli4_hba.rpi_ids = NULL; |
| 7388 | free_rpi_bmask: |
| 7389 | kfree(objp: phba->sli4_hba.rpi_bmask); |
| 7390 | phba->sli4_hba.rpi_bmask = NULL; |
| 7391 | err_exit: |
| 7392 | return rc; |
| 7393 | } |
| 7394 | |
| 7395 | /** |
| 7396 | * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. |
| 7397 | * @phba: Pointer to HBA context object. |
| 7398 | * |
| 7399 | * This function allocates the number of elements for the specified |
| 7400 | * resource type. |
| 7401 | **/ |
| 7402 | int |
| 7403 | lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) |
| 7404 | { |
| 7405 | if (phba->sli4_hba.extents_in_use) { |
| 7406 | lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); |
| 7407 | lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); |
| 7408 | lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); |
| 7409 | lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); |
| 7410 | } else { |
| 7411 | kfree(objp: phba->vpi_bmask); |
| 7412 | phba->sli4_hba.max_cfg_param.vpi_used = 0; |
| 7413 | kfree(objp: phba->vpi_ids); |
| 7414 | bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); |
| 7415 | kfree(objp: phba->sli4_hba.xri_bmask); |
| 7416 | kfree(objp: phba->sli4_hba.xri_ids); |
| 7417 | kfree(objp: phba->sli4_hba.vfi_bmask); |
| 7418 | kfree(objp: phba->sli4_hba.vfi_ids); |
| 7419 | bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); |
| 7420 | bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); |
| 7421 | } |
| 7422 | |
| 7423 | return 0; |
| 7424 | } |
| 7425 | |
| 7426 | /** |
| 7427 | * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. |
| 7428 | * @phba: Pointer to HBA context object. |
| 7429 | * @type: The resource extent type. |
| 7430 | * @extnt_cnt: buffer to hold port extent count response |
| 7431 | * @extnt_size: buffer to hold port extent size response. |
| 7432 | * |
| 7433 | * This function calls the port to read the host allocated extents |
| 7434 | * for a particular type. |
| 7435 | **/ |
| 7436 | int |
| 7437 | lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, |
| 7438 | uint16_t *extnt_cnt, uint16_t *extnt_size) |
| 7439 | { |
| 7440 | bool emb; |
| 7441 | int rc = 0; |
| 7442 | uint16_t curr_blks = 0; |
| 7443 | uint32_t req_len, emb_len; |
| 7444 | uint32_t alloc_len, mbox_tmo; |
| 7445 | struct list_head *blk_list_head; |
| 7446 | struct lpfc_rsrc_blks *rsrc_blk; |
| 7447 | LPFC_MBOXQ_t *mbox; |
| 7448 | void *virtaddr = NULL; |
| 7449 | struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; |
| 7450 | struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; |
| 7451 | union lpfc_sli4_cfg_shdr *shdr; |
| 7452 | |
| 7453 | switch (type) { |
| 7454 | case LPFC_RSC_TYPE_FCOE_VPI: |
| 7455 | blk_list_head = &phba->lpfc_vpi_blk_list; |
| 7456 | break; |
| 7457 | case LPFC_RSC_TYPE_FCOE_XRI: |
| 7458 | blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; |
| 7459 | break; |
| 7460 | case LPFC_RSC_TYPE_FCOE_VFI: |
| 7461 | blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; |
| 7462 | break; |
| 7463 | case LPFC_RSC_TYPE_FCOE_RPI: |
| 7464 | blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; |
| 7465 | break; |
| 7466 | default: |
| 7467 | return -EIO; |
| 7468 | } |
| 7469 | |
| 7470 | /* Count the number of extents currently allocatd for this type. */ |
| 7471 | list_for_each_entry(rsrc_blk, blk_list_head, list) { |
| 7472 | if (curr_blks == 0) { |
| 7473 | /* |
| 7474 | * The GET_ALLOCATED mailbox does not return the size, |
| 7475 | * just the count. The size should be just the size |
| 7476 | * stored in the current allocated block and all sizes |
| 7477 | * for an extent type are the same so set the return |
| 7478 | * value now. |
| 7479 | */ |
| 7480 | *extnt_size = rsrc_blk->rsrc_size; |
| 7481 | } |
| 7482 | curr_blks++; |
| 7483 | } |
| 7484 | |
| 7485 | /* |
| 7486 | * Calculate the size of an embedded mailbox. The uint32_t |
| 7487 | * accounts for extents-specific word. |
| 7488 | */ |
| 7489 | emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - |
| 7490 | sizeof(uint32_t); |
| 7491 | |
| 7492 | /* |
| 7493 | * Presume the allocation and response will fit into an embedded |
| 7494 | * mailbox. If not true, reconfigure to a non-embedded mailbox. |
| 7495 | */ |
| 7496 | emb = LPFC_SLI4_MBX_EMBED; |
| 7497 | req_len = emb_len; |
| 7498 | if (req_len > emb_len) { |
| 7499 | req_len = curr_blks * sizeof(uint16_t) + |
| 7500 | sizeof(union lpfc_sli4_cfg_shdr) + |
| 7501 | sizeof(uint32_t); |
| 7502 | emb = LPFC_SLI4_MBX_NEMBED; |
| 7503 | } |
| 7504 | |
| 7505 | mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 7506 | if (!mbox) |
| 7507 | return -ENOMEM; |
| 7508 | memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); |
| 7509 | |
| 7510 | alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 7511 | LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, |
| 7512 | req_len, emb); |
| 7513 | if (alloc_len < req_len) { |
| 7514 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 7515 | "2983 Allocated DMA memory size (x%x) is " |
| 7516 | "less than the requested DMA memory " |
| 7517 | "size (x%x)\n" , alloc_len, req_len); |
| 7518 | rc = -ENOMEM; |
| 7519 | goto err_exit; |
| 7520 | } |
| 7521 | rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); |
| 7522 | if (unlikely(rc)) { |
| 7523 | rc = -EIO; |
| 7524 | goto err_exit; |
| 7525 | } |
| 7526 | |
| 7527 | if (!phba->sli4_hba.intr_enable) |
| 7528 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
| 7529 | else { |
| 7530 | mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); |
| 7531 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); |
| 7532 | } |
| 7533 | |
| 7534 | if (unlikely(rc)) { |
| 7535 | rc = -EIO; |
| 7536 | goto err_exit; |
| 7537 | } |
| 7538 | |
| 7539 | /* |
| 7540 | * Figure out where the response is located. Then get local pointers |
| 7541 | * to the response data. The port does not guarantee to respond to |
| 7542 | * all extents counts request so update the local variable with the |
| 7543 | * allocated count from the port. |
| 7544 | */ |
| 7545 | if (emb == LPFC_SLI4_MBX_EMBED) { |
| 7546 | rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; |
| 7547 | shdr = &rsrc_ext->header.cfg_shdr; |
| 7548 | *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); |
| 7549 | } else { |
| 7550 | virtaddr = mbox->sge_array->addr[0]; |
| 7551 | n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; |
| 7552 | shdr = &n_rsrc->cfg_shdr; |
| 7553 | *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); |
| 7554 | } |
| 7555 | |
| 7556 | if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { |
| 7557 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 7558 | "2984 Failed to read allocated resources " |
| 7559 | "for type %d - Status 0x%x Add'l Status 0x%x.\n" , |
| 7560 | type, |
| 7561 | bf_get(lpfc_mbox_hdr_status, &shdr->response), |
| 7562 | bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); |
| 7563 | rc = -EIO; |
| 7564 | goto err_exit; |
| 7565 | } |
| 7566 | err_exit: |
| 7567 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 7568 | return rc; |
| 7569 | } |
| 7570 | |
| 7571 | /** |
| 7572 | * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block |
| 7573 | * @phba: pointer to lpfc hba data structure. |
| 7574 | * @sgl_list: linked link of sgl buffers to post |
| 7575 | * @cnt: number of linked list buffers |
| 7576 | * |
| 7577 | * This routine walks the list of buffers that have been allocated and |
| 7578 | * repost them to the port by using SGL block post. This is needed after a |
| 7579 | * pci_function_reset/warm_start or start. It attempts to construct blocks |
| 7580 | * of buffer sgls which contains contiguous xris and uses the non-embedded |
| 7581 | * SGL block post mailbox commands to post them to the port. For single |
| 7582 | * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post |
| 7583 | * mailbox command for posting. |
| 7584 | * |
| 7585 | * Returns: 0 = success, non-zero failure. |
| 7586 | **/ |
| 7587 | static int |
| 7588 | lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, |
| 7589 | struct list_head *sgl_list, int cnt) |
| 7590 | { |
| 7591 | struct lpfc_sglq *sglq_entry = NULL; |
| 7592 | struct lpfc_sglq *sglq_entry_next = NULL; |
| 7593 | struct lpfc_sglq *sglq_entry_first = NULL; |
| 7594 | int status = 0, total_cnt; |
| 7595 | int post_cnt = 0, num_posted = 0, block_cnt = 0; |
| 7596 | int last_xritag = NO_XRI; |
| 7597 | LIST_HEAD(prep_sgl_list); |
| 7598 | LIST_HEAD(blck_sgl_list); |
| 7599 | LIST_HEAD(allc_sgl_list); |
| 7600 | LIST_HEAD(post_sgl_list); |
| 7601 | LIST_HEAD(free_sgl_list); |
| 7602 | |
| 7603 | spin_lock_irq(lock: &phba->hbalock); |
| 7604 | spin_lock(lock: &phba->sli4_hba.sgl_list_lock); |
| 7605 | list_splice_init(list: sgl_list, head: &allc_sgl_list); |
| 7606 | spin_unlock(lock: &phba->sli4_hba.sgl_list_lock); |
| 7607 | spin_unlock_irq(lock: &phba->hbalock); |
| 7608 | |
| 7609 | total_cnt = cnt; |
| 7610 | list_for_each_entry_safe(sglq_entry, sglq_entry_next, |
| 7611 | &allc_sgl_list, list) { |
| 7612 | list_del_init(entry: &sglq_entry->list); |
| 7613 | block_cnt++; |
| 7614 | if ((last_xritag != NO_XRI) && |
| 7615 | (sglq_entry->sli4_xritag != last_xritag + 1)) { |
| 7616 | /* a hole in xri block, form a sgl posting block */ |
| 7617 | list_splice_init(list: &prep_sgl_list, head: &blck_sgl_list); |
| 7618 | post_cnt = block_cnt - 1; |
| 7619 | /* prepare list for next posting block */ |
| 7620 | list_add_tail(new: &sglq_entry->list, head: &prep_sgl_list); |
| 7621 | block_cnt = 1; |
| 7622 | } else { |
| 7623 | /* prepare list for next posting block */ |
| 7624 | list_add_tail(new: &sglq_entry->list, head: &prep_sgl_list); |
| 7625 | /* enough sgls for non-embed sgl mbox command */ |
| 7626 | if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { |
| 7627 | list_splice_init(list: &prep_sgl_list, |
| 7628 | head: &blck_sgl_list); |
| 7629 | post_cnt = block_cnt; |
| 7630 | block_cnt = 0; |
| 7631 | } |
| 7632 | } |
| 7633 | num_posted++; |
| 7634 | |
| 7635 | /* keep track of last sgl's xritag */ |
| 7636 | last_xritag = sglq_entry->sli4_xritag; |
| 7637 | |
| 7638 | /* end of repost sgl list condition for buffers */ |
| 7639 | if (num_posted == total_cnt) { |
| 7640 | if (post_cnt == 0) { |
| 7641 | list_splice_init(list: &prep_sgl_list, |
| 7642 | head: &blck_sgl_list); |
| 7643 | post_cnt = block_cnt; |
| 7644 | } else if (block_cnt == 1) { |
| 7645 | status = lpfc_sli4_post_sgl(phba, |
| 7646 | sglq_entry->phys, 0, |
| 7647 | sglq_entry->sli4_xritag); |
| 7648 | if (!status) { |
| 7649 | /* successful, put sgl to posted list */ |
| 7650 | list_add_tail(new: &sglq_entry->list, |
| 7651 | head: &post_sgl_list); |
| 7652 | } else { |
| 7653 | /* Failure, put sgl to free list */ |
| 7654 | lpfc_printf_log(phba, KERN_WARNING, |
| 7655 | LOG_SLI, |
| 7656 | "3159 Failed to post " |
| 7657 | "sgl, xritag:x%x\n" , |
| 7658 | sglq_entry->sli4_xritag); |
| 7659 | list_add_tail(new: &sglq_entry->list, |
| 7660 | head: &free_sgl_list); |
| 7661 | total_cnt--; |
| 7662 | } |
| 7663 | } |
| 7664 | } |
| 7665 | |
| 7666 | /* continue until a nembed page worth of sgls */ |
| 7667 | if (post_cnt == 0) |
| 7668 | continue; |
| 7669 | |
| 7670 | /* post the buffer list sgls as a block */ |
| 7671 | status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, |
| 7672 | post_cnt); |
| 7673 | |
| 7674 | if (!status) { |
| 7675 | /* success, put sgl list to posted sgl list */ |
| 7676 | list_splice_init(list: &blck_sgl_list, head: &post_sgl_list); |
| 7677 | } else { |
| 7678 | /* Failure, put sgl list to free sgl list */ |
| 7679 | sglq_entry_first = list_first_entry(&blck_sgl_list, |
| 7680 | struct lpfc_sglq, |
| 7681 | list); |
| 7682 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 7683 | "3160 Failed to post sgl-list, " |
| 7684 | "xritag:x%x-x%x\n" , |
| 7685 | sglq_entry_first->sli4_xritag, |
| 7686 | (sglq_entry_first->sli4_xritag + |
| 7687 | post_cnt - 1)); |
| 7688 | list_splice_init(list: &blck_sgl_list, head: &free_sgl_list); |
| 7689 | total_cnt -= post_cnt; |
| 7690 | } |
| 7691 | |
| 7692 | /* don't reset xirtag due to hole in xri block */ |
| 7693 | if (block_cnt == 0) |
| 7694 | last_xritag = NO_XRI; |
| 7695 | |
| 7696 | /* reset sgl post count for next round of posting */ |
| 7697 | post_cnt = 0; |
| 7698 | } |
| 7699 | |
| 7700 | /* free the sgls failed to post */ |
| 7701 | lpfc_free_sgl_list(phba, &free_sgl_list); |
| 7702 | |
| 7703 | /* push sgls posted to the available list */ |
| 7704 | if (!list_empty(head: &post_sgl_list)) { |
| 7705 | spin_lock_irq(lock: &phba->hbalock); |
| 7706 | spin_lock(lock: &phba->sli4_hba.sgl_list_lock); |
| 7707 | list_splice_init(list: &post_sgl_list, head: sgl_list); |
| 7708 | spin_unlock(lock: &phba->sli4_hba.sgl_list_lock); |
| 7709 | spin_unlock_irq(lock: &phba->hbalock); |
| 7710 | } else { |
| 7711 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 7712 | "3161 Failure to post sgl to port,status %x " |
| 7713 | "blkcnt %d totalcnt %d postcnt %d\n" , |
| 7714 | status, block_cnt, total_cnt, post_cnt); |
| 7715 | return -EIO; |
| 7716 | } |
| 7717 | |
| 7718 | /* return the number of XRIs actually posted */ |
| 7719 | return total_cnt; |
| 7720 | } |
| 7721 | |
| 7722 | /** |
| 7723 | * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls |
| 7724 | * @phba: pointer to lpfc hba data structure. |
| 7725 | * |
| 7726 | * This routine walks the list of nvme buffers that have been allocated and |
| 7727 | * repost them to the port by using SGL block post. This is needed after a |
| 7728 | * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine |
| 7729 | * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list |
| 7730 | * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. |
| 7731 | * |
| 7732 | * Returns: 0 = success, non-zero failure. |
| 7733 | **/ |
| 7734 | static int |
| 7735 | lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) |
| 7736 | { |
| 7737 | LIST_HEAD(post_nblist); |
| 7738 | int num_posted, rc = 0; |
| 7739 | |
| 7740 | /* get all NVME buffers need to repost to a local list */ |
| 7741 | lpfc_io_buf_flush(phba, sglist: &post_nblist); |
| 7742 | |
| 7743 | /* post the list of nvme buffer sgls to port if available */ |
| 7744 | if (!list_empty(head: &post_nblist)) { |
| 7745 | num_posted = lpfc_sli4_post_io_sgl_list( |
| 7746 | phba, blist: &post_nblist, xricnt: phba->sli4_hba.io_xri_cnt); |
| 7747 | /* failed to post any nvme buffer, return error */ |
| 7748 | if (num_posted == 0) |
| 7749 | rc = -EIO; |
| 7750 | } |
| 7751 | return rc; |
| 7752 | } |
| 7753 | |
| 7754 | static void |
| 7755 | lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) |
| 7756 | { |
| 7757 | uint32_t len; |
| 7758 | |
| 7759 | len = sizeof(struct lpfc_mbx_set_host_data) - |
| 7760 | sizeof(struct lpfc_sli4_cfg_mhdr); |
| 7761 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 7762 | LPFC_MBOX_OPCODE_SET_HOST_DATA, len, |
| 7763 | LPFC_SLI4_MBX_EMBED); |
| 7764 | |
| 7765 | mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; |
| 7766 | mbox->u.mqe.un.set_host_data.param_len = |
| 7767 | LPFC_HOST_OS_DRIVER_VERSION_SIZE; |
| 7768 | snprintf(buf: mbox->u.mqe.un.set_host_data.un.data, |
| 7769 | LPFC_HOST_OS_DRIVER_VERSION_SIZE, |
| 7770 | fmt: "Linux %s v" LPFC_DRIVER_VERSION, |
| 7771 | test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? "FCoE" : "FC" ); |
| 7772 | } |
| 7773 | |
| 7774 | int |
| 7775 | lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, |
| 7776 | struct lpfc_queue *drq, int count, int idx) |
| 7777 | { |
| 7778 | int rc, i; |
| 7779 | struct lpfc_rqe hrqe; |
| 7780 | struct lpfc_rqe drqe; |
| 7781 | struct lpfc_rqb *rqbp; |
| 7782 | unsigned long flags; |
| 7783 | struct rqb_dmabuf *rqb_buffer; |
| 7784 | LIST_HEAD(rqb_buf_list); |
| 7785 | |
| 7786 | rqbp = hrq->rqbp; |
| 7787 | for (i = 0; i < count; i++) { |
| 7788 | spin_lock_irqsave(&phba->hbalock, flags); |
| 7789 | /* IF RQ is already full, don't bother */ |
| 7790 | if (rqbp->buffer_count + i >= rqbp->entry_count - 1) { |
| 7791 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
| 7792 | break; |
| 7793 | } |
| 7794 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
| 7795 | |
| 7796 | rqb_buffer = rqbp->rqb_alloc_buffer(phba); |
| 7797 | if (!rqb_buffer) |
| 7798 | break; |
| 7799 | rqb_buffer->hrq = hrq; |
| 7800 | rqb_buffer->drq = drq; |
| 7801 | rqb_buffer->idx = idx; |
| 7802 | list_add_tail(new: &rqb_buffer->hbuf.list, head: &rqb_buf_list); |
| 7803 | } |
| 7804 | |
| 7805 | spin_lock_irqsave(&phba->hbalock, flags); |
| 7806 | while (!list_empty(head: &rqb_buf_list)) { |
| 7807 | list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, |
| 7808 | hbuf.list); |
| 7809 | |
| 7810 | hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); |
| 7811 | hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); |
| 7812 | drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); |
| 7813 | drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); |
| 7814 | rc = lpfc_sli4_rq_put(hq: hrq, dq: drq, hrqe: &hrqe, drqe: &drqe); |
| 7815 | if (rc < 0) { |
| 7816 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 7817 | "6421 Cannot post to HRQ %d: %x %x %x " |
| 7818 | "DRQ %x %x\n" , |
| 7819 | hrq->queue_id, |
| 7820 | hrq->host_index, |
| 7821 | hrq->hba_index, |
| 7822 | hrq->entry_count, |
| 7823 | drq->host_index, |
| 7824 | drq->hba_index); |
| 7825 | rqbp->rqb_free_buffer(phba, rqb_buffer); |
| 7826 | } else { |
| 7827 | list_add_tail(new: &rqb_buffer->hbuf.list, |
| 7828 | head: &rqbp->rqb_buffer_list); |
| 7829 | rqbp->buffer_count++; |
| 7830 | } |
| 7831 | } |
| 7832 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
| 7833 | return 1; |
| 7834 | } |
| 7835 | |
| 7836 | static void |
| 7837 | lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
| 7838 | { |
| 7839 | union lpfc_sli4_cfg_shdr *shdr; |
| 7840 | u32 shdr_status, shdr_add_status; |
| 7841 | |
| 7842 | shdr = (union lpfc_sli4_cfg_shdr *) |
| 7843 | &pmb->u.mqe.un.sli4_config.header.cfg_shdr; |
| 7844 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 7845 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 7846 | if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) { |
| 7847 | lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX, |
| 7848 | "4622 SET_FEATURE (x%x) mbox failed, " |
| 7849 | "status x%x add_status x%x, mbx status x%x\n" , |
| 7850 | LPFC_SET_LD_SIGNAL, shdr_status, |
| 7851 | shdr_add_status, pmb->u.mb.mbxStatus); |
| 7852 | phba->degrade_activate_threshold = 0; |
| 7853 | phba->degrade_deactivate_threshold = 0; |
| 7854 | phba->fec_degrade_interval = 0; |
| 7855 | goto out; |
| 7856 | } |
| 7857 | |
| 7858 | phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7; |
| 7859 | phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8; |
| 7860 | phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10; |
| 7861 | |
| 7862 | lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT, |
| 7863 | "4624 Success: da x%x dd x%x interval x%x\n" , |
| 7864 | phba->degrade_activate_threshold, |
| 7865 | phba->degrade_deactivate_threshold, |
| 7866 | phba->fec_degrade_interval); |
| 7867 | out: |
| 7868 | mempool_free(element: pmb, pool: phba->mbox_mem_pool); |
| 7869 | } |
| 7870 | |
| 7871 | int |
| 7872 | lpfc_read_lds_params(struct lpfc_hba *phba) |
| 7873 | { |
| 7874 | LPFC_MBOXQ_t *mboxq; |
| 7875 | int rc; |
| 7876 | |
| 7877 | mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 7878 | if (!mboxq) |
| 7879 | return -ENOMEM; |
| 7880 | |
| 7881 | lpfc_set_features(phba, mbox: mboxq, LPFC_SET_LD_SIGNAL); |
| 7882 | mboxq->vport = phba->pport; |
| 7883 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params; |
| 7884 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); |
| 7885 | if (rc == MBX_NOT_FINISHED) { |
| 7886 | mempool_free(element: mboxq, pool: phba->mbox_mem_pool); |
| 7887 | return -EIO; |
| 7888 | } |
| 7889 | return 0; |
| 7890 | } |
| 7891 | |
/* Mailbox completion for SET_FEATURE(LPFC_SET_CGN_SIGNAL): on success the
 * congestion-signal ACQE counter is reset; on failure congestion reporting
 * falls back to FPIN-only. Either way, RDF registration is issued last.
 */
static void
lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	union lpfc_sli4_cfg_shdr *shdr;
	u32 shdr_status, shdr_add_status;
	u32 sig, acqe;

	/* Two outcomes. (1) Set features was successful and EDC negotiation
	 * is done. (2) Mailbox failed and send FPIN support only.
	 */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
				"2516 CGN SET_FEATURE mbox failed with "
				"status x%x add_status x%x, mbx status x%x "
				"Reset Congestion to FPINs only\n" ,
				shdr_status, shdr_add_status,
				pmb->u.mb.mbxStatus);
		/* If there is a mbox error, move on to RDF */
		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
		phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
		goto out;
	}

	/* Zero out Congestion Signal ACQE counter */
	phba->cgn_acqe_cnt = 0;

	acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
		      &pmb->u.mqe.un.set_feature);
	sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
		     &pmb->u.mqe.un.set_feature);
	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"4620 SET_FEATURES Success: Freq: %ds %dms "
			" Reg: x%x x%x\n" , acqe, sig,
			phba->cgn_reg_signal, phba->cgn_reg_fpin);
out:
	mempool_free(element: pmb, pool: phba->mbox_mem_pool);

	/* Register for FPIN events from the fabric now that the
	 * EDC common_set_features has completed.
	 */
	lpfc_issue_els_rdf(vport, retry: 0);
}
| 7939 | |
| 7940 | int |
| 7941 | lpfc_config_cgn_signal(struct lpfc_hba *phba) |
| 7942 | { |
| 7943 | LPFC_MBOXQ_t *mboxq; |
| 7944 | u32 rc; |
| 7945 | |
| 7946 | mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 7947 | if (!mboxq) |
| 7948 | goto out_rdf; |
| 7949 | |
| 7950 | lpfc_set_features(phba, mbox: mboxq, LPFC_SET_CGN_SIGNAL); |
| 7951 | mboxq->vport = phba->pport; |
| 7952 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs; |
| 7953 | |
| 7954 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 7955 | "4621 SET_FEATURES: FREQ sig x%x acqe x%x: " |
| 7956 | "Reg: x%x x%x\n" , |
| 7957 | phba->cgn_sig_freq, lpfc_acqe_cgn_frequency, |
| 7958 | phba->cgn_reg_signal, phba->cgn_reg_fpin); |
| 7959 | |
| 7960 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); |
| 7961 | if (rc == MBX_NOT_FINISHED) |
| 7962 | goto out; |
| 7963 | return 0; |
| 7964 | |
| 7965 | out: |
| 7966 | mempool_free(element: mboxq, pool: phba->mbox_mem_pool); |
| 7967 | out_rdf: |
| 7968 | /* If there is a mbox error, move on to RDF */ |
| 7969 | phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; |
| 7970 | phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; |
| 7971 | lpfc_issue_els_rdf(vport: phba->pport, retry: 0); |
| 7972 | return -EIO; |
| 7973 | } |
| 7974 | |
| 7975 | /** |
| 7976 | * lpfc_init_idle_stat_hb - Initialize idle_stat tracking |
| 7977 | * @phba: pointer to lpfc hba data structure. |
| 7978 | * |
| 7979 | * This routine initializes the per-eq idle_stat to dynamically dictate |
| 7980 | * polling decisions. |
| 7981 | * |
| 7982 | * Return codes: |
| 7983 | * None |
| 7984 | **/ |
| 7985 | static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba) |
| 7986 | { |
| 7987 | int i; |
| 7988 | struct lpfc_sli4_hdw_queue *hdwq; |
| 7989 | struct lpfc_queue *eq; |
| 7990 | struct lpfc_idle_stat *idle_stat; |
| 7991 | u64 wall; |
| 7992 | |
| 7993 | for_each_present_cpu(i) { |
| 7994 | hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; |
| 7995 | eq = hdwq->hba_eq; |
| 7996 | |
| 7997 | /* Skip if we've already handled this eq's primary CPU */ |
| 7998 | if (eq->chann != i) |
| 7999 | continue; |
| 8000 | |
| 8001 | idle_stat = &phba->sli4_hba.idle_stat[i]; |
| 8002 | |
| 8003 | idle_stat->prev_idle = get_cpu_idle_time(cpu: i, wall: &wall, io_busy: 1); |
| 8004 | idle_stat->prev_wall = wall; |
| 8005 | |
| 8006 | if (phba->nvmet_support || |
| 8007 | phba->cmf_active_mode != LPFC_CFG_OFF || |
| 8008 | phba->intr_type != MSIX) |
| 8009 | eq->poll_mode = LPFC_QUEUE_WORK; |
| 8010 | else |
| 8011 | eq->poll_mode = LPFC_THREADED_IRQ; |
| 8012 | } |
| 8013 | |
| 8014 | if (!phba->nvmet_support && phba->intr_type == MSIX) |
| 8015 | schedule_delayed_work(dwork: &phba->idle_stat_delay_work, |
| 8016 | delay: msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); |
| 8017 | } |
| 8018 | |
| 8019 | static void lpfc_sli4_dip(struct lpfc_hba *phba) |
| 8020 | { |
| 8021 | uint32_t if_type; |
| 8022 | |
| 8023 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
| 8024 | if (if_type == LPFC_SLI_INTF_IF_TYPE_2 || |
| 8025 | if_type == LPFC_SLI_INTF_IF_TYPE_6) { |
| 8026 | struct lpfc_register reg_data; |
| 8027 | |
| 8028 | if (lpfc_readl(addr: phba->sli4_hba.u.if_type2.STATUSregaddr, |
| 8029 | data: ®_data.word0)) |
| 8030 | return; |
| 8031 | |
| 8032 | if (bf_get(lpfc_sliport_status_dip, ®_data)) |
| 8033 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 8034 | "2904 Firmware Dump Image Present" |
| 8035 | " on Adapter" ); |
| 8036 | } |
| 8037 | } |
| 8038 | |
| 8039 | /** |
| 8040 | * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor |
| 8041 | * @rx_monitor: Pointer to lpfc_rx_info_monitor object |
| 8042 | * @entries: Number of rx_info_entry objects to allocate in ring |
| 8043 | * |
| 8044 | * Return: |
| 8045 | * 0 - Success |
| 8046 | * ENOMEM - Failure to kmalloc |
| 8047 | **/ |
| 8048 | int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor, |
| 8049 | u32 entries) |
| 8050 | { |
| 8051 | rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry), |
| 8052 | GFP_KERNEL); |
| 8053 | if (!rx_monitor->ring) |
| 8054 | return -ENOMEM; |
| 8055 | |
| 8056 | rx_monitor->head_idx = 0; |
| 8057 | rx_monitor->tail_idx = 0; |
| 8058 | spin_lock_init(&rx_monitor->lock); |
| 8059 | rx_monitor->entries = entries; |
| 8060 | |
| 8061 | return 0; |
| 8062 | } |
| 8063 | |
| 8064 | /** |
| 8065 | * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor |
| 8066 | * @rx_monitor: Pointer to lpfc_rx_info_monitor object |
| 8067 | * |
| 8068 | * Called after cancellation of cmf_timer. |
| 8069 | **/ |
| 8070 | void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor) |
| 8071 | { |
| 8072 | kfree(objp: rx_monitor->ring); |
| 8073 | rx_monitor->ring = NULL; |
| 8074 | rx_monitor->entries = 0; |
| 8075 | rx_monitor->head_idx = 0; |
| 8076 | rx_monitor->tail_idx = 0; |
| 8077 | } |
| 8078 | |
| 8079 | /** |
| 8080 | * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring |
| 8081 | * @rx_monitor: Pointer to lpfc_rx_info_monitor object |
| 8082 | * @entry: Pointer to rx_info_entry |
| 8083 | * |
| 8084 | * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a |
| 8085 | * deep copy of rx_info_entry not a shallow copy of the rx_info_entry ptr. |
| 8086 | * |
| 8087 | * This is called from lpfc_cmf_timer, which is in timer/softirq context. |
| 8088 | * |
| 8089 | * In cases of old data overflow, we do a best effort of FIFO order. |
| 8090 | **/ |
| 8091 | void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor, |
| 8092 | struct rx_info_entry *entry) |
| 8093 | { |
| 8094 | struct rx_info_entry *ring = rx_monitor->ring; |
| 8095 | u32 *head_idx = &rx_monitor->head_idx; |
| 8096 | u32 *tail_idx = &rx_monitor->tail_idx; |
| 8097 | spinlock_t *ring_lock = &rx_monitor->lock; |
| 8098 | u32 ring_size = rx_monitor->entries; |
| 8099 | |
| 8100 | spin_lock(lock: ring_lock); |
| 8101 | memcpy(&ring[*tail_idx], entry, sizeof(*entry)); |
| 8102 | *tail_idx = (*tail_idx + 1) % ring_size; |
| 8103 | |
| 8104 | /* Best effort of FIFO saved data */ |
| 8105 | if (*tail_idx == *head_idx) |
| 8106 | *head_idx = (*head_idx + 1) % ring_size; |
| 8107 | |
| 8108 | spin_unlock(lock: ring_lock); |
| 8109 | } |
| 8110 | |
| 8111 | /** |
| 8112 | * lpfc_rx_monitor_report - Read out rx_monitor's ring |
| 8113 | * @phba: Pointer to lpfc_hba object |
| 8114 | * @rx_monitor: Pointer to lpfc_rx_info_monitor object |
| 8115 | * @buf: Pointer to char buffer that will contain rx monitor info data |
| 8116 | * @buf_len: Length buf including null char |
| 8117 | * @max_read_entries: Maximum number of entries to read out of ring |
| 8118 | * |
| 8119 | * Used to dump/read what's in rx_monitor's ring buffer. |
| 8120 | * |
| 8121 | * If buf is NULL || buf_len == 0, then it is implied that we want to log the |
| 8122 | * information to kmsg instead of filling out buf. |
| 8123 | * |
| 8124 | * Return: |
| 8125 | * Number of entries read out of the ring |
| 8126 | **/ |
| 8127 | u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, |
| 8128 | struct lpfc_rx_info_monitor *rx_monitor, char *buf, |
| 8129 | u32 buf_len, u32 max_read_entries) |
| 8130 | { |
| 8131 | struct rx_info_entry *ring = rx_monitor->ring; |
| 8132 | struct rx_info_entry *entry; |
| 8133 | u32 *head_idx = &rx_monitor->head_idx; |
| 8134 | u32 *tail_idx = &rx_monitor->tail_idx; |
| 8135 | spinlock_t *ring_lock = &rx_monitor->lock; |
| 8136 | u32 ring_size = rx_monitor->entries; |
| 8137 | u32 cnt = 0; |
| 8138 | char tmp[DBG_LOG_STR_SZ] = {0}; |
| 8139 | bool log_to_kmsg = (!buf || !buf_len) ? true : false; |
| 8140 | |
| 8141 | if (!log_to_kmsg) { |
| 8142 | /* clear the buffer to be sure */ |
| 8143 | memset(buf, 0, buf_len); |
| 8144 | |
| 8145 | scnprintf(buf, size: buf_len, fmt: "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s" |
| 8146 | "%-8s%-8s%-8s%-16s\n" , |
| 8147 | "MaxBPI" , "Tot_Data_CMF" , |
| 8148 | "Tot_Data_Cmd" , "Tot_Data_Cmpl" , |
| 8149 | "Lat(us)" , "Avg_IO" , "Max_IO" , "Bsy" , |
| 8150 | "IO_cnt" , "Info" , "BWutil(ms)" ); |
| 8151 | } |
| 8152 | |
| 8153 | /* Needs to be _irq because record is called from timer interrupt |
| 8154 | * context |
| 8155 | */ |
| 8156 | spin_lock_irq(lock: ring_lock); |
| 8157 | while (*head_idx != *tail_idx) { |
| 8158 | entry = &ring[*head_idx]; |
| 8159 | |
| 8160 | /* Read out this entry's data. */ |
| 8161 | if (!log_to_kmsg) { |
| 8162 | /* If !log_to_kmsg, then store to buf. */ |
| 8163 | scnprintf(buf: tmp, size: sizeof(tmp), |
| 8164 | fmt: "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu" |
| 8165 | "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n" , |
| 8166 | *head_idx, entry->max_bytes_per_interval, |
| 8167 | entry->cmf_bytes, entry->total_bytes, |
| 8168 | entry->rcv_bytes, entry->avg_io_latency, |
| 8169 | entry->avg_io_size, entry->max_read_cnt, |
| 8170 | entry->cmf_busy, entry->io_cnt, |
| 8171 | entry->cmf_info, entry->timer_utilization, |
| 8172 | entry->timer_interval); |
| 8173 | |
| 8174 | /* Check for buffer overflow */ |
| 8175 | if ((strlen(buf) + strlen(tmp)) >= buf_len) |
| 8176 | break; |
| 8177 | |
| 8178 | /* Append entry's data to buffer */ |
| 8179 | strlcat(p: buf, q: tmp, avail: buf_len); |
| 8180 | } else { |
| 8181 | lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, |
| 8182 | "4410 %02u: MBPI %llu Xmit %llu " |
| 8183 | "Cmpl %llu Lat %llu ASz %llu Info %02u " |
| 8184 | "BWUtil %u Int %u slot %u\n" , |
| 8185 | cnt, entry->max_bytes_per_interval, |
| 8186 | entry->total_bytes, entry->rcv_bytes, |
| 8187 | entry->avg_io_latency, |
| 8188 | entry->avg_io_size, entry->cmf_info, |
| 8189 | entry->timer_utilization, |
| 8190 | entry->timer_interval, *head_idx); |
| 8191 | } |
| 8192 | |
| 8193 | *head_idx = (*head_idx + 1) % ring_size; |
| 8194 | |
| 8195 | /* Don't feed more than max_read_entries */ |
| 8196 | cnt++; |
| 8197 | if (cnt >= max_read_entries) |
| 8198 | break; |
| 8199 | } |
| 8200 | spin_unlock_irq(lock: ring_lock); |
| 8201 | |
| 8202 | return cnt; |
| 8203 | } |
| 8204 | |
| 8205 | /** |
| 8206 | * lpfc_cmf_setup - Initialize idle_stat tracking |
| 8207 | * @phba: Pointer to HBA context object. |
| 8208 | * |
| 8209 | * This is called from HBA setup during driver load or when the HBA |
| 8210 | * comes online. this does all the initialization to support CMF and MI. |
| 8211 | **/ |
| 8212 | static int |
| 8213 | lpfc_cmf_setup(struct lpfc_hba *phba) |
| 8214 | { |
| 8215 | LPFC_MBOXQ_t *mboxq; |
| 8216 | struct lpfc_dmabuf *mp; |
| 8217 | struct lpfc_pc_sli4_params *sli4_params; |
| 8218 | int rc, cmf, mi_ver; |
| 8219 | |
| 8220 | rc = lpfc_sli4_refresh_params(phba); |
| 8221 | if (unlikely(rc)) |
| 8222 | return rc; |
| 8223 | |
| 8224 | mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 8225 | if (!mboxq) |
| 8226 | return -ENOMEM; |
| 8227 | |
| 8228 | sli4_params = &phba->sli4_hba.pc_sli4_params; |
| 8229 | |
| 8230 | /* Always try to enable MI feature if we can */ |
| 8231 | if (sli4_params->mi_ver) { |
| 8232 | lpfc_set_features(phba, mbox: mboxq, LPFC_SET_ENABLE_MI); |
| 8233 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 8234 | mi_ver = bf_get(lpfc_mbx_set_feature_mi, |
| 8235 | &mboxq->u.mqe.un.set_feature); |
| 8236 | |
| 8237 | if (rc == MBX_SUCCESS) { |
| 8238 | if (mi_ver) { |
| 8239 | lpfc_printf_log(phba, |
| 8240 | KERN_WARNING, LOG_CGN_MGMT, |
| 8241 | "6215 MI is enabled\n" ); |
| 8242 | sli4_params->mi_ver = mi_ver; |
| 8243 | } else { |
| 8244 | lpfc_printf_log(phba, |
| 8245 | KERN_WARNING, LOG_CGN_MGMT, |
| 8246 | "6338 MI is disabled\n" ); |
| 8247 | sli4_params->mi_ver = 0; |
| 8248 | } |
| 8249 | } else { |
| 8250 | /* mi_ver is already set from GET_SLI4_PARAMETERS */ |
| 8251 | lpfc_printf_log(phba, KERN_INFO, |
| 8252 | LOG_CGN_MGMT | LOG_INIT, |
| 8253 | "6245 Enable MI Mailbox x%x (x%x/x%x) " |
| 8254 | "failed, rc:x%x mi:x%x\n" , |
| 8255 | bf_get(lpfc_mqe_command, &mboxq->u.mqe), |
| 8256 | lpfc_sli_config_mbox_subsys_get |
| 8257 | (phba, mboxq), |
| 8258 | lpfc_sli_config_mbox_opcode_get |
| 8259 | (phba, mboxq), |
| 8260 | rc, sli4_params->mi_ver); |
| 8261 | } |
| 8262 | } else { |
| 8263 | lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, |
| 8264 | "6217 MI is disabled\n" ); |
| 8265 | } |
| 8266 | |
| 8267 | /* Ensure FDMI is enabled for MI if enable_mi is set */ |
| 8268 | if (sli4_params->mi_ver) |
| 8269 | phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; |
| 8270 | |
| 8271 | /* Always try to enable CMF feature if we can */ |
| 8272 | if (sli4_params->cmf) { |
| 8273 | lpfc_set_features(phba, mbox: mboxq, LPFC_SET_ENABLE_CMF); |
| 8274 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 8275 | cmf = bf_get(lpfc_mbx_set_feature_cmf, |
| 8276 | &mboxq->u.mqe.un.set_feature); |
| 8277 | if (rc == MBX_SUCCESS && cmf) { |
| 8278 | lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, |
| 8279 | "6218 CMF is enabled: mode %d\n" , |
| 8280 | phba->cmf_active_mode); |
| 8281 | } else { |
| 8282 | lpfc_printf_log(phba, KERN_WARNING, |
| 8283 | LOG_CGN_MGMT | LOG_INIT, |
| 8284 | "6219 Enable CMF Mailbox x%x (x%x/x%x) " |
| 8285 | "failed, rc:x%x dd:x%x\n" , |
| 8286 | bf_get(lpfc_mqe_command, &mboxq->u.mqe), |
| 8287 | lpfc_sli_config_mbox_subsys_get |
| 8288 | (phba, mboxq), |
| 8289 | lpfc_sli_config_mbox_opcode_get |
| 8290 | (phba, mboxq), |
| 8291 | rc, cmf); |
| 8292 | sli4_params->cmf = 0; |
| 8293 | phba->cmf_active_mode = LPFC_CFG_OFF; |
| 8294 | goto no_cmf; |
| 8295 | } |
| 8296 | |
| 8297 | /* Allocate Congestion Information Buffer */ |
| 8298 | if (!phba->cgn_i) { |
| 8299 | mp = kmalloc(sizeof(*mp), GFP_KERNEL); |
| 8300 | if (mp) |
| 8301 | mp->virt = dma_alloc_coherent |
| 8302 | (dev: &phba->pcidev->dev, |
| 8303 | size: sizeof(struct lpfc_cgn_info), |
| 8304 | dma_handle: &mp->phys, GFP_KERNEL); |
| 8305 | if (!mp || !mp->virt) { |
| 8306 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 8307 | "2640 Failed to alloc memory " |
| 8308 | "for Congestion Info\n" ); |
| 8309 | kfree(objp: mp); |
| 8310 | sli4_params->cmf = 0; |
| 8311 | phba->cmf_active_mode = LPFC_CFG_OFF; |
| 8312 | goto no_cmf; |
| 8313 | } |
| 8314 | phba->cgn_i = mp; |
| 8315 | |
| 8316 | /* initialize congestion buffer info */ |
| 8317 | lpfc_init_congestion_buf(phba); |
| 8318 | lpfc_init_congestion_stat(phba); |
| 8319 | |
| 8320 | /* Zero out Congestion Signal counters */ |
| 8321 | atomic64_set(v: &phba->cgn_acqe_stat.alarm, i: 0); |
| 8322 | atomic64_set(v: &phba->cgn_acqe_stat.warn, i: 0); |
| 8323 | } |
| 8324 | |
| 8325 | rc = lpfc_sli4_cgn_params_read(phba); |
| 8326 | if (rc < 0) { |
| 8327 | lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, |
| 8328 | "6242 Error reading Cgn Params (%d)\n" , |
| 8329 | rc); |
| 8330 | /* Ensure CGN Mode is off */ |
| 8331 | sli4_params->cmf = 0; |
| 8332 | } else if (!rc) { |
| 8333 | lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, |
| 8334 | "6243 CGN Event empty object.\n" ); |
| 8335 | /* Ensure CGN Mode is off */ |
| 8336 | sli4_params->cmf = 0; |
| 8337 | } |
| 8338 | } else { |
| 8339 | no_cmf: |
| 8340 | lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, |
| 8341 | "6220 CMF is disabled\n" ); |
| 8342 | } |
| 8343 | |
| 8344 | /* Only register congestion buffer with firmware if BOTH |
| 8345 | * CMF and E2E are enabled. |
| 8346 | */ |
| 8347 | if (sli4_params->cmf && sli4_params->mi_ver) { |
| 8348 | rc = lpfc_reg_congestion_buf(phba); |
| 8349 | if (rc) { |
| 8350 | dma_free_coherent(dev: &phba->pcidev->dev, |
| 8351 | size: sizeof(struct lpfc_cgn_info), |
| 8352 | cpu_addr: phba->cgn_i->virt, dma_handle: phba->cgn_i->phys); |
| 8353 | kfree(objp: phba->cgn_i); |
| 8354 | phba->cgn_i = NULL; |
| 8355 | /* Ensure CGN Mode is off */ |
| 8356 | phba->cmf_active_mode = LPFC_CFG_OFF; |
| 8357 | sli4_params->cmf = 0; |
| 8358 | return 0; |
| 8359 | } |
| 8360 | } |
| 8361 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 8362 | "6470 Setup MI version %d CMF %d mode %d\n" , |
| 8363 | sli4_params->mi_ver, sli4_params->cmf, |
| 8364 | phba->cmf_active_mode); |
| 8365 | |
| 8366 | mempool_free(element: mboxq, pool: phba->mbox_mem_pool); |
| 8367 | |
| 8368 | /* Initialize atomic counters */ |
| 8369 | atomic_set(v: &phba->cgn_fabric_warn_cnt, i: 0); |
| 8370 | atomic_set(v: &phba->cgn_fabric_alarm_cnt, i: 0); |
| 8371 | atomic_set(v: &phba->cgn_sync_alarm_cnt, i: 0); |
| 8372 | atomic_set(v: &phba->cgn_sync_warn_cnt, i: 0); |
| 8373 | atomic_set(v: &phba->cgn_driver_evt_cnt, i: 0); |
| 8374 | atomic_set(v: &phba->cgn_latency_evt_cnt, i: 0); |
| 8375 | atomic64_set(v: &phba->cgn_latency_evt, i: 0); |
| 8376 | |
| 8377 | phba->cmf_interval_rate = LPFC_CMF_INTERVAL; |
| 8378 | |
| 8379 | /* Allocate RX Monitor Buffer */ |
| 8380 | if (!phba->rx_monitor) { |
| 8381 | phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor), |
| 8382 | GFP_KERNEL); |
| 8383 | |
| 8384 | if (!phba->rx_monitor) { |
| 8385 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 8386 | "2644 Failed to alloc memory " |
| 8387 | "for RX Monitor Buffer\n" ); |
| 8388 | return -ENOMEM; |
| 8389 | } |
| 8390 | |
| 8391 | /* Instruct the rx_monitor object to instantiate its ring */ |
| 8392 | if (lpfc_rx_monitor_create_ring(rx_monitor: phba->rx_monitor, |
| 8393 | LPFC_MAX_RXMONITOR_ENTRY)) { |
| 8394 | kfree(objp: phba->rx_monitor); |
| 8395 | phba->rx_monitor = NULL; |
| 8396 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 8397 | "2645 Failed to alloc memory " |
| 8398 | "for RX Monitor's Ring\n" ); |
| 8399 | return -ENOMEM; |
| 8400 | } |
| 8401 | } |
| 8402 | |
| 8403 | return 0; |
| 8404 | } |
| 8405 | |
| 8406 | static int |
| 8407 | lpfc_set_host_tm(struct lpfc_hba *phba) |
| 8408 | { |
| 8409 | LPFC_MBOXQ_t *mboxq; |
| 8410 | uint32_t len, rc; |
| 8411 | struct timespec64 cur_time; |
| 8412 | struct tm broken; |
| 8413 | uint32_t month, day, year; |
| 8414 | uint32_t hour, minute, second; |
| 8415 | struct lpfc_mbx_set_host_date_time *tm; |
| 8416 | |
| 8417 | mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 8418 | if (!mboxq) |
| 8419 | return -ENOMEM; |
| 8420 | |
| 8421 | len = sizeof(struct lpfc_mbx_set_host_data) - |
| 8422 | sizeof(struct lpfc_sli4_cfg_mhdr); |
| 8423 | lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 8424 | LPFC_MBOX_OPCODE_SET_HOST_DATA, len, |
| 8425 | LPFC_SLI4_MBX_EMBED); |
| 8426 | |
| 8427 | mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME; |
| 8428 | mboxq->u.mqe.un.set_host_data.param_len = |
| 8429 | sizeof(struct lpfc_mbx_set_host_date_time); |
| 8430 | tm = &mboxq->u.mqe.un.set_host_data.un.tm; |
| 8431 | ktime_get_real_ts64(tv: &cur_time); |
| 8432 | time64_to_tm(totalsecs: cur_time.tv_sec, offset: 0, result: &broken); |
| 8433 | month = broken.tm_mon + 1; |
| 8434 | day = broken.tm_mday; |
| 8435 | year = broken.tm_year - 100; |
| 8436 | hour = broken.tm_hour; |
| 8437 | minute = broken.tm_min; |
| 8438 | second = broken.tm_sec; |
| 8439 | bf_set(lpfc_mbx_set_host_month, tm, month); |
| 8440 | bf_set(lpfc_mbx_set_host_day, tm, day); |
| 8441 | bf_set(lpfc_mbx_set_host_year, tm, year); |
| 8442 | bf_set(lpfc_mbx_set_host_hour, tm, hour); |
| 8443 | bf_set(lpfc_mbx_set_host_min, tm, minute); |
| 8444 | bf_set(lpfc_mbx_set_host_sec, tm, second); |
| 8445 | |
| 8446 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 8447 | mempool_free(element: mboxq, pool: phba->mbox_mem_pool); |
| 8448 | return rc; |
| 8449 | } |
| 8450 | |
| 8451 | /** |
| 8452 | * lpfc_get_platform_uuid - Attempts to extract a platform uuid |
| 8453 | * @phba: pointer to lpfc hba data structure. |
| 8454 | * |
| 8455 | * This routine attempts to first read SMBIOS DMI data for the System |
| 8456 | * Information structure offset 08h called System UUID. Else, no platform |
| 8457 | * UUID will be advertised. |
| 8458 | **/ |
| 8459 | static void |
| 8460 | lpfc_get_platform_uuid(struct lpfc_hba *phba) |
| 8461 | { |
| 8462 | int rc; |
| 8463 | const char *uuid; |
| 8464 | char pni[17] = {0}; /* 16 characters + '\0' */ |
| 8465 | bool is_ff = true, is_00 = true; |
| 8466 | u8 i; |
| 8467 | |
| 8468 | /* First attempt SMBIOS DMI */ |
| 8469 | uuid = dmi_get_system_info(field: DMI_PRODUCT_UUID); |
| 8470 | if (uuid) { |
| 8471 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 8472 | "2088 SMBIOS UUID %s\n" , |
| 8473 | uuid); |
| 8474 | } else { |
| 8475 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 8476 | "2099 Could not extract UUID\n" ); |
| 8477 | } |
| 8478 | |
| 8479 | if (uuid && uuid_is_valid(uuid)) { |
| 8480 | /* Generate PNI from UUID format. |
| 8481 | * |
| 8482 | * 1.) Extract lower 64 bits from UUID format. |
| 8483 | * 2.) Set 3h for NAA Locally Assigned Name Identifier format. |
| 8484 | * |
| 8485 | * e.g. xxxxxxxx-xxxx-xxxx-yyyy-yyyyyyyyyyyy |
| 8486 | * |
| 8487 | * extract the yyyy-yyyyyyyyyyyy portion |
| 8488 | * final PNI 3yyyyyyyyyyyyyyy |
| 8489 | */ |
| 8490 | scnprintf(buf: pni, size: sizeof(pni), fmt: "3%c%c%c%s" , |
| 8491 | uuid[20], uuid[21], uuid[22], &uuid[24]); |
| 8492 | |
| 8493 | /* Sanitize the converted PNI */ |
| 8494 | for (i = 1; i < 16 && (is_ff || is_00); i++) { |
| 8495 | if (pni[i] != '0') |
| 8496 | is_00 = false; |
| 8497 | if (pni[i] != 'f' && pni[i] != 'F') |
| 8498 | is_ff = false; |
| 8499 | } |
| 8500 | |
| 8501 | /* Convert from char* to unsigned long */ |
| 8502 | rc = kstrtoul(s: pni, base: 16, res: &phba->pni); |
| 8503 | if (!rc && !is_ff && !is_00) { |
| 8504 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 8505 | "2100 PNI 0x%016lx\n" , phba->pni); |
| 8506 | } else { |
| 8507 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 8508 | "2101 PNI %s generation status %d\n" , |
| 8509 | pni, rc); |
| 8510 | phba->pni = 0; |
| 8511 | } |
| 8512 | } |
| 8513 | } |
| 8514 | |
| 8515 | /** |
| 8516 | * lpfc_sli4_hba_setup - SLI4 device initialization PCI function |
| 8517 | * @phba: Pointer to HBA context object. |
| 8518 | * |
| 8519 | * This function is the main SLI4 device initialization PCI function. This |
| 8520 | * function is called by the HBA initialization code, HBA reset code and |
| 8521 | * HBA error attention handler code. Caller is not required to hold any |
| 8522 | * locks. |
| 8523 | **/ |
| 8524 | int |
| 8525 | lpfc_sli4_hba_setup(struct lpfc_hba *phba) |
| 8526 | { |
| 8527 | int rc, i, cnt, len, dd; |
| 8528 | LPFC_MBOXQ_t *mboxq; |
| 8529 | struct lpfc_mqe *mqe; |
| 8530 | uint8_t *vpd; |
| 8531 | uint32_t vpd_size; |
| 8532 | uint32_t ftr_rsp = 0; |
| 8533 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport: phba->pport); |
| 8534 | struct lpfc_vport *vport = phba->pport; |
| 8535 | struct lpfc_dmabuf *mp; |
| 8536 | struct lpfc_rqb *rqbp; |
| 8537 | u32 flg; |
| 8538 | |
| 8539 | /* Perform a PCI function reset to start from clean */ |
| 8540 | rc = lpfc_pci_function_reset(phba); |
| 8541 | if (unlikely(rc)) |
| 8542 | return -ENODEV; |
| 8543 | |
| 8544 | /* Check the HBA Host Status Register for readyness */ |
| 8545 | rc = lpfc_sli4_post_status_check(phba); |
| 8546 | if (unlikely(rc)) |
| 8547 | return -ENODEV; |
| 8548 | else { |
| 8549 | spin_lock_irq(lock: &phba->hbalock); |
| 8550 | phba->sli.sli_flag |= LPFC_SLI_ACTIVE; |
| 8551 | flg = phba->sli.sli_flag; |
| 8552 | spin_unlock_irq(lock: &phba->hbalock); |
| 8553 | /* Allow a little time after setting SLI_ACTIVE for any polled |
| 8554 | * MBX commands to complete via BSG. |
| 8555 | */ |
| 8556 | for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) { |
| 8557 | msleep(msecs: 20); |
| 8558 | spin_lock_irq(lock: &phba->hbalock); |
| 8559 | flg = phba->sli.sli_flag; |
| 8560 | spin_unlock_irq(lock: &phba->hbalock); |
| 8561 | } |
| 8562 | } |
| 8563 | clear_bit(nr: HBA_SETUP, addr: &phba->hba_flag); |
| 8564 | |
| 8565 | lpfc_sli4_dip(phba); |
| 8566 | |
| 8567 | /* |
| 8568 | * Allocate a single mailbox container for initializing the |
| 8569 | * port. |
| 8570 | */ |
| 8571 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 8572 | if (!mboxq) |
| 8573 | return -ENOMEM; |
| 8574 | |
| 8575 | /* Issue READ_REV to collect vpd and FW information. */ |
| 8576 | vpd_size = SLI4_PAGE_SIZE; |
| 8577 | vpd = kzalloc(vpd_size, GFP_KERNEL); |
| 8578 | if (!vpd) { |
| 8579 | rc = -ENOMEM; |
| 8580 | goto out_free_mbox; |
| 8581 | } |
| 8582 | |
| 8583 | rc = lpfc_sli4_read_rev(phba, mboxq, vpd, vpd_size: &vpd_size); |
| 8584 | if (unlikely(rc)) { |
| 8585 | kfree(objp: vpd); |
| 8586 | goto out_free_mbox; |
| 8587 | } |
| 8588 | |
| 8589 | mqe = &mboxq->u.mqe; |
| 8590 | phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); |
| 8591 | if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { |
| 8592 | set_bit(nr: HBA_FCOE_MODE, addr: &phba->hba_flag); |
| 8593 | phba->fcp_embed_io = 0; /* SLI4 FC support only */ |
| 8594 | } else { |
| 8595 | clear_bit(nr: HBA_FCOE_MODE, addr: &phba->hba_flag); |
| 8596 | } |
| 8597 | |
| 8598 | /* Obtain platform UUID, only for SLI4 FC adapters */ |
| 8599 | if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) |
| 8600 | lpfc_get_platform_uuid(phba); |
| 8601 | |
| 8602 | if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == |
| 8603 | LPFC_DCBX_CEE_MODE) |
| 8604 | set_bit(nr: HBA_FIP_SUPPORT, addr: &phba->hba_flag); |
| 8605 | else |
| 8606 | clear_bit(nr: HBA_FIP_SUPPORT, addr: &phba->hba_flag); |
| 8607 | |
| 8608 | clear_bit(nr: HBA_IOQ_FLUSH, addr: &phba->hba_flag); |
| 8609 | |
| 8610 | if (phba->sli_rev != LPFC_SLI_REV4) { |
| 8611 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8612 | "0376 READ_REV Error. SLI Level %d " |
| 8613 | "FCoE enabled %d\n" , |
| 8614 | phba->sli_rev, |
| 8615 | test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 1 : 0); |
| 8616 | rc = -EIO; |
| 8617 | kfree(objp: vpd); |
| 8618 | goto out_free_mbox; |
| 8619 | } |
| 8620 | |
| 8621 | rc = lpfc_set_host_tm(phba); |
| 8622 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, |
| 8623 | "6468 Set host date / time: Status x%x:\n" , rc); |
| 8624 | |
| 8625 | /* |
| 8626 | * Continue initialization with default values even if driver failed |
| 8627 | * to read FCoE param config regions, only read parameters if the |
| 8628 | * board is FCoE |
| 8629 | */ |
| 8630 | if (test_bit(HBA_FCOE_MODE, &phba->hba_flag) && |
| 8631 | lpfc_sli4_read_fcoe_params(phba)) |
| 8632 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, |
| 8633 | "2570 Failed to read FCoE parameters\n" ); |
| 8634 | |
| 8635 | /* |
| 8636 | * Retrieve sli4 device physical port name, failure of doing it |
| 8637 | * is considered as non-fatal. |
| 8638 | */ |
| 8639 | rc = lpfc_sli4_retrieve_pport_name(phba); |
| 8640 | if (!rc) |
| 8641 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, |
| 8642 | "3080 Successful retrieving SLI4 device " |
| 8643 | "physical port name: %s.\n" , phba->Port); |
| 8644 | |
| 8645 | rc = lpfc_sli4_get_ctl_attr(phba); |
| 8646 | if (!rc) |
| 8647 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, |
| 8648 | "8351 Successful retrieving SLI4 device " |
| 8649 | "CTL ATTR\n" ); |
| 8650 | |
| 8651 | /* |
| 8652 | * Evaluate the read rev and vpd data. Populate the driver |
| 8653 | * state with the results. If this routine fails, the failure |
| 8654 | * is not fatal as the driver will use generic values. |
| 8655 | */ |
| 8656 | rc = lpfc_parse_vpd(phba, vpd, vpd_size); |
| 8657 | if (unlikely(!rc)) |
| 8658 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8659 | "0377 Error %d parsing vpd. " |
| 8660 | "Using defaults.\n" , rc); |
| 8661 | kfree(objp: vpd); |
| 8662 | |
| 8663 | /* Save information as VPD data */ |
| 8664 | phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; |
| 8665 | phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; |
| 8666 | |
| 8667 | /* |
| 8668 | * This is because first G7 ASIC doesn't support the standard |
| 8669 | * 0x5a NVME cmd descriptor type/subtype |
| 8670 | */ |
| 8671 | if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == |
| 8672 | LPFC_SLI_INTF_IF_TYPE_6) && |
| 8673 | (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && |
| 8674 | (phba->vpd.rev.smRev == 0) && |
| 8675 | (phba->cfg_nvme_embed_cmd == 1)) |
| 8676 | phba->cfg_nvme_embed_cmd = 0; |
| 8677 | |
| 8678 | phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; |
| 8679 | phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, |
| 8680 | &mqe->un.read_rev); |
| 8681 | phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, |
| 8682 | &mqe->un.read_rev); |
| 8683 | phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, |
| 8684 | &mqe->un.read_rev); |
| 8685 | phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, |
| 8686 | &mqe->un.read_rev); |
| 8687 | phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; |
| 8688 | memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); |
| 8689 | phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; |
| 8690 | memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); |
| 8691 | phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; |
| 8692 | memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); |
| 8693 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, |
| 8694 | "(%d):0380 READ_REV Status x%x " |
| 8695 | "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n" , |
| 8696 | mboxq->vport ? mboxq->vport->vpi : 0, |
| 8697 | bf_get(lpfc_mqe_status, mqe), |
| 8698 | phba->vpd.rev.opFwName, |
| 8699 | phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, |
| 8700 | phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); |
| 8701 | |
| 8702 | if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == |
| 8703 | LPFC_SLI_INTF_IF_TYPE_0) { |
| 8704 | lpfc_set_features(phba, mbox: mboxq, LPFC_SET_UE_RECOVERY); |
| 8705 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 8706 | if (rc == MBX_SUCCESS) { |
| 8707 | set_bit(nr: HBA_RECOVERABLE_UE, addr: &phba->hba_flag); |
| 8708 | /* Set 1Sec interval to detect UE */ |
| 8709 | phba->eratt_poll_interval = 1; |
| 8710 | phba->sli4_hba.ue_to_sr = bf_get( |
| 8711 | lpfc_mbx_set_feature_UESR, |
| 8712 | &mboxq->u.mqe.un.set_feature); |
| 8713 | phba->sli4_hba.ue_to_rp = bf_get( |
| 8714 | lpfc_mbx_set_feature_UERP, |
| 8715 | &mboxq->u.mqe.un.set_feature); |
| 8716 | } |
| 8717 | } |
| 8718 | |
| 8719 | if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { |
| 8720 | /* Enable MDS Diagnostics only if the SLI Port supports it */ |
| 8721 | lpfc_set_features(phba, mbox: mboxq, LPFC_SET_MDS_DIAGS); |
| 8722 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 8723 | if (rc != MBX_SUCCESS) |
| 8724 | phba->mds_diags_support = 0; |
| 8725 | } |
| 8726 | |
| 8727 | /* |
| 8728 | * Discover the port's supported feature set and match it against the |
| 8729 | * hosts requests. |
| 8730 | */ |
| 8731 | lpfc_request_features(phba, mboxq); |
| 8732 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 8733 | if (unlikely(rc)) { |
| 8734 | rc = -EIO; |
| 8735 | goto out_free_mbox; |
| 8736 | } |
| 8737 | |
| 8738 | /* Disable VMID if app header is not supported */ |
| 8739 | if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr, |
| 8740 | &mqe->un.req_ftrs))) { |
| 8741 | bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0); |
| 8742 | phba->cfg_vmid_app_header = 0; |
| 8743 | lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI, |
| 8744 | "1242 vmid feature not supported\n" ); |
| 8745 | } |
| 8746 | |
| 8747 | /* |
| 8748 | * The port must support FCP initiator mode as this is the |
| 8749 | * only mode running in the host. |
| 8750 | */ |
| 8751 | if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { |
| 8752 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, |
| 8753 | "0378 No support for fcpi mode.\n" ); |
| 8754 | ftr_rsp++; |
| 8755 | } |
| 8756 | |
| 8757 | /* Performance Hints are ONLY for FCoE */ |
| 8758 | if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { |
| 8759 | if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) |
| 8760 | phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; |
| 8761 | else |
| 8762 | phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; |
| 8763 | } |
| 8764 | |
| 8765 | /* |
| 8766 | * If the port cannot support the host's requested features |
| 8767 | * then turn off the global config parameters to disable the |
| 8768 | * feature in the driver. This is not a fatal error. |
| 8769 | */ |
| 8770 | if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { |
| 8771 | if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { |
| 8772 | phba->cfg_enable_bg = 0; |
| 8773 | phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; |
| 8774 | ftr_rsp++; |
| 8775 | } |
| 8776 | } |
| 8777 | |
| 8778 | if (phba->max_vpi && phba->cfg_enable_npiv && |
| 8779 | !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) |
| 8780 | ftr_rsp++; |
| 8781 | |
| 8782 | if (ftr_rsp) { |
| 8783 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, |
| 8784 | "0379 Feature Mismatch Data: x%08x %08x " |
| 8785 | "x%x x%x x%x\n" , mqe->un.req_ftrs.word2, |
| 8786 | mqe->un.req_ftrs.word3, phba->cfg_enable_bg, |
| 8787 | phba->cfg_enable_npiv, phba->max_vpi); |
| 8788 | if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) |
| 8789 | phba->cfg_enable_bg = 0; |
| 8790 | if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) |
| 8791 | phba->cfg_enable_npiv = 0; |
| 8792 | } |
| 8793 | |
| 8794 | /* These SLI3 features are assumed in SLI4 */ |
| 8795 | spin_lock_irq(lock: &phba->hbalock); |
| 8796 | phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); |
| 8797 | spin_unlock_irq(lock: &phba->hbalock); |
| 8798 | |
| 8799 | /* Always try to enable dual dump feature if we can */ |
| 8800 | lpfc_set_features(phba, mbox: mboxq, LPFC_SET_DUAL_DUMP); |
| 8801 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 8802 | dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature); |
| 8803 | if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP)) |
| 8804 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 8805 | "6448 Dual Dump is enabled\n" ); |
| 8806 | else |
| 8807 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT, |
| 8808 | "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, " |
| 8809 | "rc:x%x dd:x%x\n" , |
| 8810 | bf_get(lpfc_mqe_command, &mboxq->u.mqe), |
| 8811 | lpfc_sli_config_mbox_subsys_get( |
| 8812 | phba, mboxq), |
| 8813 | lpfc_sli_config_mbox_opcode_get( |
| 8814 | phba, mboxq), |
| 8815 | rc, dd); |
| 8816 | |
| 8817 | /* |
| 8818 | * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent |
| 8819 | * calls depends on these resources to complete port setup. |
| 8820 | */ |
| 8821 | rc = lpfc_sli4_alloc_resource_identifiers(phba); |
| 8822 | if (rc) { |
| 8823 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8824 | "2920 Failed to alloc Resource IDs " |
| 8825 | "rc = x%x\n" , rc); |
| 8826 | goto out_free_mbox; |
| 8827 | } |
| 8828 | |
| 8829 | lpfc_sli4_node_rpi_restore(phba); |
| 8830 | |
| 8831 | lpfc_set_host_data(phba, mbox: mboxq); |
| 8832 | |
| 8833 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 8834 | if (rc) { |
| 8835 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, |
| 8836 | "2134 Failed to set host os driver version %x" , |
| 8837 | rc); |
| 8838 | } |
| 8839 | |
| 8840 | /* Read the port's service parameters. */ |
| 8841 | rc = lpfc_read_sparam(phba, mboxq, vport->vpi); |
| 8842 | if (rc) { |
| 8843 | phba->link_state = LPFC_HBA_ERROR; |
| 8844 | rc = -ENOMEM; |
| 8845 | goto out_free_mbox; |
| 8846 | } |
| 8847 | |
| 8848 | mboxq->vport = vport; |
| 8849 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 8850 | mp = mboxq->ctx_buf; |
| 8851 | if (rc == MBX_SUCCESS) { |
| 8852 | memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); |
| 8853 | rc = 0; |
| 8854 | } |
| 8855 | |
| 8856 | /* |
| 8857 | * This memory was allocated by the lpfc_read_sparam routine but is |
| 8858 | * no longer needed. It is released and ctx_buf NULLed to prevent |
| 8859 | * unintended pointer access as the mbox is reused. |
| 8860 | */ |
| 8861 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
| 8862 | kfree(objp: mp); |
| 8863 | mboxq->ctx_buf = NULL; |
| 8864 | if (unlikely(rc)) { |
| 8865 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8866 | "0382 READ_SPARAM command failed " |
| 8867 | "status %d, mbxStatus x%x\n" , |
| 8868 | rc, bf_get(lpfc_mqe_status, mqe)); |
| 8869 | phba->link_state = LPFC_HBA_ERROR; |
| 8870 | rc = -EIO; |
| 8871 | goto out_free_mbox; |
| 8872 | } |
| 8873 | |
| 8874 | lpfc_update_vport_wwn(vport); |
| 8875 | |
| 8876 | /* Update the fc_host data structures with new wwn. */ |
| 8877 | fc_host_node_name(shost) = wwn_to_u64(wwn: vport->fc_nodename.u.wwn); |
| 8878 | fc_host_port_name(shost) = wwn_to_u64(wwn: vport->fc_portname.u.wwn); |
| 8879 | |
| 8880 | /* Create all the SLI4 queues */ |
| 8881 | rc = lpfc_sli4_queue_create(phba); |
| 8882 | if (rc) { |
| 8883 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8884 | "3089 Failed to allocate queues\n" ); |
| 8885 | rc = -ENODEV; |
| 8886 | goto out_free_mbox; |
| 8887 | } |
| 8888 | /* Set up all the queues to the device */ |
| 8889 | rc = lpfc_sli4_queue_setup(phba); |
| 8890 | if (unlikely(rc)) { |
| 8891 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8892 | "0381 Error %d during queue setup.\n" , rc); |
| 8893 | goto out_destroy_queue; |
| 8894 | } |
| 8895 | /* Initialize the driver internal SLI layer lists. */ |
| 8896 | lpfc_sli4_setup(phba); |
| 8897 | lpfc_sli4_queue_init(phba); |
| 8898 | |
| 8899 | /* update host els xri-sgl sizes and mappings */ |
| 8900 | rc = lpfc_sli4_els_sgl_update(phba); |
| 8901 | if (unlikely(rc)) { |
| 8902 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8903 | "1400 Failed to update xri-sgl size and " |
| 8904 | "mapping: %d\n" , rc); |
| 8905 | goto out_destroy_queue; |
| 8906 | } |
| 8907 | |
| 8908 | /* register the els sgl pool to the port */ |
| 8909 | rc = lpfc_sli4_repost_sgl_list(phba, sgl_list: &phba->sli4_hba.lpfc_els_sgl_list, |
| 8910 | cnt: phba->sli4_hba.els_xri_cnt); |
| 8911 | if (unlikely(rc < 0)) { |
| 8912 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8913 | "0582 Error %d during els sgl post " |
| 8914 | "operation\n" , rc); |
| 8915 | rc = -ENODEV; |
| 8916 | goto out_destroy_queue; |
| 8917 | } |
| 8918 | phba->sli4_hba.els_xri_cnt = rc; |
| 8919 | |
| 8920 | if (phba->nvmet_support) { |
| 8921 | /* update host nvmet xri-sgl sizes and mappings */ |
| 8922 | rc = lpfc_sli4_nvmet_sgl_update(phba); |
| 8923 | if (unlikely(rc)) { |
| 8924 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8925 | "6308 Failed to update nvmet-sgl size " |
| 8926 | "and mapping: %d\n" , rc); |
| 8927 | goto out_destroy_queue; |
| 8928 | } |
| 8929 | |
| 8930 | /* register the nvmet sgl pool to the port */ |
| 8931 | rc = lpfc_sli4_repost_sgl_list( |
| 8932 | phba, |
| 8933 | sgl_list: &phba->sli4_hba.lpfc_nvmet_sgl_list, |
| 8934 | cnt: phba->sli4_hba.nvmet_xri_cnt); |
| 8935 | if (unlikely(rc < 0)) { |
| 8936 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8937 | "3117 Error %d during nvmet " |
| 8938 | "sgl post\n" , rc); |
| 8939 | rc = -ENODEV; |
| 8940 | goto out_destroy_queue; |
| 8941 | } |
| 8942 | phba->sli4_hba.nvmet_xri_cnt = rc; |
| 8943 | |
| 8944 | /* We allocate an iocbq for every receive context SGL. |
| 8945 | * The additional allocation is for abort and ls handling. |
| 8946 | */ |
| 8947 | cnt = phba->sli4_hba.nvmet_xri_cnt + |
| 8948 | phba->sli4_hba.max_cfg_param.max_xri; |
| 8949 | } else { |
| 8950 | /* update host common xri-sgl sizes and mappings */ |
| 8951 | rc = lpfc_sli4_io_sgl_update(phba); |
| 8952 | if (unlikely(rc)) { |
| 8953 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8954 | "6082 Failed to update nvme-sgl size " |
| 8955 | "and mapping: %d\n" , rc); |
| 8956 | goto out_destroy_queue; |
| 8957 | } |
| 8958 | |
| 8959 | /* register the allocated common sgl pool to the port */ |
| 8960 | rc = lpfc_sli4_repost_io_sgl_list(phba); |
| 8961 | if (unlikely(rc)) { |
| 8962 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8963 | "6116 Error %d during nvme sgl post " |
| 8964 | "operation\n" , rc); |
| 8965 | /* Some NVME buffers were moved to abort nvme list */ |
| 8966 | /* A pci function reset will repost them */ |
| 8967 | rc = -ENODEV; |
| 8968 | goto out_destroy_queue; |
| 8969 | } |
| 8970 | /* Each lpfc_io_buf job structure has an iocbq element. |
| 8971 | * This cnt provides for abort, els, ct and ls requests. |
| 8972 | */ |
| 8973 | cnt = phba->sli4_hba.max_cfg_param.max_xri; |
| 8974 | } |
| 8975 | |
| 8976 | if (!phba->sli.iocbq_lookup) { |
| 8977 | /* Initialize and populate the iocb list per host */ |
| 8978 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 8979 | "2821 initialize iocb list with %d entries\n" , |
| 8980 | cnt); |
| 8981 | rc = lpfc_init_iocb_list(phba, cnt); |
| 8982 | if (rc) { |
| 8983 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 8984 | "1413 Failed to init iocb list.\n" ); |
| 8985 | goto out_destroy_queue; |
| 8986 | } |
| 8987 | } |
| 8988 | |
| 8989 | if (phba->nvmet_support) |
| 8990 | lpfc_nvmet_create_targetport(phba); |
| 8991 | |
| 8992 | if (phba->nvmet_support && phba->cfg_nvmet_mrq) { |
| 8993 | /* Post initial buffers to all RQs created */ |
| 8994 | for (i = 0; i < phba->cfg_nvmet_mrq; i++) { |
| 8995 | rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; |
| 8996 | INIT_LIST_HEAD(list: &rqbp->rqb_buffer_list); |
| 8997 | rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; |
| 8998 | rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; |
| 8999 | rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; |
| 9000 | rqbp->buffer_count = 0; |
| 9001 | |
| 9002 | lpfc_post_rq_buffer( |
| 9003 | phba, hrq: phba->sli4_hba.nvmet_mrq_hdr[i], |
| 9004 | drq: phba->sli4_hba.nvmet_mrq_data[i], |
| 9005 | count: phba->cfg_nvmet_mrq_post, idx: i); |
| 9006 | } |
| 9007 | } |
| 9008 | |
| 9009 | /* Post the rpi header region to the device. */ |
| 9010 | rc = lpfc_sli4_post_all_rpi_hdrs(phba); |
| 9011 | if (unlikely(rc)) { |
| 9012 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 9013 | "0393 Error %d during rpi post operation\n" , |
| 9014 | rc); |
| 9015 | rc = -ENODEV; |
| 9016 | goto out_free_iocblist; |
| 9017 | } |
| 9018 | |
| 9019 | if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { |
| 9020 | if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { |
| 9021 | /* |
| 9022 | * The FC Port needs to register FCFI (index 0) |
| 9023 | */ |
| 9024 | lpfc_reg_fcfi(phba, mboxq); |
| 9025 | mboxq->vport = phba->pport; |
| 9026 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 9027 | if (rc != MBX_SUCCESS) |
| 9028 | goto out_unset_queue; |
| 9029 | rc = 0; |
| 9030 | phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, |
| 9031 | &mboxq->u.mqe.un.reg_fcfi); |
| 9032 | } else { |
| 9033 | /* We are a NVME Target mode with MRQ > 1 */ |
| 9034 | |
| 9035 | /* First register the FCFI */ |
| 9036 | lpfc_reg_fcfi_mrq(phba, mbox: mboxq, mode: 0); |
| 9037 | mboxq->vport = phba->pport; |
| 9038 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 9039 | if (rc != MBX_SUCCESS) |
| 9040 | goto out_unset_queue; |
| 9041 | rc = 0; |
| 9042 | phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, |
| 9043 | &mboxq->u.mqe.un.reg_fcfi_mrq); |
| 9044 | |
| 9045 | /* Next register the MRQs */ |
| 9046 | lpfc_reg_fcfi_mrq(phba, mbox: mboxq, mode: 1); |
| 9047 | mboxq->vport = phba->pport; |
| 9048 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 9049 | if (rc != MBX_SUCCESS) |
| 9050 | goto out_unset_queue; |
| 9051 | rc = 0; |
| 9052 | } |
| 9053 | /* Check if the port is configured to be disabled */ |
| 9054 | lpfc_sli_read_link_ste(phba); |
| 9055 | } |
| 9056 | |
| 9057 | /* Don't post more new bufs if repost already recovered |
| 9058 | * the nvme sgls. |
| 9059 | */ |
| 9060 | if (phba->nvmet_support == 0) { |
| 9061 | if (phba->sli4_hba.io_xri_cnt == 0) { |
| 9062 | len = lpfc_new_io_buf( |
| 9063 | phba, num_to_alloc: phba->sli4_hba.io_xri_max); |
| 9064 | if (len == 0) { |
| 9065 | rc = -ENOMEM; |
| 9066 | goto out_unset_queue; |
| 9067 | } |
| 9068 | |
| 9069 | if (phba->cfg_xri_rebalancing) |
| 9070 | lpfc_create_multixri_pools(phba); |
| 9071 | } |
| 9072 | } else { |
| 9073 | phba->cfg_xri_rebalancing = 0; |
| 9074 | } |
| 9075 | |
| 9076 | /* Allow asynchronous mailbox command to go through */ |
| 9077 | spin_lock_irq(lock: &phba->hbalock); |
| 9078 | phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; |
| 9079 | spin_unlock_irq(lock: &phba->hbalock); |
| 9080 | |
| 9081 | /* Post receive buffers to the device */ |
| 9082 | lpfc_sli4_rb_setup(phba); |
| 9083 | |
| 9084 | /* Reset HBA FCF states after HBA reset */ |
| 9085 | phba->fcf.fcf_flag = 0; |
| 9086 | phba->fcf.current_rec.flag = 0; |
| 9087 | |
| 9088 | /* Start the ELS watchdog timer */ |
| 9089 | mod_timer(timer: &vport->els_tmofunc, |
| 9090 | expires: jiffies + secs_to_jiffies(phba->fc_ratov * 2)); |
| 9091 | |
| 9092 | /* Start heart beat timer */ |
| 9093 | mod_timer(timer: &phba->hb_tmofunc, |
| 9094 | expires: jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL)); |
| 9095 | clear_bit(nr: HBA_HBEAT_INP, addr: &phba->hba_flag); |
| 9096 | clear_bit(nr: HBA_HBEAT_TMO, addr: &phba->hba_flag); |
| 9097 | phba->last_completion_time = jiffies; |
| 9098 | |
| 9099 | /* start eq_delay heartbeat */ |
| 9100 | if (phba->cfg_auto_imax) |
| 9101 | queue_delayed_work(wq: phba->wq, dwork: &phba->eq_delay_work, |
| 9102 | delay: msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); |
| 9103 | |
| 9104 | /* start per phba idle_stat_delay heartbeat */ |
| 9105 | lpfc_init_idle_stat_hb(phba); |
| 9106 | |
| 9107 | /* Start error attention (ERATT) polling timer */ |
| 9108 | mod_timer(timer: &phba->eratt_poll, |
| 9109 | expires: jiffies + secs_to_jiffies(phba->eratt_poll_interval)); |
| 9110 | |
| 9111 | /* |
| 9112 | * The port is ready, set the host's link state to LINK_DOWN |
| 9113 | * in preparation for link interrupts. |
| 9114 | */ |
| 9115 | spin_lock_irq(lock: &phba->hbalock); |
| 9116 | phba->link_state = LPFC_LINK_DOWN; |
| 9117 | |
| 9118 | /* Check if physical ports are trunked */ |
| 9119 | if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) |
| 9120 | phba->trunk_link.link0.state = LPFC_LINK_DOWN; |
| 9121 | if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) |
| 9122 | phba->trunk_link.link1.state = LPFC_LINK_DOWN; |
| 9123 | if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) |
| 9124 | phba->trunk_link.link2.state = LPFC_LINK_DOWN; |
| 9125 | if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) |
| 9126 | phba->trunk_link.link3.state = LPFC_LINK_DOWN; |
| 9127 | spin_unlock_irq(lock: &phba->hbalock); |
| 9128 | |
| 9129 | /* Arm the CQs and then EQs on device */ |
| 9130 | lpfc_sli4_arm_cqeq_intr(phba); |
| 9131 | |
| 9132 | /* Indicate device interrupt mode */ |
| 9133 | phba->sli4_hba.intr_enable = 1; |
| 9134 | |
| 9135 | /* Setup CMF after HBA is initialized */ |
| 9136 | lpfc_cmf_setup(phba); |
| 9137 | |
| 9138 | if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) && |
| 9139 | test_bit(LINK_DISABLED, &phba->hba_flag)) { |
| 9140 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 9141 | "3103 Adapter Link is disabled.\n" ); |
| 9142 | lpfc_down_link(phba, mboxq); |
| 9143 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
| 9144 | if (rc != MBX_SUCCESS) { |
| 9145 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 9146 | "3104 Adapter failed to issue " |
| 9147 | "DOWN_LINK mbox cmd, rc:x%x\n" , rc); |
| 9148 | goto out_io_buff_free; |
| 9149 | } |
| 9150 | } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { |
| 9151 | /* don't perform init_link on SLI4 FC port loopback test */ |
| 9152 | if (!(phba->link_flag & LS_LOOPBACK_MODE)) { |
| 9153 | rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); |
| 9154 | if (rc) |
| 9155 | goto out_io_buff_free; |
| 9156 | } |
| 9157 | } |
| 9158 | mempool_free(element: mboxq, pool: phba->mbox_mem_pool); |
| 9159 | |
| 9160 | /* Enable RAS FW log support */ |
| 9161 | lpfc_sli4_ras_setup(phba); |
| 9162 | |
| 9163 | set_bit(nr: HBA_SETUP, addr: &phba->hba_flag); |
| 9164 | return rc; |
| 9165 | |
| 9166 | out_io_buff_free: |
| 9167 | /* Free allocated IO Buffers */ |
| 9168 | lpfc_io_free(phba); |
| 9169 | out_unset_queue: |
| 9170 | /* Unset all the queues set up in this routine when error out */ |
| 9171 | lpfc_sli4_queue_unset(phba); |
| 9172 | out_free_iocblist: |
| 9173 | lpfc_free_iocb_list(phba); |
| 9174 | out_destroy_queue: |
| 9175 | lpfc_sli4_queue_destroy(phba); |
| 9176 | lpfc_stop_hba_timers(phba); |
| 9177 | out_free_mbox: |
| 9178 | mempool_free(element: mboxq, pool: phba->mbox_mem_pool); |
| 9179 | return rc; |
| 9180 | } |
| 9181 | |
| 9182 | /** |
| 9183 | * lpfc_mbox_timeout - Timeout call back function for mbox timer |
| 9184 | * @t: Context to fetch pointer to hba structure from. |
| 9185 | * |
| 9186 | * This is the callback function for mailbox timer. The mailbox |
| 9187 | * timer is armed when a new mailbox command is issued and the timer |
| 9188 | * is deleted when the mailbox complete. The function is called by |
| 9189 | * the kernel timer code when a mailbox does not complete within |
| 9190 | * expected time. This function wakes up the worker thread to |
| 9191 | * process the mailbox timeout and returns. All the processing is |
| 9192 | * done by the worker thread function lpfc_mbox_timeout_handler. |
| 9193 | **/ |
| 9194 | void |
| 9195 | lpfc_mbox_timeout(struct timer_list *t) |
| 9196 | { |
| 9197 | struct lpfc_hba *phba = timer_container_of(phba, t, sli.mbox_tmo); |
| 9198 | unsigned long iflag; |
| 9199 | uint32_t tmo_posted; |
| 9200 | |
| 9201 | spin_lock_irqsave(&phba->pport->work_port_lock, iflag); |
| 9202 | tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; |
| 9203 | if (!tmo_posted) |
| 9204 | phba->pport->work_port_events |= WORKER_MBOX_TMO; |
| 9205 | spin_unlock_irqrestore(lock: &phba->pport->work_port_lock, flags: iflag); |
| 9206 | |
| 9207 | if (!tmo_posted) |
| 9208 | lpfc_worker_wake_up(phba); |
| 9209 | return; |
| 9210 | } |
| 9211 | |
| 9212 | /** |
| 9213 | * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions |
| 9214 | * are pending |
| 9215 | * @phba: Pointer to HBA context object. |
| 9216 | * |
| 9217 | * This function checks if any mailbox completions are present on the mailbox |
| 9218 | * completion queue. |
| 9219 | **/ |
| 9220 | static bool |
| 9221 | lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) |
| 9222 | { |
| 9223 | |
| 9224 | uint32_t idx; |
| 9225 | struct lpfc_queue *mcq; |
| 9226 | struct lpfc_mcqe *mcqe; |
| 9227 | bool pending_completions = false; |
| 9228 | uint8_t qe_valid; |
| 9229 | |
| 9230 | if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) |
| 9231 | return false; |
| 9232 | |
| 9233 | /* Check for completions on mailbox completion queue */ |
| 9234 | |
| 9235 | mcq = phba->sli4_hba.mbx_cq; |
| 9236 | idx = mcq->hba_index; |
| 9237 | qe_valid = mcq->qe_valid; |
| 9238 | while (bf_get_le32(lpfc_cqe_valid, |
| 9239 | (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) { |
| 9240 | mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(q: mcq, idx)); |
| 9241 | if (bf_get_le32(lpfc_trailer_completed, mcqe) && |
| 9242 | (!bf_get_le32(lpfc_trailer_async, mcqe))) { |
| 9243 | pending_completions = true; |
| 9244 | break; |
| 9245 | } |
| 9246 | idx = (idx + 1) % mcq->entry_count; |
| 9247 | if (mcq->hba_index == idx) |
| 9248 | break; |
| 9249 | |
| 9250 | /* if the index wrapped around, toggle the valid bit */ |
| 9251 | if (phba->sli4_hba.pc_sli4_params.cqav && !idx) |
| 9252 | qe_valid = (qe_valid) ? 0 : 1; |
| 9253 | } |
| 9254 | return pending_completions; |
| 9255 | |
| 9256 | } |
| 9257 | |
| 9258 | /** |
| 9259 | * lpfc_sli4_process_missed_mbox_completions - process mbox completions |
| 9260 | * that were missed. |
| 9261 | * @phba: Pointer to HBA context object. |
| 9262 | * |
| 9263 | * For sli4, it is possible to miss an interrupt. As such mbox completions |
| 9264 | * maybe missed causing erroneous mailbox timeouts to occur. This function |
| 9265 | * checks to see if mbox completions are on the mailbox completion queue |
| 9266 | * and will process all the completions associated with the eq for the |
| 9267 | * mailbox completion queue. |
| 9268 | **/ |
| 9269 | static bool |
| 9270 | lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) |
| 9271 | { |
| 9272 | struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; |
| 9273 | uint32_t eqidx; |
| 9274 | struct lpfc_queue *fpeq = NULL; |
| 9275 | struct lpfc_queue *eq; |
| 9276 | bool mbox_pending; |
| 9277 | |
| 9278 | if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) |
| 9279 | return false; |
| 9280 | |
| 9281 | /* Find the EQ associated with the mbox CQ */ |
| 9282 | if (sli4_hba->hdwq) { |
| 9283 | for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) { |
| 9284 | eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq; |
| 9285 | if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { |
| 9286 | fpeq = eq; |
| 9287 | break; |
| 9288 | } |
| 9289 | } |
| 9290 | } |
| 9291 | if (!fpeq) |
| 9292 | return false; |
| 9293 | |
| 9294 | /* Turn off interrupts from this EQ */ |
| 9295 | |
| 9296 | sli4_hba->sli4_eq_clr_intr(fpeq); |
| 9297 | |
| 9298 | /* Check to see if a mbox completion is pending */ |
| 9299 | |
| 9300 | mbox_pending = lpfc_sli4_mbox_completions_pending(phba); |
| 9301 | |
| 9302 | /* |
| 9303 | * If a mbox completion is pending, process all the events on EQ |
| 9304 | * associated with the mbox completion queue (this could include |
| 9305 | * mailbox commands, async events, els commands, receive queue data |
| 9306 | * and fcp commands) |
| 9307 | */ |
| 9308 | |
| 9309 | if (mbox_pending) |
| 9310 | /* process and rearm the EQ */ |
| 9311 | lpfc_sli4_process_eq(phba, eq: fpeq, LPFC_QUEUE_REARM, |
| 9312 | poll_mode: LPFC_QUEUE_WORK); |
| 9313 | else |
| 9314 | /* Always clear and re-arm the EQ */ |
| 9315 | sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); |
| 9316 | |
| 9317 | return mbox_pending; |
| 9318 | |
| 9319 | } |
| 9320 | |
| 9321 | /** |
| 9322 | * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout |
| 9323 | * @phba: Pointer to HBA context object. |
| 9324 | * |
| 9325 | * This function is called from worker thread when a mailbox command times out. |
| 9326 | * The caller is not required to hold any locks. This function will reset the |
| 9327 | * HBA and recover all the pending commands. |
| 9328 | **/ |
| 9329 | void |
| 9330 | lpfc_mbox_timeout_handler(struct lpfc_hba *phba) |
| 9331 | { |
| 9332 | LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; |
| 9333 | MAILBOX_t *mb = NULL; |
| 9334 | |
| 9335 | struct lpfc_sli *psli = &phba->sli; |
| 9336 | |
| 9337 | /* If the mailbox completed, process the completion */ |
| 9338 | lpfc_sli4_process_missed_mbox_completions(phba); |
| 9339 | |
| 9340 | if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) |
| 9341 | return; |
| 9342 | |
| 9343 | if (pmbox != NULL) |
| 9344 | mb = &pmbox->u.mb; |
| 9345 | /* Check the pmbox pointer first. There is a race condition |
| 9346 | * between the mbox timeout handler getting executed in the |
| 9347 | * worklist and the mailbox actually completing. When this |
| 9348 | * race condition occurs, the mbox_active will be NULL. |
| 9349 | */ |
| 9350 | spin_lock_irq(lock: &phba->hbalock); |
| 9351 | if (pmbox == NULL) { |
| 9352 | lpfc_printf_log(phba, KERN_WARNING, |
| 9353 | LOG_MBOX | LOG_SLI, |
| 9354 | "0353 Active Mailbox cleared - mailbox timeout " |
| 9355 | "exiting\n" ); |
| 9356 | spin_unlock_irq(lock: &phba->hbalock); |
| 9357 | return; |
| 9358 | } |
| 9359 | |
| 9360 | /* Mbox cmd <mbxCommand> timeout */ |
| 9361 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 9362 | "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n" , |
| 9363 | mb->mbxCommand, |
| 9364 | phba->pport->port_state, |
| 9365 | phba->sli.sli_flag, |
| 9366 | phba->sli.mbox_active); |
| 9367 | spin_unlock_irq(lock: &phba->hbalock); |
| 9368 | |
| 9369 | /* Setting state unknown so lpfc_sli_abort_iocb_ring |
| 9370 | * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing |
| 9371 | * it to fail all outstanding SCSI IO. |
| 9372 | */ |
| 9373 | set_bit(nr: MBX_TMO_ERR, addr: &phba->bit_flags); |
| 9374 | spin_lock_irq(lock: &phba->pport->work_port_lock); |
| 9375 | phba->pport->work_port_events &= ~WORKER_MBOX_TMO; |
| 9376 | spin_unlock_irq(lock: &phba->pport->work_port_lock); |
| 9377 | spin_lock_irq(lock: &phba->hbalock); |
| 9378 | phba->link_state = LPFC_LINK_UNKNOWN; |
| 9379 | psli->sli_flag &= ~LPFC_SLI_ACTIVE; |
| 9380 | spin_unlock_irq(lock: &phba->hbalock); |
| 9381 | |
| 9382 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 9383 | "0345 Resetting board due to mailbox timeout\n" ); |
| 9384 | |
| 9385 | /* Reset the HBA device */ |
| 9386 | lpfc_reset_hba(phba); |
| 9387 | } |
| 9388 | |
| 9389 | /** |
| 9390 | * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware |
| 9391 | * @phba: Pointer to HBA context object. |
| 9392 | * @pmbox: Pointer to mailbox object. |
| 9393 | * @flag: Flag indicating how the mailbox need to be processed. |
| 9394 | * |
| 9395 | * This function is called by discovery code and HBA management code |
| 9396 | * to submit a mailbox command to firmware with SLI-3 interface spec. This |
| 9397 | * function gets the hbalock to protect the data structures. |
| 9398 | * The mailbox command can be submitted in polling mode, in which case |
| 9399 | * this function will wait in a polling loop for the completion of the |
| 9400 | * mailbox. |
| 9401 | * If the mailbox is submitted in no_wait mode (not polling) the |
| 9402 | * function will submit the command and returns immediately without waiting |
| 9403 | * for the mailbox completion. The no_wait is supported only when HBA |
| 9404 | * is in SLI2/SLI3 mode - interrupts are enabled. |
| 9405 | * The SLI interface allows only one mailbox pending at a time. If the |
| 9406 | * mailbox is issued in polling mode and there is already a mailbox |
| 9407 | * pending, then the function will return an error. If the mailbox is issued |
| 9408 | * in NO_WAIT mode and there is a mailbox pending already, the function |
| 9409 | * will return MBX_BUSY after queuing the mailbox into mailbox queue. |
| 9410 | * The sli layer owns the mailbox object until the completion of mailbox |
| 9411 | * command if this function return MBX_BUSY or MBX_SUCCESS. For all other |
| 9412 | * return codes the caller owns the mailbox command after the return of |
| 9413 | * the function. |
| 9414 | **/ |
| 9415 | static int |
| 9416 | lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, |
| 9417 | uint32_t flag) |
| 9418 | { |
| 9419 | MAILBOX_t *mbx; |
| 9420 | struct lpfc_sli *psli = &phba->sli; |
| 9421 | uint32_t status, evtctr; |
| 9422 | uint32_t ha_copy, hc_copy; |
| 9423 | int i; |
| 9424 | unsigned long timeout; |
| 9425 | unsigned long drvr_flag = 0; |
| 9426 | uint32_t word0, ldata; |
| 9427 | void __iomem *to_slim; |
| 9428 | int processing_queue = 0; |
| 9429 | |
| 9430 | spin_lock_irqsave(&phba->hbalock, drvr_flag); |
| 9431 | if (!pmbox) { |
| 9432 | phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
| 9433 | /* processing mbox queue from intr_handler */ |
| 9434 | if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { |
| 9435 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9436 | return MBX_SUCCESS; |
| 9437 | } |
| 9438 | processing_queue = 1; |
| 9439 | pmbox = lpfc_mbox_get(phba); |
| 9440 | if (!pmbox) { |
| 9441 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9442 | return MBX_SUCCESS; |
| 9443 | } |
| 9444 | } |
| 9445 | |
| 9446 | if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && |
| 9447 | pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { |
| 9448 | if(!pmbox->vport) { |
| 9449 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9450 | lpfc_printf_log(phba, KERN_ERR, |
| 9451 | LOG_MBOX | LOG_VPORT, |
| 9452 | "1806 Mbox x%x failed. No vport\n" , |
| 9453 | pmbox->u.mb.mbxCommand); |
| 9454 | dump_stack(); |
| 9455 | goto out_not_finished; |
| 9456 | } |
| 9457 | } |
| 9458 | |
| 9459 | /* If the PCI channel is in offline state, do not post mbox. */ |
| 9460 | if (unlikely(pci_channel_offline(phba->pcidev))) { |
| 9461 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9462 | goto out_not_finished; |
| 9463 | } |
| 9464 | |
| 9465 | /* If HBA has a deferred error attention, fail the iocb. */ |
| 9466 | if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { |
| 9467 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9468 | goto out_not_finished; |
| 9469 | } |
| 9470 | |
| 9471 | psli = &phba->sli; |
| 9472 | |
| 9473 | mbx = &pmbox->u.mb; |
| 9474 | status = MBX_SUCCESS; |
| 9475 | |
| 9476 | if (phba->link_state == LPFC_HBA_ERROR) { |
| 9477 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9478 | |
| 9479 | /* Mbox command <mbxCommand> cannot issue */ |
| 9480 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 9481 | "(%d):0311 Mailbox command x%x cannot " |
| 9482 | "issue Data: x%x x%x\n" , |
| 9483 | pmbox->vport ? pmbox->vport->vpi : 0, |
| 9484 | pmbox->u.mb.mbxCommand, psli->sli_flag, flag); |
| 9485 | goto out_not_finished; |
| 9486 | } |
| 9487 | |
| 9488 | if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { |
| 9489 | if (lpfc_readl(addr: phba->HCregaddr, data: &hc_copy) || |
| 9490 | !(hc_copy & HC_MBINT_ENA)) { |
| 9491 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9492 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 9493 | "(%d):2528 Mailbox command x%x cannot " |
| 9494 | "issue Data: x%x x%x\n" , |
| 9495 | pmbox->vport ? pmbox->vport->vpi : 0, |
| 9496 | pmbox->u.mb.mbxCommand, psli->sli_flag, flag); |
| 9497 | goto out_not_finished; |
| 9498 | } |
| 9499 | } |
| 9500 | |
| 9501 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { |
| 9502 | /* Polling for a mbox command when another one is already active |
| 9503 | * is not allowed in SLI. Also, the driver must have established |
| 9504 | * SLI2 mode to queue and process multiple mbox commands. |
| 9505 | */ |
| 9506 | |
| 9507 | if (flag & MBX_POLL) { |
| 9508 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9509 | |
| 9510 | /* Mbox command <mbxCommand> cannot issue */ |
| 9511 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 9512 | "(%d):2529 Mailbox command x%x " |
| 9513 | "cannot issue Data: x%x x%x\n" , |
| 9514 | pmbox->vport ? pmbox->vport->vpi : 0, |
| 9515 | pmbox->u.mb.mbxCommand, |
| 9516 | psli->sli_flag, flag); |
| 9517 | goto out_not_finished; |
| 9518 | } |
| 9519 | |
| 9520 | if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { |
| 9521 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9522 | /* Mbox command <mbxCommand> cannot issue */ |
| 9523 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 9524 | "(%d):2530 Mailbox command x%x " |
| 9525 | "cannot issue Data: x%x x%x\n" , |
| 9526 | pmbox->vport ? pmbox->vport->vpi : 0, |
| 9527 | pmbox->u.mb.mbxCommand, |
| 9528 | psli->sli_flag, flag); |
| 9529 | goto out_not_finished; |
| 9530 | } |
| 9531 | |
| 9532 | /* Another mailbox command is still being processed, queue this |
| 9533 | * command to be processed later. |
| 9534 | */ |
| 9535 | lpfc_mbox_put(phba, pmbox); |
| 9536 | |
| 9537 | /* Mbox cmd issue - BUSY */ |
| 9538 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, |
| 9539 | "(%d):0308 Mbox cmd issue - BUSY Data: " |
| 9540 | "x%x x%x x%x x%x\n" , |
| 9541 | pmbox->vport ? pmbox->vport->vpi : 0xffffff, |
| 9542 | mbx->mbxCommand, |
| 9543 | phba->pport ? phba->pport->port_state : 0xff, |
| 9544 | psli->sli_flag, flag); |
| 9545 | |
| 9546 | psli->slistat.mbox_busy++; |
| 9547 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9548 | |
| 9549 | if (pmbox->vport) { |
| 9550 | lpfc_debugfs_disc_trc(pmbox->vport, |
| 9551 | LPFC_DISC_TRC_MBOX_VPORT, |
| 9552 | "MBOX Bsy vport: cmd:x%x mb:x%x x%x" , |
| 9553 | (uint32_t)mbx->mbxCommand, |
| 9554 | mbx->un.varWords[0], mbx->un.varWords[1]); |
| 9555 | } |
| 9556 | else { |
| 9557 | lpfc_debugfs_disc_trc(phba->pport, |
| 9558 | LPFC_DISC_TRC_MBOX, |
| 9559 | "MBOX Bsy: cmd:x%x mb:x%x x%x" , |
| 9560 | (uint32_t)mbx->mbxCommand, |
| 9561 | mbx->un.varWords[0], mbx->un.varWords[1]); |
| 9562 | } |
| 9563 | |
| 9564 | return MBX_BUSY; |
| 9565 | } |
| 9566 | |
| 9567 | psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; |
| 9568 | |
| 9569 | /* If we are not polling, we MUST be in SLI2 mode */ |
| 9570 | if (flag != MBX_POLL) { |
| 9571 | if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && |
| 9572 | (mbx->mbxCommand != MBX_KILL_BOARD)) { |
| 9573 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
| 9574 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9575 | /* Mbox command <mbxCommand> cannot issue */ |
| 9576 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 9577 | "(%d):2531 Mailbox command x%x " |
| 9578 | "cannot issue Data: x%x x%x\n" , |
| 9579 | pmbox->vport ? pmbox->vport->vpi : 0, |
| 9580 | pmbox->u.mb.mbxCommand, |
| 9581 | psli->sli_flag, flag); |
| 9582 | goto out_not_finished; |
| 9583 | } |
| 9584 | /* timeout active mbox command */ |
| 9585 | timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)); |
| 9586 | mod_timer(timer: &psli->mbox_tmo, expires: jiffies + timeout); |
| 9587 | } |
| 9588 | |
| 9589 | /* Mailbox cmd <cmd> issue */ |
| 9590 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, |
| 9591 | "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " |
| 9592 | "x%x\n" , |
| 9593 | pmbox->vport ? pmbox->vport->vpi : 0, |
| 9594 | mbx->mbxCommand, |
| 9595 | phba->pport ? phba->pport->port_state : 0xff, |
| 9596 | psli->sli_flag, flag); |
| 9597 | |
| 9598 | if (mbx->mbxCommand != MBX_HEARTBEAT) { |
| 9599 | if (pmbox->vport) { |
| 9600 | lpfc_debugfs_disc_trc(pmbox->vport, |
| 9601 | LPFC_DISC_TRC_MBOX_VPORT, |
| 9602 | "MBOX Send vport: cmd:x%x mb:x%x x%x" , |
| 9603 | (uint32_t)mbx->mbxCommand, |
| 9604 | mbx->un.varWords[0], mbx->un.varWords[1]); |
| 9605 | } |
| 9606 | else { |
| 9607 | lpfc_debugfs_disc_trc(phba->pport, |
| 9608 | LPFC_DISC_TRC_MBOX, |
| 9609 | "MBOX Send: cmd:x%x mb:x%x x%x" , |
| 9610 | (uint32_t)mbx->mbxCommand, |
| 9611 | mbx->un.varWords[0], mbx->un.varWords[1]); |
| 9612 | } |
| 9613 | } |
| 9614 | |
| 9615 | psli->slistat.mbox_cmd++; |
| 9616 | evtctr = psli->slistat.mbox_event; |
| 9617 | |
| 9618 | /* next set own bit for the adapter and copy over command word */ |
| 9619 | mbx->mbxOwner = OWN_CHIP; |
| 9620 | |
| 9621 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
| 9622 | /* Populate mbox extension offset word. */ |
| 9623 | if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { |
| 9624 | *(((uint32_t *)mbx) + pmbox->mbox_offset_word) |
| 9625 | = (uint8_t *)phba->mbox_ext |
| 9626 | - (uint8_t *)phba->mbox; |
| 9627 | } |
| 9628 | |
| 9629 | /* Copy the mailbox extension data */ |
| 9630 | if (pmbox->in_ext_byte_len && pmbox->ext_buf) { |
| 9631 | lpfc_sli_pcimem_bcopy(pmbox->ext_buf, |
| 9632 | (uint8_t *)phba->mbox_ext, |
| 9633 | pmbox->in_ext_byte_len); |
| 9634 | } |
| 9635 | /* Copy command data to host SLIM area */ |
| 9636 | lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); |
| 9637 | } else { |
| 9638 | /* Populate mbox extension offset word. */ |
| 9639 | if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) |
| 9640 | *(((uint32_t *)mbx) + pmbox->mbox_offset_word) |
| 9641 | = MAILBOX_HBA_EXT_OFFSET; |
| 9642 | |
| 9643 | /* Copy the mailbox extension data */ |
| 9644 | if (pmbox->in_ext_byte_len && pmbox->ext_buf) |
| 9645 | lpfc_memcpy_to_slim(dest: phba->MBslimaddr + |
| 9646 | MAILBOX_HBA_EXT_OFFSET, |
| 9647 | src: pmbox->ext_buf, bytes: pmbox->in_ext_byte_len); |
| 9648 | |
| 9649 | if (mbx->mbxCommand == MBX_CONFIG_PORT) |
| 9650 | /* copy command data into host mbox for cmpl */ |
| 9651 | lpfc_sli_pcimem_bcopy(mbx, phba->mbox, |
| 9652 | MAILBOX_CMD_SIZE); |
| 9653 | |
| 9654 | /* First copy mbox command data to HBA SLIM, skip past first |
| 9655 | word */ |
| 9656 | to_slim = phba->MBslimaddr + sizeof (uint32_t); |
| 9657 | lpfc_memcpy_to_slim(dest: to_slim, src: &mbx->un.varWords[0], |
| 9658 | MAILBOX_CMD_SIZE - sizeof (uint32_t)); |
| 9659 | |
| 9660 | /* Next copy over first word, with mbxOwner set */ |
| 9661 | ldata = *((uint32_t *)mbx); |
| 9662 | to_slim = phba->MBslimaddr; |
| 9663 | writel(val: ldata, addr: to_slim); |
| 9664 | readl(addr: to_slim); /* flush */ |
| 9665 | |
| 9666 | if (mbx->mbxCommand == MBX_CONFIG_PORT) |
| 9667 | /* switch over to host mailbox */ |
| 9668 | psli->sli_flag |= LPFC_SLI_ACTIVE; |
| 9669 | } |
| 9670 | |
| 9671 | wmb(); |
| 9672 | |
| 9673 | switch (flag) { |
| 9674 | case MBX_NOWAIT: |
| 9675 | /* Set up reference to mailbox command */ |
| 9676 | psli->mbox_active = pmbox; |
| 9677 | /* Interrupt board to do it */ |
| 9678 | writel(CA_MBATT, addr: phba->CAregaddr); |
| 9679 | readl(addr: phba->CAregaddr); /* flush */ |
| 9680 | /* Don't wait for it to finish, just return */ |
| 9681 | break; |
| 9682 | |
| 9683 | case MBX_POLL: |
| 9684 | /* Set up null reference to mailbox command */ |
| 9685 | psli->mbox_active = NULL; |
| 9686 | /* Interrupt board to do it */ |
| 9687 | writel(CA_MBATT, addr: phba->CAregaddr); |
| 9688 | readl(addr: phba->CAregaddr); /* flush */ |
| 9689 | |
| 9690 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
| 9691 | /* First read mbox status word */ |
| 9692 | word0 = *((uint32_t *)phba->mbox); |
| 9693 | word0 = le32_to_cpu(word0); |
| 9694 | } else { |
| 9695 | /* First read mbox status word */ |
| 9696 | if (lpfc_readl(addr: phba->MBslimaddr, data: &word0)) { |
| 9697 | spin_unlock_irqrestore(lock: &phba->hbalock, |
| 9698 | flags: drvr_flag); |
| 9699 | goto out_not_finished; |
| 9700 | } |
| 9701 | } |
| 9702 | |
| 9703 | /* Read the HBA Host Attention Register */ |
| 9704 | if (lpfc_readl(addr: phba->HAregaddr, data: &ha_copy)) { |
| 9705 | spin_unlock_irqrestore(lock: &phba->hbalock, |
| 9706 | flags: drvr_flag); |
| 9707 | goto out_not_finished; |
| 9708 | } |
| 9709 | timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)) + jiffies; |
| 9710 | i = 0; |
| 9711 | /* Wait for command to complete */ |
| 9712 | while (((word0 & OWN_CHIP) == OWN_CHIP) || |
| 9713 | (!(ha_copy & HA_MBATT) && |
| 9714 | (phba->link_state > LPFC_WARM_START))) { |
| 9715 | if (time_after(jiffies, timeout)) { |
| 9716 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
| 9717 | spin_unlock_irqrestore(lock: &phba->hbalock, |
| 9718 | flags: drvr_flag); |
| 9719 | goto out_not_finished; |
| 9720 | } |
| 9721 | |
| 9722 | /* Check if we took a mbox interrupt while we were |
| 9723 | polling */ |
| 9724 | if (((word0 & OWN_CHIP) != OWN_CHIP) |
| 9725 | && (evtctr != psli->slistat.mbox_event)) |
| 9726 | break; |
| 9727 | |
| 9728 | if (i++ > 10) { |
| 9729 | spin_unlock_irqrestore(lock: &phba->hbalock, |
| 9730 | flags: drvr_flag); |
| 9731 | msleep(msecs: 1); |
| 9732 | spin_lock_irqsave(&phba->hbalock, drvr_flag); |
| 9733 | } |
| 9734 | |
| 9735 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
| 9736 | /* First copy command data */ |
| 9737 | word0 = *((uint32_t *)phba->mbox); |
| 9738 | word0 = le32_to_cpu(word0); |
| 9739 | if (mbx->mbxCommand == MBX_CONFIG_PORT) { |
| 9740 | MAILBOX_t *slimmb; |
| 9741 | uint32_t slimword0; |
| 9742 | /* Check real SLIM for any errors */ |
| 9743 | slimword0 = readl(addr: phba->MBslimaddr); |
| 9744 | slimmb = (MAILBOX_t *) & slimword0; |
| 9745 | if (((slimword0 & OWN_CHIP) != OWN_CHIP) |
| 9746 | && slimmb->mbxStatus) { |
| 9747 | psli->sli_flag &= |
| 9748 | ~LPFC_SLI_ACTIVE; |
| 9749 | word0 = slimword0; |
| 9750 | } |
| 9751 | } |
| 9752 | } else { |
| 9753 | /* First copy command data */ |
| 9754 | word0 = readl(addr: phba->MBslimaddr); |
| 9755 | } |
| 9756 | /* Read the HBA Host Attention Register */ |
| 9757 | if (lpfc_readl(addr: phba->HAregaddr, data: &ha_copy)) { |
| 9758 | spin_unlock_irqrestore(lock: &phba->hbalock, |
| 9759 | flags: drvr_flag); |
| 9760 | goto out_not_finished; |
| 9761 | } |
| 9762 | } |
| 9763 | |
| 9764 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
| 9765 | /* copy results back to user */ |
| 9766 | lpfc_sli_pcimem_bcopy(phba->mbox, mbx, |
| 9767 | MAILBOX_CMD_SIZE); |
| 9768 | /* Copy the mailbox extension data */ |
| 9769 | if (pmbox->out_ext_byte_len && pmbox->ext_buf) { |
| 9770 | lpfc_sli_pcimem_bcopy(phba->mbox_ext, |
| 9771 | pmbox->ext_buf, |
| 9772 | pmbox->out_ext_byte_len); |
| 9773 | } |
| 9774 | } else { |
| 9775 | /* First copy command data */ |
| 9776 | lpfc_memcpy_from_slim(dest: mbx, src: phba->MBslimaddr, |
| 9777 | MAILBOX_CMD_SIZE); |
| 9778 | /* Copy the mailbox extension data */ |
| 9779 | if (pmbox->out_ext_byte_len && pmbox->ext_buf) { |
| 9780 | lpfc_memcpy_from_slim( |
| 9781 | dest: pmbox->ext_buf, |
| 9782 | src: phba->MBslimaddr + |
| 9783 | MAILBOX_HBA_EXT_OFFSET, |
| 9784 | bytes: pmbox->out_ext_byte_len); |
| 9785 | } |
| 9786 | } |
| 9787 | |
| 9788 | writel(HA_MBATT, addr: phba->HAregaddr); |
| 9789 | readl(addr: phba->HAregaddr); /* flush */ |
| 9790 | |
| 9791 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
| 9792 | status = mbx->mbxStatus; |
| 9793 | } |
| 9794 | |
| 9795 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: drvr_flag); |
| 9796 | return status; |
| 9797 | |
| 9798 | out_not_finished: |
| 9799 | if (processing_queue) { |
| 9800 | pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; |
| 9801 | lpfc_mbox_cmpl_put(phba, pmbox); |
| 9802 | } |
| 9803 | return MBX_NOT_FINISHED; |
| 9804 | } |
| 9805 | |
| 9806 | /** |
| 9807 | * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command |
| 9808 | * @phba: Pointer to HBA context object. |
| 9809 | * |
| 9810 | * The function blocks the posting of SLI4 asynchronous mailbox commands from |
| 9811 | * the driver internal pending mailbox queue. It will then try to wait out the |
| 9812 | * possible outstanding mailbox command before return. |
| 9813 | * |
| 9814 | * Returns: |
| 9815 | * 0 - the outstanding mailbox command completed; otherwise, the wait for |
| 9816 | * the outstanding mailbox command timed out. |
| 9817 | **/ |
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;			/* 0 = blocked cleanly, 1 = wait timed out */
	unsigned long timeout = 0;
	u32 sli_flag;
	u8 cmd, subsys, opcode;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(lock: &phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active)) + jiffies;
	spin_unlock_irq(lock: &phba->hbalock);

	/* Make sure the mailbox is really active */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete.
	 * NOTE(review): if mbox_active becomes non-NULL only after the
	 * sample above, timeout is still 0 and the first loop pass reports
	 * an immediate timeout - confirm this is the intended behavior.
	 */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(msecs: 2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding cmd not complete */

			/* Sanity check sli.mbox_active has not completed or
			 * cancelled from another context during last 2ms sleep,
			 * so take hbalock to be sure before logging.
			 */
			spin_lock_irq(lock: &phba->hbalock);
			if (phba->sli.mbox_active) {
				/* Snapshot identifying fields under the lock,
				 * then drop it before the (slow) log call.
				 */
				mboxq = phba->sli.mbox_active;
				cmd = mboxq->u.mb.mbxCommand;
				subsys = lpfc_sli_config_mbox_subsys_get(phba,
									 mboxq);
				opcode = lpfc_sli_config_mbox_opcode_get(phba,
									 mboxq);
				sli_flag = psli->sli_flag;
				spin_unlock_irq(lock: &phba->hbalock);
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"2352 Mailbox command x%x "
						"(x%x/x%x) sli_flag x%x could "
						"not complete\n" ,
						cmd, subsys, opcode,
						sli_flag);
			} else {
				spin_unlock_irq(lock: &phba->hbalock);
			}

			rc = 1;
			break;
		}
	}

	/* Can not cleanly block async mailbox command, fails it */
	if (rc) {
		/* Undo the block flag so async posting is not left stuck */
		spin_lock_irq(lock: &phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(lock: &phba->hbalock);
	}
	return rc;
}
| 9887 | |
| 9888 | /** |
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
| 9890 | * @phba: Pointer to HBA context object. |
| 9891 | * |
| 9892 | * The function unblocks and resume posting of SLI4 asynchronous mailbox |
| 9893 | * commands from the driver internal pending mailbox queue. It makes sure |
| 9894 | * that there is no outstanding mailbox command before resuming posting |
| 9895 | * asynchronous mailbox commands. If, for any reason, there is outstanding |
| 9896 | * mailbox command, it will try to wait it out before resuming asynchronous |
| 9897 | * mailbox command posting. |
| 9898 | **/ |
| 9899 | static void |
| 9900 | lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) |
| 9901 | { |
| 9902 | struct lpfc_sli *psli = &phba->sli; |
| 9903 | |
| 9904 | spin_lock_irq(lock: &phba->hbalock); |
| 9905 | if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { |
| 9906 | /* Asynchronous mailbox posting is not blocked, do nothing */ |
| 9907 | spin_unlock_irq(lock: &phba->hbalock); |
| 9908 | return; |
| 9909 | } |
| 9910 | |
| 9911 | /* Outstanding synchronous mailbox command is guaranteed to be done, |
| 9912 | * successful or timeout, after timing-out the outstanding mailbox |
| 9913 | * command shall always be removed, so just unblock posting async |
| 9914 | * mailbox command and resume |
| 9915 | */ |
| 9916 | psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; |
| 9917 | spin_unlock_irq(lock: &phba->hbalock); |
| 9918 | |
| 9919 | /* wake up worker thread to post asynchronous mailbox command */ |
| 9920 | lpfc_worker_wake_up(phba); |
| 9921 | } |
| 9922 | |
| 9923 | /** |
| 9924 | * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready |
| 9925 | * @phba: Pointer to HBA context object. |
| 9926 | * @mboxq: Pointer to mailbox object. |
| 9927 | * |
 * The function waits for the bootstrap mailbox register ready bit from
 * the port for up to the regular mailbox command timeout value.
| 9930 | * |
| 9931 | * 0 - no timeout on waiting for bootstrap mailbox register ready. |
| 9932 | * MBXERR_ERROR - wait for bootstrap mailbox register timed out or port |
| 9933 | * is in an unrecoverable state. |
| 9934 | **/ |
static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t db_ready;
	unsigned long timeout;
	struct lpfc_register bmbx_reg;
	struct lpfc_register portstat_reg = {-1};

	/* Sanity check - there is no point to wait if the port is in an
	 * unrecoverable state.
	 */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		if (lpfc_readl(addr: phba->sli4_hba.u.if_type2.STATUSregaddr,
			       data: &portstat_reg.word0) ||
		    lpfc_sli4_unrecoverable_port(portstat_reg: &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3858 Skipping bmbx ready because "
					"Port Status x%x\n" ,
					portstat_reg.word0);
			return MBXERR_ERROR;
		}
	}

	/* Absolute deadline for the doorbell poll below */
	timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)) + jiffies;

	/* Poll the bootstrap mailbox doorbell, backing off 2ms per miss */
	do {
		bmbx_reg.word0 = readl(addr: phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			mdelay(2);

		/* NOTE(review): this deadline test also runs on the
		 * iteration where db_ready just became set, so a ready
		 * doorbell observed right at expiry is still reported as
		 * MBXERR_ERROR - confirm this is intentional.
		 */
		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;
	} while (!db_ready);

	return 0;
}
| 9973 | |
| 9974 | /** |
| 9975 | * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox |
| 9976 | * @phba: Pointer to HBA context object. |
| 9977 | * @mboxq: Pointer to mailbox object. |
| 9978 | * |
| 9979 | * The function posts a mailbox to the port. The mailbox is expected |
 * to be completely filled in and ready for the port to operate on it.
| 9981 | * This routine executes a synchronous completion operation on the |
| 9982 | * mailbox by polling for its completion. |
| 9983 | * |
| 9984 | * The caller must not be holding any locks when calling this routine. |
| 9985 | * |
| 9986 | * Returns: |
| 9987 | * MBX_SUCCESS - mailbox posted successfully |
| 9988 | * Any of the MBX error values. |
| 9989 | **/ |
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Another mailbox command already owns the bootstrap region */
		spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n" ,
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag);

	/* wait for bootstrap mbox register for readiness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;
	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post. Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli4_pcimem_bcopy(srcp: mb, destp: phba->sli4_hba.bmbx.avirt,
			       cnt: sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(val: dma_address->addr_hi, addr: phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port. */
	writel(val: dma_address->addr_lo, addr: phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli4_pcimem_bcopy(srcp: phba->sli4_hba.bmbx.avirt, destp: mb,
			       cnt: sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli4_pcimem_bcopy(srcp: &mbox_rgn->mcqe, destp: &mboxq->mcqe,
			       cnt: sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
	/*
	 * When the CQE status indicates a failure and the mailbox status
	 * indicates success then copy the CQE status into the mailbox status
	 * (and prefix it with x4000).
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n" ,
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* Release the mailbox ownership token; hbalock keeps the
	 * sli_flag/mbox_active pair consistent for other contexts.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag);
	return rc;
}
| 10104 | |
| 10105 | /** |
| 10106 | * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware |
| 10107 | * @phba: Pointer to HBA context object. |
| 10108 | * @mboxq: Pointer to mailbox object. |
| 10109 | * @flag: Flag indicating how the mailbox need to be processed. |
| 10110 | * |
| 10111 | * This function is called by discovery code and HBA management code to submit |
| 10112 | * a mailbox command to firmware with SLI-4 interface spec. |
| 10113 | * |
| 10114 | * Return codes the caller owns the mailbox command after the return of the |
| 10115 | * function. |
| 10116 | **/ |
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump from issue mailbox command if setup */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	/* Fail fast if the device is not in a state to take mailbox cmds */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n" ,
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode and jump to a handler */
	if (!phba->sli4_hba.intr_enable) {
		/* Interrupts disabled: only MBX_POLL can be serviced, via
		 * the synchronous bootstrap mailbox path.
		 */
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n" ,
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		/* Interrupts enabled but caller wants synchronous service:
		 * block async posting first so the bootstrap region is free.
		 */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n" ,
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"(%d):2597 Sync Mailbox command "
					"x%x (x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n" ,
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n" ,
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO; the worker
	 * thread dequeues and posts it (lpfc_sli4_post_async_mbox).
	 */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x x%x\n" ,
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			mboxq->u.mb.un.varUnregLogin.rpi,
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
| 10241 | |
| 10242 | /** |
| 10243 | * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device |
| 10244 | * @phba: Pointer to HBA context object. |
| 10245 | * |
| 10246 | * This function is called by worker thread to send a mailbox command to |
| 10247 | * SLI4 HBA firmware. |
| 10248 | * |
| 10249 | **/ |
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Async posting is blocked (sync command in progress) */
		spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Another mailbox command already holds the token */
		spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		/* Token free but mbox_active set - inconsistent state */
		spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0384 There is pending active mailbox cmd\n" );
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(timer: &psli->mbox_tmo, expires: (jiffies +
		  secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n" ,
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	/* Record a discovery trace event, except for heartbeats which
	 * would flood the trace buffer.
	 */
	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x" ,
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x" ,
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(q: phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n" ,
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	/* Complete the failed command back to its issuer and release the
	 * service token so other mailbox commands can proceed.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);

	return MBX_NOT_FINISHED;
}
| 10363 | |
| 10364 | /** |
| 10365 | * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command |
| 10366 | * @phba: Pointer to HBA context object. |
| 10367 | * @pmbox: Pointer to mailbox object. |
| 10368 | * @flag: Flag indicating how the mailbox need to be processed. |
| 10369 | * |
| 10370 | * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from |
| 10371 | * the API jump table function pointer from the lpfc_hba struct. |
| 10372 | * |
| 10373 | * Return codes the caller owns the mailbox command after the return of the |
| 10374 | * function. |
| 10375 | **/ |
| 10376 | int |
| 10377 | lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) |
| 10378 | { |
| 10379 | return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); |
| 10380 | } |
| 10381 | |
| 10382 | /** |
| 10383 | * lpfc_mbox_api_table_setup - Set up mbox api function jump table |
| 10384 | * @phba: The hba struct for which this call is being executed. |
| 10385 | * @dev_grp: The HBA PCI-Device group number. |
| 10386 | * |
| 10387 | * This routine sets up the mbox interface API function jump table in @phba |
| 10388 | * struct. |
| 10389 | * Returns: 0 - success, -ENODEV - failure. |
| 10390 | **/ |
| 10391 | int |
| 10392 | lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) |
| 10393 | { |
| 10394 | |
| 10395 | switch (dev_grp) { |
| 10396 | case LPFC_PCI_DEV_LP: |
| 10397 | phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; |
| 10398 | phba->lpfc_sli_handle_slow_ring_event = |
| 10399 | lpfc_sli_handle_slow_ring_event_s3; |
| 10400 | phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; |
| 10401 | phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; |
| 10402 | phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; |
| 10403 | break; |
| 10404 | case LPFC_PCI_DEV_OC: |
| 10405 | phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; |
| 10406 | phba->lpfc_sli_handle_slow_ring_event = |
| 10407 | lpfc_sli_handle_slow_ring_event_s4; |
| 10408 | phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; |
| 10409 | phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; |
| 10410 | phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; |
| 10411 | break; |
| 10412 | default: |
| 10413 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 10414 | "1420 Invalid HBA PCI-device group: 0x%x\n" , |
| 10415 | dev_grp); |
| 10416 | return -ENODEV; |
| 10417 | } |
| 10418 | return 0; |
| 10419 | } |
| 10420 | |
| 10421 | /** |
| 10422 | * __lpfc_sli_ringtx_put - Add an iocb to the txq |
| 10423 | * @phba: Pointer to HBA context object. |
| 10424 | * @pring: Pointer to driver SLI ring object. |
| 10425 | * @piocb: Pointer to address of newly added command iocb. |
| 10426 | * |
| 10427 | * This function is called with hbalock held for SLI3 ports or |
| 10428 | * the ring lock held for SLI4 ports to add a command |
| 10429 | * iocb to the txq when SLI layer cannot submit the command iocb |
| 10430 | * to the ring. |
| 10431 | **/ |
| 10432 | void |
| 10433 | __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 10434 | struct lpfc_iocbq *piocb) |
| 10435 | { |
| 10436 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 10437 | lockdep_assert_held(&pring->ring_lock); |
| 10438 | else |
| 10439 | lockdep_assert_held(&phba->hbalock); |
| 10440 | /* Insert the caller's iocb in the txq tail for later processing. */ |
| 10441 | list_add_tail(new: &piocb->list, head: &pring->txq); |
| 10442 | } |
| 10443 | |
| 10444 | /** |
| 10445 | * lpfc_sli_next_iocb - Get the next iocb in the txq |
| 10446 | * @phba: Pointer to HBA context object. |
| 10447 | * @pring: Pointer to driver SLI ring object. |
| 10448 | * @piocb: Pointer to address of newly added command iocb. |
| 10449 | * |
| 10450 | * This function is called with hbalock held before a new |
| 10451 | * iocb is submitted to the firmware. This function checks |
| 10452 | * txq to flush the iocbs in txq to Firmware before |
| 10453 | * submitting new iocbs to the Firmware. |
| 10454 | * If there are iocbs in the txq which need to be submitted |
| 10455 | * to firmware, lpfc_sli_next_iocb returns the first element |
| 10456 | * of the txq after dequeuing it from txq. |
| 10457 | * If there is no iocb in the txq then the function will return |
| 10458 | * *piocb and *piocb is set to NULL. Caller needs to check |
| 10459 | * *piocb to find if there are more commands in the txq. |
| 10460 | **/ |
| 10461 | static struct lpfc_iocbq * |
| 10462 | lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 10463 | struct lpfc_iocbq **piocb) |
| 10464 | { |
| 10465 | struct lpfc_iocbq * nextiocb; |
| 10466 | |
| 10467 | lockdep_assert_held(&phba->hbalock); |
| 10468 | |
| 10469 | nextiocb = lpfc_sli_ringtx_get(phba, pring); |
| 10470 | if (!nextiocb) { |
| 10471 | nextiocb = *piocb; |
| 10472 | *piocb = NULL; |
| 10473 | } |
| 10474 | |
| 10475 | return nextiocb; |
| 10476 | } |
| 10477 | |
| 10478 | /** |
| 10479 | * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb |
| 10480 | * @phba: Pointer to HBA context object. |
| 10481 | * @ring_number: SLI ring number to issue iocb on. |
| 10482 | * @piocb: Pointer to command iocb. |
| 10483 | * @flag: Flag indicating if this command can be put into txq. |
| 10484 | * |
| 10485 | * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue |
| 10486 | * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is |
| 10487 | * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT |
| 10488 | * flag is turned on, the function returns IOCB_ERROR. When the link is down, |
| 10489 | * this function allows only iocbs for posting buffers. This function finds |
| 10490 | * next available slot in the command ring and posts the command to the |
| 10491 | * available slot and writes the port attention register to request HBA start |
| 10492 | * processing new iocb. If there is no slot available in the ring and |
| 10493 | * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise |
| 10494 | * the function returns IOCB_BUSY. |
| 10495 | * |
| 10496 | * This function is called with hbalock held. The function will return success |
| 10497 | * after it successfully submit the iocb to firmware or after adding to the |
| 10498 | * txq. |
| 10499 | **/ |
| 10500 | static int |
| 10501 | __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, |
| 10502 | struct lpfc_iocbq *piocb, uint32_t flag) |
| 10503 | { |
| 10504 | struct lpfc_iocbq *nextiocb; |
| 10505 | IOCB_t *iocb; |
| 10506 | struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; |
| 10507 | |
| 10508 | lockdep_assert_held(&phba->hbalock); |
| 10509 | |
| 10510 | if (piocb->cmd_cmpl && (!piocb->vport) && |
| 10511 | (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && |
| 10512 | (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { |
| 10513 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 10514 | "1807 IOCB x%x failed. No vport\n" , |
| 10515 | piocb->iocb.ulpCommand); |
| 10516 | dump_stack(); |
| 10517 | return IOCB_ERROR; |
| 10518 | } |
| 10519 | |
| 10520 | |
| 10521 | /* If the PCI channel is in offline state, do not post iocbs. */ |
| 10522 | if (unlikely(pci_channel_offline(phba->pcidev))) |
| 10523 | return IOCB_ERROR; |
| 10524 | |
| 10525 | /* If HBA has a deferred error attention, fail the iocb. */ |
| 10526 | if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) |
| 10527 | return IOCB_ERROR; |
| 10528 | |
| 10529 | /* |
| 10530 | * We should never get an IOCB if we are in a < LINK_DOWN state |
| 10531 | */ |
| 10532 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) |
| 10533 | return IOCB_ERROR; |
| 10534 | |
| 10535 | /* |
| 10536 | * Check to see if we are blocking IOCB processing because of a |
| 10537 | * outstanding event. |
| 10538 | */ |
| 10539 | if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) |
| 10540 | goto iocb_busy; |
| 10541 | |
| 10542 | if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { |
| 10543 | /* |
| 10544 | * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF |
| 10545 | * can be issued if the link is not up. |
| 10546 | */ |
| 10547 | switch (piocb->iocb.ulpCommand) { |
| 10548 | case CMD_QUE_RING_BUF_CN: |
| 10549 | case CMD_QUE_RING_BUF64_CN: |
| 10550 | /* |
| 10551 | * For IOCBs, like QUE_RING_BUF, that have no rsp ring |
| 10552 | * completion, cmd_cmpl MUST be 0. |
| 10553 | */ |
| 10554 | if (piocb->cmd_cmpl) |
| 10555 | piocb->cmd_cmpl = NULL; |
| 10556 | fallthrough; |
| 10557 | case CMD_CREATE_XRI_CR: |
| 10558 | case CMD_CLOSE_XRI_CN: |
| 10559 | case CMD_CLOSE_XRI_CX: |
| 10560 | break; |
| 10561 | default: |
| 10562 | goto iocb_busy; |
| 10563 | } |
| 10564 | |
| 10565 | /* |
| 10566 | * For FCP commands, we must be in a state where we can process link |
| 10567 | * attention events. |
| 10568 | */ |
| 10569 | } else if (unlikely(pring->ringno == LPFC_FCP_RING && |
| 10570 | !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { |
| 10571 | goto iocb_busy; |
| 10572 | } |
| 10573 | |
| 10574 | while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && |
| 10575 | (nextiocb = lpfc_sli_next_iocb(phba, pring, piocb: &piocb))) |
| 10576 | lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); |
| 10577 | |
| 10578 | if (iocb) |
| 10579 | lpfc_sli_update_ring(phba, pring); |
| 10580 | else |
| 10581 | lpfc_sli_update_full_ring(phba, pring); |
| 10582 | |
| 10583 | if (!piocb) |
| 10584 | return IOCB_SUCCESS; |
| 10585 | |
| 10586 | goto out_busy; |
| 10587 | |
| 10588 | iocb_busy: |
| 10589 | pring->stats.iocb_cmd_delay++; |
| 10590 | |
| 10591 | out_busy: |
| 10592 | |
| 10593 | if (!(flag & SLI_IOCB_RET_IOCB)) { |
| 10594 | __lpfc_sli_ringtx_put(phba, pring, piocb); |
| 10595 | return IOCB_SUCCESS; |
| 10596 | } |
| 10597 | |
| 10598 | return IOCB_BUSY; |
| 10599 | } |
| 10600 | |
| 10601 | /** |
| 10602 | * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb |
| 10603 | * @phba: Pointer to HBA context object. |
| 10604 | * @ring_number: SLI ring number to issue wqe on. |
| 10605 | * @piocb: Pointer to command iocb. |
| 10606 | * @flag: Flag indicating if this command can be put into txq. |
| 10607 | * |
| 10608 | * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to |
| 10609 | * send an iocb command to an HBA with SLI-3 interface spec. |
| 10610 | * |
| 10611 | * This function takes the hbalock before invoking the lockless version. |
| 10612 | * The function will return success after it successfully submit the wqe to |
| 10613 | * firmware or after adding to the txq. |
| 10614 | **/ |
| 10615 | static int |
| 10616 | __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number, |
| 10617 | struct lpfc_iocbq *piocb, uint32_t flag) |
| 10618 | { |
| 10619 | unsigned long iflags; |
| 10620 | int rc; |
| 10621 | |
| 10622 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 10623 | rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag); |
| 10624 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 10625 | |
| 10626 | return rc; |
| 10627 | } |
| 10628 | |
| 10629 | /** |
| 10630 | * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe |
| 10631 | * @phba: Pointer to HBA context object. |
| 10632 | * @ring_number: SLI ring number to issue wqe on. |
| 10633 | * @piocb: Pointer to command iocb. |
| 10634 | * @flag: Flag indicating if this command can be put into txq. |
| 10635 | * |
| 10636 | * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue |
| 10637 | * an wqe command to an HBA with SLI-4 interface spec. |
| 10638 | * |
| 10639 | * This function is a lockless version. The function will return success |
| 10640 | * after it successfully submit the wqe to firmware or after adding to the |
| 10641 | * txq. |
| 10642 | **/ |
| 10643 | static int |
| 10644 | __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number, |
| 10645 | struct lpfc_iocbq *piocb, uint32_t flag) |
| 10646 | { |
| 10647 | struct lpfc_io_buf *lpfc_cmd = piocb->io_buf; |
| 10648 | |
| 10649 | lpfc_prep_embed_io(phba, lpfc_ncmd: lpfc_cmd); |
| 10650 | return lpfc_sli4_issue_wqe(phba, qp: lpfc_cmd->hdwq, pwqe: piocb); |
| 10651 | } |
| 10652 | |
/**
 * lpfc_prep_embed_io - Build the payload descriptor words of an FCP WQE
 * @phba: Pointer to HBA context object.
 * @lpfc_cmd: Pointer to the IO buffer whose cur_iocbq WQE is being built.
 *
 * Fills Words 0-2 of the command's WQE from the first SGE.  When the
 * adapter supports embedded FCP I/O (phba->fcp_embed_io), the FCP_CMND
 * payload is copied inline into WQE words 18-29 and marked as an
 * immediate BDE; otherwise Words 0-2 are set up as an inline 64-bit BDE
 * pointing at the payload.  Finally, VMID tagging (CS_CTL priority or
 * application header) is applied if the IO carries a VMID.
 */
void
lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
	union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
	struct sli4_sge_le *sgl;
	u32 type_size;

	/* 128 byte wqe support here */
	sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;

	if (phba->fcp_embed_io) {
		struct fcp_cmnd *fcp_cmnd;
		u32 *ptr;

		fcp_cmnd = lpfc_cmd->fcp_cmnd;

		/* Word 0-2 - FCP_CMND */
		/* Immediate data: addrLow holds the WQE byte offset of the
		 * inline payload, not a DMA address.
		 */
		type_size = le32_to_cpu(sgl->sge_len);
		type_size |= ULP_BDE64_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.w = type_size;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 72;  /* Word 18 */

		/* wqes=1 (payload embedded in WQE), dbde=0 (no data BDE) */
		bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);

		/* Word 18-29  FCP CMND Payload */
		ptr = &wqe->words[18];
		lpfc_sli_pcimem_bcopy(fcp_cmnd, ptr, le32_to_cpu(sgl->sge_len));
	} else {
		/* Word 0-2 - Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
		wqe->generic.bde.addrHigh = le32_to_cpu(sgl->addr_hi);
		wqe->generic.bde.addrLow = le32_to_cpu(sgl->addr_lo);

		/* Word 10 */
		/* dbde=1 (data via BDE), wqes=0 (nothing embedded) */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	/* add the VMID tags as per switch response */
	if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
		if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
			       (piocb->vmid_tag.cs_ctl_vmid));
		} else if (phba->cfg_vmid_app_header) {
			/* Application header lives in Word 31 of the WQE */
			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			wqe->words[31] = piocb->vmid_tag.app_id;
		}
	}
}
| 10708 | |
| 10709 | /** |
| 10710 | * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb |
| 10711 | * @phba: Pointer to HBA context object. |
| 10712 | * @ring_number: SLI ring number to issue iocb on. |
| 10713 | * @piocb: Pointer to command iocb. |
| 10714 | * @flag: Flag indicating if this command can be put into txq. |
| 10715 | * |
| 10716 | * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue |
| 10717 | * an iocb command to an HBA with SLI-4 interface spec. |
| 10718 | * |
| 10719 | * This function is called with ringlock held. The function will return success |
| 10720 | * after it successfully submit the iocb to firmware or after adding to the |
| 10721 | * txq. |
| 10722 | **/ |
| 10723 | static int |
| 10724 | __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, |
| 10725 | struct lpfc_iocbq *piocb, uint32_t flag) |
| 10726 | { |
| 10727 | struct lpfc_sglq *sglq; |
| 10728 | union lpfc_wqe128 *wqe; |
| 10729 | struct lpfc_queue *wq; |
| 10730 | struct lpfc_sli_ring *pring; |
| 10731 | u32 ulp_command = get_job_cmnd(phba, iocbq: piocb); |
| 10732 | |
| 10733 | /* Get the WQ */ |
| 10734 | if ((piocb->cmd_flag & LPFC_IO_FCP) || |
| 10735 | (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) { |
| 10736 | wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq; |
| 10737 | } else { |
| 10738 | wq = phba->sli4_hba.els_wq; |
| 10739 | } |
| 10740 | |
| 10741 | /* Get corresponding ring */ |
| 10742 | pring = wq->pring; |
| 10743 | |
| 10744 | /* |
| 10745 | * The WQE can be either 64 or 128 bytes, |
| 10746 | */ |
| 10747 | |
| 10748 | lockdep_assert_held(&pring->ring_lock); |
| 10749 | wqe = &piocb->wqe; |
| 10750 | if (piocb->sli4_xritag == NO_XRI) { |
| 10751 | if (ulp_command == CMD_ABORT_XRI_CX) |
| 10752 | sglq = NULL; |
| 10753 | else { |
| 10754 | sglq = __lpfc_sli_get_els_sglq(phba, piocbq: piocb); |
| 10755 | if (!sglq) { |
| 10756 | if (!(flag & SLI_IOCB_RET_IOCB)) { |
| 10757 | __lpfc_sli_ringtx_put(phba, |
| 10758 | pring, |
| 10759 | piocb); |
| 10760 | return IOCB_SUCCESS; |
| 10761 | } else { |
| 10762 | return IOCB_BUSY; |
| 10763 | } |
| 10764 | } |
| 10765 | } |
| 10766 | } else if (piocb->cmd_flag & LPFC_IO_FCP) { |
| 10767 | /* These IO's already have an XRI and a mapped sgl. */ |
| 10768 | sglq = NULL; |
| 10769 | } |
| 10770 | else { |
| 10771 | /* |
| 10772 | * This is a continuation of a commandi,(CX) so this |
| 10773 | * sglq is on the active list |
| 10774 | */ |
| 10775 | sglq = __lpfc_get_active_sglq(phba, xritag: piocb->sli4_lxritag); |
| 10776 | if (!sglq) |
| 10777 | return IOCB_ERROR; |
| 10778 | } |
| 10779 | |
| 10780 | if (sglq) { |
| 10781 | piocb->sli4_lxritag = sglq->sli4_lxritag; |
| 10782 | piocb->sli4_xritag = sglq->sli4_xritag; |
| 10783 | |
| 10784 | /* ABTS sent by initiator to CT exchange, the |
| 10785 | * RX_ID field will be filled with the newly |
| 10786 | * allocated responder XRI. |
| 10787 | */ |
| 10788 | if (ulp_command == CMD_XMIT_BLS_RSP64_CX && |
| 10789 | piocb->abort_bls == LPFC_ABTS_UNSOL_INT) |
| 10790 | bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, |
| 10791 | piocb->sli4_xritag); |
| 10792 | |
| 10793 | bf_set(wqe_xri_tag, &wqe->generic.wqe_com, |
| 10794 | piocb->sli4_xritag); |
| 10795 | |
| 10796 | if (lpfc_wqe_bpl2sgl(phba, pwqeq: piocb, sglq) == NO_XRI) |
| 10797 | return IOCB_ERROR; |
| 10798 | } |
| 10799 | |
| 10800 | if (lpfc_sli4_wq_put(q: wq, wqe)) |
| 10801 | return IOCB_ERROR; |
| 10802 | |
| 10803 | lpfc_sli_ringtxcmpl_put(phba, pring, piocb); |
| 10804 | |
| 10805 | return 0; |
| 10806 | } |
| 10807 | |
| 10808 | /* |
| 10809 | * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o |
| 10810 | * |
 * This routine wraps the actual fcp i/o function for issuing a WQE for an
 * SLI-4 device or an IOCB for an SLI-3 device, using the function
 * pointer from the lpfc_hba struct.
| 10814 | * |
| 10815 | * Return codes: |
| 10816 | * IOCB_ERROR - Error |
| 10817 | * IOCB_SUCCESS - Success |
| 10818 | * IOCB_BUSY - Busy |
| 10819 | **/ |
| 10820 | int |
| 10821 | lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number, |
| 10822 | struct lpfc_iocbq *piocb, uint32_t flag) |
| 10823 | { |
| 10824 | return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag); |
| 10825 | } |
| 10826 | |
| 10827 | /* |
| 10828 | * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb |
| 10829 | * |
 * This routine wraps the actual lockless version of the issue IOCB function
 * pointer from the lpfc_hba struct.
| 10832 | * |
| 10833 | * Return codes: |
| 10834 | * IOCB_ERROR - Error |
| 10835 | * IOCB_SUCCESS - Success |
| 10836 | * IOCB_BUSY - Busy |
| 10837 | **/ |
| 10838 | int |
| 10839 | __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, |
| 10840 | struct lpfc_iocbq *piocb, uint32_t flag) |
| 10841 | { |
| 10842 | return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); |
| 10843 | } |
| 10844 | |
/* __lpfc_sli_prep_els_req_rsp_s3 - SLI-3 prep of an ELS request/response IOCB
 *
 * Builds either an ELS_REQUEST64_CR (expect_rsp) or XMIT_ELS_RSP64_CX
 * (!expect_rsp) IOCB around the BPL described by @bmp.  @cmd_size is
 * unused on SLI-3; the BDE size is fixed by the request/response shape.
 * When NPIV is enabled the command is routed by VPI context.
 */
static void
__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
			       struct lpfc_vport *vport,
			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
			       u32 elscmd, u8 tmo, u8 expect_rsp)
{
	struct lpfc_hba *phba = vport->phba;
	IOCB_t *cmd;

	cmd = &cmdiocbq->iocb;
	memset(cmd, 0, sizeof(*cmd));

	/* BPL describing the command (and response) buffers */
	cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;

	if (expect_rsp) {
		/* Request: two BDEs (command + response buffers) */
		cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		cmd->un.elsreq64.remoteID = did;	/* DID */
		cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		cmd->ulpTimeout = tmo;
	} else {
		/* Response: single BDE; no timeout needed */
		cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		cmd->un.genreq64.xmit_els_remoteID = did;	/* DID */
		cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
		cmd->ulpPU = PARM_NPIV_DID;
	}
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;

	/* If we have NPIV enabled, we want to send ELS traffic by VPI. */
	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (expect_rsp) {
			cmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			cmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		cmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			cmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			cmd->ulpCt_l = 1; /* context = VPI */
	}
}
| 10893 | |
/* __lpfc_sli_prep_els_req_rsp_s4 - SLI-4 prep of an ELS request/response WQE
 *
 * Builds either an ELS_REQUEST64_WQE (expect_rsp) or XMIT_ELS_RSP64_WQE
 * (!expect_rsp) in the command's WQE, with an inline 64-bit BDE for the
 * buffer described by @bmp.  For requests, Word 11's ELS_ID is derived
 * from @elscmd so the firmware can apply command-specific handling.
 * NPIV and pt2pt traffic is routed by VPI context.
 */
static void
__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
			       struct lpfc_vport *vport,
			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
			       u32 elscmd, u8 tmo, u8 expect_rsp)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct ulp_bde64_le *bde;
	u8 els_id;

	wqe = &cmdiocbq->wqe;
	memset(wqe, 0, sizeof(*wqe));

	/* Word 0 - 2 BDE */
	bde = (struct ulp_bde64_le *)&wqe->generic.bde;
	bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
	bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
	bde->type_size = cpu_to_le32(cmd_size);
	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

	if (expect_rsp) {
		bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);

		/* Transfer length */
		wqe->els_req.payload_len = cmd_size;
		wqe->els_req.max_response_payload_len = FCELSSIZE;

		/* DID */
		bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);

		/* Word 11 - ELS_ID */
		switch (elscmd) {
		case ELS_CMD_PLOGI:
			els_id = LPFC_ELS_ID_PLOGI;
			break;
		case ELS_CMD_FLOGI:
			els_id = LPFC_ELS_ID_FLOGI;
			break;
		case ELS_CMD_LOGO:
			els_id = LPFC_ELS_ID_LOGO;
			break;
		case ELS_CMD_FDISC:
			/* FDISC from an unaddressed (new) vport only */
			if (!vport->fc_myDID) {
				els_id = LPFC_ELS_ID_FDISC;
				break;
			}
			fallthrough;
		default:
			els_id = LPFC_ELS_ID_DEFAULT;
			break;
		}

		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
	} else {
		/* DID */
		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);

		/* Transfer length */
		wqe->xmit_els_rsp.response_payload_len = cmd_size;

		bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
		       CMD_XMIT_ELS_RSP64_WQE);
	}

	/* Common words: timeout, request tag (for completion match), class */
	bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
	bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);

	/* If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    test_bit(FC_PT2PT, &vport->fc_flag)) {
		if (expect_rsp) {
			bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);

			/* For ELS_REQUEST64_WQE, use the VPI by default */
			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
			       phba->vpi_ids[vport->vpi]);
		}

		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
		else
			bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
	}
}
| 10984 | |
| 10985 | void |
| 10986 | lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, |
| 10987 | struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, |
| 10988 | u16 cmd_size, u32 did, u32 elscmd, u8 tmo, |
| 10989 | u8 expect_rsp) |
| 10990 | { |
| 10991 | phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did, |
| 10992 | elscmd, tmo, expect_rsp); |
| 10993 | } |
| 10994 | |
/* __lpfc_sli_prep_gen_req_s3 - SLI-3 prep of a GEN_REQUEST64_CR IOCB
 *
 * Builds a CT passthrough request IOCB: the BPL at @bmp holds
 * @num_entry BDEs, and the exchange is routed by the remote port's
 * @rpi.  The frame header is fixed to unsolicited control / FC-CT with
 * SI|LA set.
 */
static void
__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
			   u16 rpi, u32 num_entry, u8 tmo)
{
	IOCB_t *cmd;

	cmd = &cmdiocbq->iocb;
	memset(cmd, 0, sizeof(*cmd));

	/* BPL pointer and total size in bytes */
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);

	/* Word 5: FC frame header fields for the CT request */
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);

	cmd->ulpContext = rpi;
	cmd->ulpClass = CLASS3;
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpTimeout = tmo;
}
| 11021 | |
| 11022 | static void |
| 11023 | __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, |
| 11024 | u16 rpi, u32 num_entry, u8 tmo) |
| 11025 | { |
| 11026 | union lpfc_wqe128 *cmdwqe; |
| 11027 | struct ulp_bde64_le *bde, *bpl; |
| 11028 | u32 xmit_len = 0, total_len = 0, size, type, i; |
| 11029 | |
| 11030 | cmdwqe = &cmdiocbq->wqe; |
| 11031 | memset(cmdwqe, 0, sizeof(*cmdwqe)); |
| 11032 | |
| 11033 | /* Calculate total_len and xmit_len */ |
| 11034 | bpl = (struct ulp_bde64_le *)bmp->virt; |
| 11035 | for (i = 0; i < num_entry; i++) { |
| 11036 | size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; |
| 11037 | total_len += size; |
| 11038 | } |
| 11039 | for (i = 0; i < num_entry; i++) { |
| 11040 | size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; |
| 11041 | type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK; |
| 11042 | if (type != ULP_BDE64_TYPE_BDE_64) |
| 11043 | break; |
| 11044 | xmit_len += size; |
| 11045 | } |
| 11046 | |
| 11047 | /* Words 0 - 2 */ |
| 11048 | bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde; |
| 11049 | bde->addr_low = bpl->addr_low; |
| 11050 | bde->addr_high = bpl->addr_high; |
| 11051 | bde->type_size = cpu_to_le32(xmit_len); |
| 11052 | bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); |
| 11053 | |
| 11054 | /* Word 3 */ |
| 11055 | cmdwqe->gen_req.request_payload_len = xmit_len; |
| 11056 | |
| 11057 | /* Word 5 */ |
| 11058 | bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT); |
| 11059 | bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL); |
| 11060 | bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1); |
| 11061 | bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1); |
| 11062 | |
| 11063 | /* Word 6 */ |
| 11064 | bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi); |
| 11065 | |
| 11066 | /* Word 7 */ |
| 11067 | bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo); |
| 11068 | bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3); |
| 11069 | bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR); |
| 11070 | bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI); |
| 11071 | |
| 11072 | /* Word 12 */ |
| 11073 | cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len; |
| 11074 | } |
| 11075 | |
| 11076 | void |
| 11077 | lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, |
| 11078 | struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo) |
| 11079 | { |
| 11080 | phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo); |
| 11081 | } |
| 11082 | |
/* __lpfc_sli_prep_xmit_seq64_s3 - SLI-3 prep of an XMIT_SEQUENCE64 IOCB
 *
 * Builds a CT sequence transmit IOCB from the BPL at @bmp.  The CR
 * variant routes by @rpi (initiated exchange); the CX variant
 * continues an existing exchange identified by @ox_id.  LS is set in
 * Fctl only when @last_seq indicates the final sequence.
 */
static void
__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
			      struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
			      u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
{
	IOCB_t *icmd;

	icmd = &cmdiocbq->iocb;
	memset(icmd, 0, sizeof(*icmd));

	/* BPL pointer and total size in bytes */
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	/* Word 5: frame header fields */
	icmd->un.xseq64.w5.hcsw.Fctl = LA;
	if (last_seq)
		icmd->un.xseq64.w5.hcsw.Fctl |= LS;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = rctl;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	/* CR routes by RPI; CX continues an exchange identified by OX_ID */
	switch (cr_cx_cmd) {
	case CMD_XMIT_SEQUENCE64_CR:
		icmd->ulpContext = rpi;
		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
		break;
	case CMD_XMIT_SEQUENCE64_CX:
		icmd->ulpContext = ox_id;
		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
		break;
	default:
		break;
	}
}
| 11121 | |
| 11122 | static void |
| 11123 | __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq, |
| 11124 | struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, |
| 11125 | u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd) |
| 11126 | { |
| 11127 | union lpfc_wqe128 *wqe; |
| 11128 | struct ulp_bde64 *bpl; |
| 11129 | |
| 11130 | wqe = &cmdiocbq->wqe; |
| 11131 | memset(wqe, 0, sizeof(*wqe)); |
| 11132 | |
| 11133 | /* Words 0 - 2 */ |
| 11134 | bpl = (struct ulp_bde64 *)bmp->virt; |
| 11135 | wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh; |
| 11136 | wqe->xmit_sequence.bde.addrLow = bpl->addrLow; |
| 11137 | wqe->xmit_sequence.bde.tus.w = bpl->tus.w; |
| 11138 | |
| 11139 | /* Word 5 */ |
| 11140 | bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq); |
| 11141 | bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1); |
| 11142 | bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); |
| 11143 | bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl); |
| 11144 | bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT); |
| 11145 | |
| 11146 | /* Word 6 */ |
| 11147 | bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi); |
| 11148 | |
| 11149 | bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, |
| 11150 | CMD_XMIT_SEQUENCE64_WQE); |
| 11151 | |
| 11152 | /* Word 7 */ |
| 11153 | bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); |
| 11154 | |
| 11155 | /* Word 9 */ |
| 11156 | bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id); |
| 11157 | |
| 11158 | if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) { |
| 11159 | /* Word 10 */ |
| 11160 | if (cmdiocbq->cmd_flag & LPFC_IO_VMID) { |
| 11161 | bf_set(wqe_appid, &wqe->xmit_sequence.wqe_com, 1); |
| 11162 | bf_set(wqe_wqes, &wqe->xmit_sequence.wqe_com, 1); |
| 11163 | wqe->words[31] = LOOPBACK_SRC_APPID; |
| 11164 | } |
| 11165 | |
| 11166 | /* Word 12 */ |
| 11167 | wqe->xmit_sequence.xmit_len = full_size; |
| 11168 | } |
| 11169 | else |
| 11170 | wqe->xmit_sequence.xmit_len = |
| 11171 | wqe->xmit_sequence.bde.tus.f.bdeSize; |
| 11172 | } |
| 11173 | |
| 11174 | void |
| 11175 | lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, |
| 11176 | struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, |
| 11177 | u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd) |
| 11178 | { |
| 11179 | phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry, |
| 11180 | rctl, last_seq, cr_cx_cmd); |
| 11181 | } |
| 11182 | |
| 11183 | static void |
| 11184 | __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, |
| 11185 | u16 iotag, u8 ulp_class, u16 cqid, bool ia, |
| 11186 | bool wqec) |
| 11187 | { |
| 11188 | IOCB_t *icmd = NULL; |
| 11189 | |
| 11190 | icmd = &cmdiocbq->iocb; |
| 11191 | memset(icmd, 0, sizeof(*icmd)); |
| 11192 | |
| 11193 | /* Word 5 */ |
| 11194 | icmd->un.acxri.abortContextTag = ulp_context; |
| 11195 | icmd->un.acxri.abortIoTag = iotag; |
| 11196 | |
| 11197 | if (ia) { |
| 11198 | /* Word 7 */ |
| 11199 | icmd->ulpCommand = CMD_CLOSE_XRI_CN; |
| 11200 | } else { |
| 11201 | /* Word 3 */ |
| 11202 | icmd->un.acxri.abortType = ABORT_TYPE_ABTS; |
| 11203 | |
| 11204 | /* Word 7 */ |
| 11205 | icmd->ulpClass = ulp_class; |
| 11206 | icmd->ulpCommand = CMD_ABORT_XRI_CN; |
| 11207 | } |
| 11208 | |
| 11209 | /* Word 7 */ |
| 11210 | icmd->ulpLe = 1; |
| 11211 | } |
| 11212 | |
| 11213 | static void |
| 11214 | __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, |
| 11215 | u16 iotag, u8 ulp_class, u16 cqid, bool ia, |
| 11216 | bool wqec) |
| 11217 | { |
| 11218 | union lpfc_wqe128 *wqe; |
| 11219 | |
| 11220 | wqe = &cmdiocbq->wqe; |
| 11221 | memset(wqe, 0, sizeof(*wqe)); |
| 11222 | |
| 11223 | /* Word 3 */ |
| 11224 | bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); |
| 11225 | if (ia) |
| 11226 | bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); |
| 11227 | else |
| 11228 | bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); |
| 11229 | |
| 11230 | /* Word 7 */ |
| 11231 | bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE); |
| 11232 | |
| 11233 | /* Word 8 */ |
| 11234 | wqe->abort_cmd.wqe_com.abort_tag = ulp_context; |
| 11235 | |
| 11236 | /* Word 9 */ |
| 11237 | bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag); |
| 11238 | |
| 11239 | /* Word 10 */ |
| 11240 | bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); |
| 11241 | |
| 11242 | /* Word 11 */ |
| 11243 | if (wqec) |
| 11244 | bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1); |
| 11245 | bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid); |
| 11246 | bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND); |
| 11247 | } |
| 11248 | |
| 11249 | void |
| 11250 | lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, |
| 11251 | u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid, |
| 11252 | bool ia, bool wqec) |
| 11253 | { |
| 11254 | phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class, |
| 11255 | cqid, ia, wqec); |
| 11256 | } |
| 11257 | |
| 11258 | /** |
| 11259 | * lpfc_sli_api_table_setup - Set up sli api function jump table |
| 11260 | * @phba: The hba struct for which this call is being executed. |
| 11261 | * @dev_grp: The HBA PCI-Device group number. |
| 11262 | * |
| 11263 | * This routine sets up the SLI interface API function jump table in @phba |
| 11264 | * struct. |
| 11265 | * Returns: 0 - success, -ENODEV - failure. |
| 11266 | **/ |
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* SLI-3 (legacy parallel SLI) handlers */
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
		break;
	case LPFC_PCI_DEV_OC:
		/* SLI-4 (queue-based, OneConnect-era) handlers */
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1419 Invalid HBA PCI-device group: 0x%x\n" ,
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
| 11298 | |
| 11299 | /** |
| 11300 | * lpfc_sli4_calc_ring - Calculates which ring to use |
| 11301 | * @phba: Pointer to HBA context object. |
| 11302 | * @piocb: Pointer to command iocb. |
| 11303 | * |
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
| 11305 | * hba_wqidx, thus we need to calculate the corresponding ring. |
| 11306 | * Since ABORTS must go on the same WQ of the command they are |
| 11307 | * aborting, we use command's hba_wqidx. |
| 11308 | */ |
| 11309 | struct lpfc_sli_ring * |
| 11310 | lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) |
| 11311 | { |
| 11312 | struct lpfc_io_buf *lpfc_cmd; |
| 11313 | |
| 11314 | if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { |
| 11315 | if (unlikely(!phba->sli4_hba.hdwq)) |
| 11316 | return NULL; |
| 11317 | /* |
| 11318 | * for abort iocb hba_wqidx should already |
| 11319 | * be setup based on what work queue we used. |
| 11320 | */ |
| 11321 | if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) { |
| 11322 | lpfc_cmd = piocb->io_buf; |
| 11323 | piocb->hba_wqidx = lpfc_cmd->hdwq_no; |
| 11324 | } |
| 11325 | return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring; |
| 11326 | } else { |
| 11327 | if (unlikely(!phba->sli4_hba.els_wq)) |
| 11328 | return NULL; |
| 11329 | piocb->hba_wqidx = 0; |
| 11330 | return phba->sli4_hba.els_wq->pring; |
| 11331 | } |
| 11332 | } |
| 11333 | |
| 11334 | inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq) |
| 11335 | { |
| 11336 | struct lpfc_hba *phba = eq->phba; |
| 11337 | |
| 11338 | /* |
| 11339 | * Unlocking an irq is one of the entry point to check |
| 11340 | * for re-schedule, but we are good for io submission |
| 11341 | * path as midlayer does a get_cpu to glue us in. Flush |
| 11342 | * out the invalidate queue so we can see the updated |
| 11343 | * value for flag. |
| 11344 | */ |
| 11345 | smp_rmb(); |
| 11346 | |
| 11347 | if (READ_ONCE(eq->mode) == LPFC_EQ_POLL) |
| 11348 | /* We will not likely get the completion for the caller |
| 11349 | * during this iteration but i guess that's fine. |
| 11350 | * Future io's coming on this eq should be able to |
| 11351 | * pick it up. As for the case of single io's, they |
| 11352 | * will be handled through a sched from polling timer |
| 11353 | * function which is currently triggered every 1msec. |
| 11354 | */ |
| 11355 | lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM, |
| 11356 | poll_mode: LPFC_QUEUE_WORK); |
| 11357 | } |
| 11358 | |
| 11359 | /** |
| 11360 | * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb |
| 11361 | * @phba: Pointer to HBA context object. |
| 11362 | * @ring_number: Ring number |
| 11363 | * @piocb: Pointer to command iocb. |
| 11364 | * @flag: Flag indicating if this command can be put into txq. |
| 11365 | * |
| 11366 | * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb |
| 11367 | * function. This function gets the hbalock and calls |
| 11368 | * __lpfc_sli_issue_iocb function and will return the error returned |
| 11369 | * by __lpfc_sli_issue_iocb function. This wrapper is used by |
| 11370 | * functions which do not hold hbalock. |
| 11371 | **/ |
| 11372 | int |
| 11373 | lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, |
| 11374 | struct lpfc_iocbq *piocb, uint32_t flag) |
| 11375 | { |
| 11376 | struct lpfc_sli_ring *pring; |
| 11377 | struct lpfc_queue *eq; |
| 11378 | unsigned long iflags; |
| 11379 | int rc; |
| 11380 | |
| 11381 | /* If the PCI channel is in offline state, do not post iocbs. */ |
| 11382 | if (unlikely(pci_channel_offline(phba->pcidev))) |
| 11383 | return IOCB_ERROR; |
| 11384 | |
| 11385 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 11386 | lpfc_sli_prep_wqe(phba, job: piocb); |
| 11387 | |
| 11388 | eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq; |
| 11389 | |
| 11390 | pring = lpfc_sli4_calc_ring(phba, piocb); |
| 11391 | if (unlikely(pring == NULL)) |
| 11392 | return IOCB_ERROR; |
| 11393 | |
| 11394 | spin_lock_irqsave(&pring->ring_lock, iflags); |
| 11395 | rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); |
| 11396 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 11397 | |
| 11398 | lpfc_sli4_poll_eq(eq); |
| 11399 | } else { |
| 11400 | /* For now, SLI2/3 will still use hbalock */ |
| 11401 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 11402 | rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); |
| 11403 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 11404 | } |
| 11405 | return rc; |
| 11406 | } |
| 11407 | |
| 11408 | /** |
| 11409 | * lpfc_extra_ring_setup - Extra ring setup function |
| 11410 | * @phba: Pointer to HBA context object. |
| 11411 | * |
| 11412 | * This function is called while driver attaches with the |
| 11413 | * HBA to setup the extra ring. The extra ring is used |
| 11414 | * only when driver needs to support target mode functionality |
| 11415 | * or IP over FC functionalities. |
| 11416 | * |
| 11417 | * This function is called with no lock held. SLI3 only. |
| 11418 | **/ |
| 11419 | static int |
| 11420 | ( struct lpfc_hba *phba) |
| 11421 | { |
| 11422 | struct lpfc_sli *psli; |
| 11423 | struct lpfc_sli_ring *pring; |
| 11424 | |
| 11425 | psli = &phba->sli; |
| 11426 | |
| 11427 | /* Adjust cmd/rsp ring iocb entries more evenly */ |
| 11428 | |
| 11429 | /* Take some away from the FCP ring */ |
| 11430 | pring = &psli->sli3_ring[LPFC_FCP_RING]; |
| 11431 | pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; |
| 11432 | pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; |
| 11433 | pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; |
| 11434 | pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; |
| 11435 | |
| 11436 | /* and give them to the extra ring */ |
| 11437 | pring = &psli->sli3_ring[LPFC_EXTRA_RING]; |
| 11438 | |
| 11439 | pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; |
| 11440 | pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; |
| 11441 | pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; |
| 11442 | pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; |
| 11443 | |
| 11444 | /* Setup default profile for this ring */ |
| 11445 | pring->iotag_max = 4096; |
| 11446 | pring->num_mask = 1; |
| 11447 | pring->prt[0].profile = 0; /* Mask 0 */ |
| 11448 | pring->prt[0].rctl = phba->cfg_multi_ring_rctl; |
| 11449 | pring->prt[0].type = phba->cfg_multi_ring_type; |
| 11450 | pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; |
| 11451 | return 0; |
| 11452 | } |
| 11453 | |
| 11454 | static void |
| 11455 | lpfc_sli_post_recovery_event(struct lpfc_hba *phba, |
| 11456 | struct lpfc_nodelist *ndlp) |
| 11457 | { |
| 11458 | unsigned long iflags; |
| 11459 | struct lpfc_work_evt *evtp = &ndlp->recovery_evt; |
| 11460 | |
| 11461 | /* Hold a node reference for outstanding queued work */ |
| 11462 | if (!lpfc_nlp_get(ndlp)) |
| 11463 | return; |
| 11464 | |
| 11465 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 11466 | if (!list_empty(head: &evtp->evt_listp)) { |
| 11467 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 11468 | lpfc_nlp_put(ndlp); |
| 11469 | return; |
| 11470 | } |
| 11471 | |
| 11472 | evtp->evt_arg1 = ndlp; |
| 11473 | evtp->evt = LPFC_EVT_RECOVER_PORT; |
| 11474 | list_add_tail(new: &evtp->evt_listp, head: &phba->work_list); |
| 11475 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 11476 | |
| 11477 | lpfc_worker_wake_up(phba); |
| 11478 | } |
| 11479 | |
| 11480 | /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. |
| 11481 | * @phba: Pointer to HBA context object. |
| 11482 | * @iocbq: Pointer to iocb object. |
| 11483 | * |
| 11484 | * The async_event handler calls this routine when it receives |
| 11485 | * an ASYNC_STATUS_CN event from the port. The port generates |
| 11486 | * this event when an Abort Sequence request to an rport fails |
| 11487 | * twice in succession. The abort could be originated by the |
| 11488 | * driver or by the port. The ABTS could have been for an ELS |
| 11489 | * or FCP IO. The port only generates this event when an ABTS |
| 11490 | * fails to complete after one retry. |
| 11491 | */ |
| 11492 | static void |
| 11493 | lpfc_sli_abts_err_handler(struct lpfc_hba *phba, |
| 11494 | struct lpfc_iocbq *iocbq) |
| 11495 | { |
| 11496 | struct lpfc_nodelist *ndlp = NULL; |
| 11497 | uint16_t rpi = 0, vpi = 0; |
| 11498 | struct lpfc_vport *vport = NULL; |
| 11499 | |
| 11500 | /* The rpi in the ulpContext is vport-sensitive. */ |
| 11501 | vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; |
| 11502 | rpi = iocbq->iocb.ulpContext; |
| 11503 | |
| 11504 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 11505 | "3092 Port generated ABTS async event " |
| 11506 | "on vpi %d rpi %d status 0x%x\n" , |
| 11507 | vpi, rpi, iocbq->iocb.ulpStatus); |
| 11508 | |
| 11509 | vport = lpfc_find_vport_by_vpid(phba, vpi); |
| 11510 | if (!vport) |
| 11511 | goto err_exit; |
| 11512 | ndlp = lpfc_findnode_rpi(vport, rpi); |
| 11513 | if (!ndlp) |
| 11514 | goto err_exit; |
| 11515 | |
| 11516 | if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) |
| 11517 | lpfc_sli_abts_recover_port(vport, ndlp); |
| 11518 | return; |
| 11519 | |
| 11520 | err_exit: |
| 11521 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 11522 | "3095 Event Context not found, no " |
| 11523 | "action on vpi %d rpi %d status 0x%x, reason 0x%x\n" , |
| 11524 | vpi, rpi, iocbq->iocb.ulpStatus, |
| 11525 | iocbq->iocb.ulpContext); |
| 11526 | } |
| 11527 | |
| 11528 | /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. |
| 11529 | * @phba: pointer to HBA context object. |
| 11530 | * @ndlp: nodelist pointer for the impacted rport. |
| 11531 | * @axri: pointer to the wcqe containing the failed exchange. |
| 11532 | * |
| 11533 | * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the |
| 11534 | * port. The port generates this event when an abort exchange request to an |
| 11535 | * rport fails twice in succession with no reply. The abort could be originated |
| 11536 | * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. |
| 11537 | */ |
| 11538 | void |
| 11539 | lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, |
| 11540 | struct lpfc_nodelist *ndlp, |
| 11541 | struct sli4_wcqe_xri_aborted *axri) |
| 11542 | { |
| 11543 | uint32_t ext_status = 0; |
| 11544 | |
| 11545 | if (!ndlp) { |
| 11546 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 11547 | "3115 Node Context not found, driver " |
| 11548 | "ignoring abts err event\n" ); |
| 11549 | return; |
| 11550 | } |
| 11551 | |
| 11552 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 11553 | "3116 Port generated FCP XRI ABORT event on " |
| 11554 | "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n" , |
| 11555 | ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], |
| 11556 | bf_get(lpfc_wcqe_xa_xri, axri), |
| 11557 | bf_get(lpfc_wcqe_xa_status, axri), |
| 11558 | axri->parameter); |
| 11559 | |
| 11560 | /* |
| 11561 | * Catch the ABTS protocol failure case. Older OCe FW releases returned |
| 11562 | * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and |
| 11563 | * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. |
| 11564 | */ |
| 11565 | ext_status = axri->parameter & IOERR_PARAM_MASK; |
| 11566 | if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && |
| 11567 | ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) |
| 11568 | lpfc_sli_post_recovery_event(phba, ndlp); |
| 11569 | } |
| 11570 | |
| 11571 | /** |
| 11572 | * lpfc_sli_async_event_handler - ASYNC iocb handler function |
| 11573 | * @phba: Pointer to HBA context object. |
| 11574 | * @pring: Pointer to driver SLI ring object. |
| 11575 | * @iocbq: Pointer to iocb object. |
| 11576 | * |
| 11577 | * This function is called by the slow ring event handler |
| 11578 | * function when there is an ASYNC event iocb in the ring. |
| 11579 | * This function is called with no lock held. |
| 11580 | * Currently this function handles only temperature related |
| 11581 | * ASYNC events. The function decodes the temperature sensor |
| 11582 | * event message and posts events for the management applications. |
| 11583 | **/ |
| 11584 | static void |
| 11585 | lpfc_sli_async_event_handler(struct lpfc_hba * phba, |
| 11586 | struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) |
| 11587 | { |
| 11588 | IOCB_t *icmd; |
| 11589 | uint16_t evt_code; |
| 11590 | struct temp_event temp_event_data; |
| 11591 | struct Scsi_Host *shost; |
| 11592 | uint32_t *iocb_w; |
| 11593 | |
| 11594 | icmd = &iocbq->iocb; |
| 11595 | evt_code = icmd->un.asyncstat.evt_code; |
| 11596 | |
| 11597 | switch (evt_code) { |
| 11598 | case ASYNC_TEMP_WARN: |
| 11599 | case ASYNC_TEMP_SAFE: |
| 11600 | temp_event_data.data = (uint32_t) icmd->ulpContext; |
| 11601 | temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; |
| 11602 | if (evt_code == ASYNC_TEMP_WARN) { |
| 11603 | temp_event_data.event_code = LPFC_THRESHOLD_TEMP; |
| 11604 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 11605 | "0347 Adapter is very hot, please take " |
| 11606 | "corrective action. temperature : %d Celsius\n" , |
| 11607 | (uint32_t) icmd->ulpContext); |
| 11608 | } else { |
| 11609 | temp_event_data.event_code = LPFC_NORMAL_TEMP; |
| 11610 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 11611 | "0340 Adapter temperature is OK now. " |
| 11612 | "temperature : %d Celsius\n" , |
| 11613 | (uint32_t) icmd->ulpContext); |
| 11614 | } |
| 11615 | |
| 11616 | /* Send temperature change event to applications */ |
| 11617 | shost = lpfc_shost_from_vport(vport: phba->pport); |
| 11618 | fc_host_post_vendor_event(shost, event_number: fc_get_event_number(), |
| 11619 | data_len: sizeof(temp_event_data), data_buf: (char *) &temp_event_data, |
| 11620 | LPFC_NL_VENDOR_ID); |
| 11621 | break; |
| 11622 | case ASYNC_STATUS_CN: |
| 11623 | lpfc_sli_abts_err_handler(phba, iocbq); |
| 11624 | break; |
| 11625 | default: |
| 11626 | iocb_w = (uint32_t *) icmd; |
| 11627 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 11628 | "0346 Ring %d handler: unexpected ASYNC_STATUS" |
| 11629 | " evt_code 0x%x\n" |
| 11630 | "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" |
| 11631 | "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" |
| 11632 | "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" |
| 11633 | "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n" , |
| 11634 | pring->ringno, icmd->un.asyncstat.evt_code, |
| 11635 | iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], |
| 11636 | iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], |
| 11637 | iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], |
| 11638 | iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); |
| 11639 | |
| 11640 | break; |
| 11641 | } |
| 11642 | } |
| 11643 | |
| 11644 | |
| 11645 | /** |
| 11646 | * lpfc_sli4_setup - SLI ring setup function |
| 11647 | * @phba: Pointer to HBA context object. |
| 11648 | * |
| 11649 | * lpfc_sli_setup sets up rings of the SLI interface with |
| 11650 | * number of iocbs per ring and iotags. This function is |
| 11651 | * called while driver attach to the HBA and before the |
| 11652 | * interrupts are enabled. So there is no need for locking. |
| 11653 | * |
| 11654 | * This function always returns 0. |
| 11655 | **/ |
| 11656 | int |
| 11657 | lpfc_sli4_setup(struct lpfc_hba *phba) |
| 11658 | { |
| 11659 | struct lpfc_sli_ring *pring; |
| 11660 | |
| 11661 | pring = phba->sli4_hba.els_wq->pring; |
| 11662 | pring->num_mask = LPFC_MAX_RING_MASK; |
| 11663 | pring->prt[0].profile = 0; /* Mask 0 */ |
| 11664 | pring->prt[0].rctl = FC_RCTL_ELS_REQ; |
| 11665 | pring->prt[0].type = FC_TYPE_ELS; |
| 11666 | pring->prt[0].lpfc_sli_rcv_unsol_event = |
| 11667 | lpfc_els_unsol_event; |
| 11668 | pring->prt[1].profile = 0; /* Mask 1 */ |
| 11669 | pring->prt[1].rctl = FC_RCTL_ELS_REP; |
| 11670 | pring->prt[1].type = FC_TYPE_ELS; |
| 11671 | pring->prt[1].lpfc_sli_rcv_unsol_event = |
| 11672 | lpfc_els_unsol_event; |
| 11673 | pring->prt[2].profile = 0; /* Mask 2 */ |
| 11674 | /* NameServer Inquiry */ |
| 11675 | pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; |
| 11676 | /* NameServer */ |
| 11677 | pring->prt[2].type = FC_TYPE_CT; |
| 11678 | pring->prt[2].lpfc_sli_rcv_unsol_event = |
| 11679 | lpfc_ct_unsol_event; |
| 11680 | pring->prt[3].profile = 0; /* Mask 3 */ |
| 11681 | /* NameServer response */ |
| 11682 | pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; |
| 11683 | /* NameServer */ |
| 11684 | pring->prt[3].type = FC_TYPE_CT; |
| 11685 | pring->prt[3].lpfc_sli_rcv_unsol_event = |
| 11686 | lpfc_ct_unsol_event; |
| 11687 | return 0; |
| 11688 | } |
| 11689 | |
| 11690 | /** |
| 11691 | * lpfc_sli_setup - SLI ring setup function |
| 11692 | * @phba: Pointer to HBA context object. |
| 11693 | * |
| 11694 | * lpfc_sli_setup sets up rings of the SLI interface with |
| 11695 | * number of iocbs per ring and iotags. This function is |
| 11696 | * called while driver attach to the HBA and before the |
| 11697 | * interrupts are enabled. So there is no need for locking. |
| 11698 | * |
| 11699 | * This function always returns 0. SLI3 only. |
| 11700 | **/ |
| 11701 | int |
| 11702 | lpfc_sli_setup(struct lpfc_hba *phba) |
| 11703 | { |
| 11704 | int i, totiocbsize = 0; |
| 11705 | struct lpfc_sli *psli = &phba->sli; |
| 11706 | struct lpfc_sli_ring *pring; |
| 11707 | |
| 11708 | psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; |
| 11709 | psli->sli_flag = 0; |
| 11710 | |
| 11711 | psli->iocbq_lookup = NULL; |
| 11712 | psli->iocbq_lookup_len = 0; |
| 11713 | psli->last_iotag = 0; |
| 11714 | |
| 11715 | for (i = 0; i < psli->num_rings; i++) { |
| 11716 | pring = &psli->sli3_ring[i]; |
| 11717 | switch (i) { |
| 11718 | case LPFC_FCP_RING: /* ring 0 - FCP */ |
| 11719 | /* numCiocb and numRiocb are used in config_port */ |
| 11720 | pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; |
| 11721 | pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; |
| 11722 | pring->sli.sli3.numCiocb += |
| 11723 | SLI2_IOCB_CMD_R1XTRA_ENTRIES; |
| 11724 | pring->sli.sli3.numRiocb += |
| 11725 | SLI2_IOCB_RSP_R1XTRA_ENTRIES; |
| 11726 | pring->sli.sli3.numCiocb += |
| 11727 | SLI2_IOCB_CMD_R3XTRA_ENTRIES; |
| 11728 | pring->sli.sli3.numRiocb += |
| 11729 | SLI2_IOCB_RSP_R3XTRA_ENTRIES; |
| 11730 | pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? |
| 11731 | SLI3_IOCB_CMD_SIZE : |
| 11732 | SLI2_IOCB_CMD_SIZE; |
| 11733 | pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? |
| 11734 | SLI3_IOCB_RSP_SIZE : |
| 11735 | SLI2_IOCB_RSP_SIZE; |
| 11736 | pring->iotag_ctr = 0; |
| 11737 | pring->iotag_max = |
| 11738 | (phba->cfg_hba_queue_depth * 2); |
| 11739 | pring->fast_iotag = pring->iotag_max; |
| 11740 | pring->num_mask = 0; |
| 11741 | break; |
| 11742 | case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ |
| 11743 | /* numCiocb and numRiocb are used in config_port */ |
| 11744 | pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; |
| 11745 | pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; |
| 11746 | pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? |
| 11747 | SLI3_IOCB_CMD_SIZE : |
| 11748 | SLI2_IOCB_CMD_SIZE; |
| 11749 | pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? |
| 11750 | SLI3_IOCB_RSP_SIZE : |
| 11751 | SLI2_IOCB_RSP_SIZE; |
| 11752 | pring->iotag_max = phba->cfg_hba_queue_depth; |
| 11753 | pring->num_mask = 0; |
| 11754 | break; |
| 11755 | case LPFC_ELS_RING: /* ring 2 - ELS / CT */ |
| 11756 | /* numCiocb and numRiocb are used in config_port */ |
| 11757 | pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; |
| 11758 | pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; |
| 11759 | pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? |
| 11760 | SLI3_IOCB_CMD_SIZE : |
| 11761 | SLI2_IOCB_CMD_SIZE; |
| 11762 | pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? |
| 11763 | SLI3_IOCB_RSP_SIZE : |
| 11764 | SLI2_IOCB_RSP_SIZE; |
| 11765 | pring->fast_iotag = 0; |
| 11766 | pring->iotag_ctr = 0; |
| 11767 | pring->iotag_max = 4096; |
| 11768 | pring->lpfc_sli_rcv_async_status = |
| 11769 | lpfc_sli_async_event_handler; |
| 11770 | pring->num_mask = LPFC_MAX_RING_MASK; |
| 11771 | pring->prt[0].profile = 0; /* Mask 0 */ |
| 11772 | pring->prt[0].rctl = FC_RCTL_ELS_REQ; |
| 11773 | pring->prt[0].type = FC_TYPE_ELS; |
| 11774 | pring->prt[0].lpfc_sli_rcv_unsol_event = |
| 11775 | lpfc_els_unsol_event; |
| 11776 | pring->prt[1].profile = 0; /* Mask 1 */ |
| 11777 | pring->prt[1].rctl = FC_RCTL_ELS_REP; |
| 11778 | pring->prt[1].type = FC_TYPE_ELS; |
| 11779 | pring->prt[1].lpfc_sli_rcv_unsol_event = |
| 11780 | lpfc_els_unsol_event; |
| 11781 | pring->prt[2].profile = 0; /* Mask 2 */ |
| 11782 | /* NameServer Inquiry */ |
| 11783 | pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; |
| 11784 | /* NameServer */ |
| 11785 | pring->prt[2].type = FC_TYPE_CT; |
| 11786 | pring->prt[2].lpfc_sli_rcv_unsol_event = |
| 11787 | lpfc_ct_unsol_event; |
| 11788 | pring->prt[3].profile = 0; /* Mask 3 */ |
| 11789 | /* NameServer response */ |
| 11790 | pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; |
| 11791 | /* NameServer */ |
| 11792 | pring->prt[3].type = FC_TYPE_CT; |
| 11793 | pring->prt[3].lpfc_sli_rcv_unsol_event = |
| 11794 | lpfc_ct_unsol_event; |
| 11795 | break; |
| 11796 | } |
| 11797 | totiocbsize += (pring->sli.sli3.numCiocb * |
| 11798 | pring->sli.sli3.sizeCiocb) + |
| 11799 | (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); |
| 11800 | } |
| 11801 | if (totiocbsize > MAX_SLIM_IOCB_SIZE) { |
| 11802 | /* Too many cmd / rsp ring entries in SLI2 SLIM */ |
| 11803 | printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " |
| 11804 | "SLI2 SLIM Data: x%x x%lx\n" , |
| 11805 | phba->brd_no, totiocbsize, |
| 11806 | (unsigned long) MAX_SLIM_IOCB_SIZE); |
| 11807 | } |
| 11808 | if (phba->cfg_multi_ring_support == 2) |
| 11809 | lpfc_extra_ring_setup(phba); |
| 11810 | |
| 11811 | return 0; |
| 11812 | } |
| 11813 | |
| 11814 | /** |
| 11815 | * lpfc_sli4_queue_init - Queue initialization function |
| 11816 | * @phba: Pointer to HBA context object. |
| 11817 | * |
| 11818 | * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each |
| 11819 | * ring. This function also initializes ring indices of each ring. |
| 11820 | * This function is called during the initialization of the SLI |
| 11821 | * interface of an HBA. |
| 11822 | * This function is called with no lock held and always returns |
| 11823 | * 1. |
| 11824 | **/ |
| 11825 | void |
| 11826 | lpfc_sli4_queue_init(struct lpfc_hba *phba) |
| 11827 | { |
| 11828 | struct lpfc_sli *psli; |
| 11829 | struct lpfc_sli_ring *pring; |
| 11830 | int i; |
| 11831 | |
| 11832 | psli = &phba->sli; |
| 11833 | spin_lock_irq(lock: &phba->hbalock); |
| 11834 | INIT_LIST_HEAD(list: &psli->mboxq); |
| 11835 | INIT_LIST_HEAD(list: &psli->mboxq_cmpl); |
| 11836 | /* Initialize list headers for txq and txcmplq as double linked lists */ |
| 11837 | for (i = 0; i < phba->cfg_hdw_queue; i++) { |
| 11838 | pring = phba->sli4_hba.hdwq[i].io_wq->pring; |
| 11839 | pring->flag = 0; |
| 11840 | pring->ringno = LPFC_FCP_RING; |
| 11841 | pring->txcmplq_cnt = 0; |
| 11842 | INIT_LIST_HEAD(list: &pring->txq); |
| 11843 | INIT_LIST_HEAD(list: &pring->txcmplq); |
| 11844 | INIT_LIST_HEAD(list: &pring->iocb_continueq); |
| 11845 | spin_lock_init(&pring->ring_lock); |
| 11846 | } |
| 11847 | pring = phba->sli4_hba.els_wq->pring; |
| 11848 | pring->flag = 0; |
| 11849 | pring->ringno = LPFC_ELS_RING; |
| 11850 | pring->txcmplq_cnt = 0; |
| 11851 | INIT_LIST_HEAD(list: &pring->txq); |
| 11852 | INIT_LIST_HEAD(list: &pring->txcmplq); |
| 11853 | INIT_LIST_HEAD(list: &pring->iocb_continueq); |
| 11854 | spin_lock_init(&pring->ring_lock); |
| 11855 | |
| 11856 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
| 11857 | pring = phba->sli4_hba.nvmels_wq->pring; |
| 11858 | pring->flag = 0; |
| 11859 | pring->ringno = LPFC_ELS_RING; |
| 11860 | pring->txcmplq_cnt = 0; |
| 11861 | INIT_LIST_HEAD(list: &pring->txq); |
| 11862 | INIT_LIST_HEAD(list: &pring->txcmplq); |
| 11863 | INIT_LIST_HEAD(list: &pring->iocb_continueq); |
| 11864 | spin_lock_init(&pring->ring_lock); |
| 11865 | } |
| 11866 | |
| 11867 | spin_unlock_irq(lock: &phba->hbalock); |
| 11868 | } |
| 11869 | |
| 11870 | /** |
| 11871 | * lpfc_sli_queue_init - Queue initialization function |
| 11872 | * @phba: Pointer to HBA context object. |
| 11873 | * |
| 11874 | * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each |
| 11875 | * ring. This function also initializes ring indices of each ring. |
| 11876 | * This function is called during the initialization of the SLI |
| 11877 | * interface of an HBA. |
| 11878 | * This function is called with no lock held and always returns |
| 11879 | * 1. |
| 11880 | **/ |
| 11881 | void |
| 11882 | lpfc_sli_queue_init(struct lpfc_hba *phba) |
| 11883 | { |
| 11884 | struct lpfc_sli *psli; |
| 11885 | struct lpfc_sli_ring *pring; |
| 11886 | int i; |
| 11887 | |
| 11888 | psli = &phba->sli; |
| 11889 | spin_lock_irq(lock: &phba->hbalock); |
| 11890 | INIT_LIST_HEAD(list: &psli->mboxq); |
| 11891 | INIT_LIST_HEAD(list: &psli->mboxq_cmpl); |
| 11892 | /* Initialize list headers for txq and txcmplq as double linked lists */ |
| 11893 | for (i = 0; i < psli->num_rings; i++) { |
| 11894 | pring = &psli->sli3_ring[i]; |
| 11895 | pring->ringno = i; |
| 11896 | pring->sli.sli3.next_cmdidx = 0; |
| 11897 | pring->sli.sli3.local_getidx = 0; |
| 11898 | pring->sli.sli3.cmdidx = 0; |
| 11899 | INIT_LIST_HEAD(list: &pring->iocb_continueq); |
| 11900 | INIT_LIST_HEAD(list: &pring->iocb_continue_saveq); |
| 11901 | INIT_LIST_HEAD(list: &pring->postbufq); |
| 11902 | pring->flag = 0; |
| 11903 | INIT_LIST_HEAD(list: &pring->txq); |
| 11904 | INIT_LIST_HEAD(list: &pring->txcmplq); |
| 11905 | spin_lock_init(&pring->ring_lock); |
| 11906 | } |
| 11907 | spin_unlock_irq(lock: &phba->hbalock); |
| 11908 | } |
| 11909 | |
| 11910 | /** |
| 11911 | * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system |
| 11912 | * @phba: Pointer to HBA context object. |
| 11913 | * |
| 11914 | * This routine flushes the mailbox command subsystem. It will unconditionally |
| 11915 | * flush all the mailbox commands in the three possible stages in the mailbox |
| 11916 | * command sub-system: pending mailbox command queue; the outstanding mailbox |
| 11917 | * command; and completed mailbox command queue. It is caller's responsibility |
| 11918 | * to make sure that the driver is in the proper state to flush the mailbox |
| 11919 | * command sub-system. Namely, the posting of mailbox commands into the |
| 11920 | * pending mailbox command queue from the various clients must be stopped; |
| 11921 | * either the HBA is in a state that it will never works on the outstanding |
| 11922 | * mailbox command (such as in EEH or ERATT conditions) or the outstanding |
| 11923 | * mailbox command has been completed. |
| 11924 | **/ |
| 11925 | static void |
| 11926 | lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) |
| 11927 | { |
| 11928 | LIST_HEAD(completions); |
| 11929 | struct lpfc_sli *psli = &phba->sli; |
| 11930 | LPFC_MBOXQ_t *pmb; |
| 11931 | unsigned long iflag; |
| 11932 | |
| 11933 | /* Disable softirqs, including timers from obtaining phba->hbalock */ |
| 11934 | local_bh_disable(); |
| 11935 | |
| 11936 | /* Flush all the mailbox commands in the mbox system */ |
| 11937 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 11938 | |
| 11939 | /* The pending mailbox command queue */ |
| 11940 | list_splice_init(list: &phba->sli.mboxq, head: &completions); |
| 11941 | /* The outstanding active mailbox command */ |
| 11942 | if (psli->mbox_active) { |
| 11943 | list_add_tail(new: &psli->mbox_active->list, head: &completions); |
| 11944 | psli->mbox_active = NULL; |
| 11945 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
| 11946 | } |
| 11947 | /* The completed mailbox command queue */ |
| 11948 | list_splice_init(list: &phba->sli.mboxq_cmpl, head: &completions); |
| 11949 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 11950 | |
| 11951 | /* Enable softirqs again, done with phba->hbalock */ |
| 11952 | local_bh_enable(); |
| 11953 | |
| 11954 | /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ |
| 11955 | while (!list_empty(head: &completions)) { |
| 11956 | list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); |
| 11957 | pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; |
| 11958 | if (pmb->mbox_cmpl) |
| 11959 | pmb->mbox_cmpl(phba, pmb); |
| 11960 | } |
| 11961 | } |
| 11962 | |
| 11963 | /** |
| 11964 | * lpfc_sli_host_down - Vport cleanup function |
| 11965 | * @vport: Pointer to virtual port object. |
| 11966 | * |
| 11967 | * lpfc_sli_host_down is called to clean up the resources |
| 11968 | * associated with a vport before destroying virtual |
| 11969 | * port data structures. |
| 11970 | * This function does following operations: |
| 11971 | * - Free discovery resources associated with this virtual |
| 11972 | * port. |
| 11973 | * - Free iocbs associated with this virtual port in |
| 11974 | * the txq. |
| 11975 | * - Send abort for all iocb commands associated with this |
| 11976 | * vport in txcmplq. |
| 11977 | * |
| 11978 | * This function is called with no lock held and always returns 1. |
| 11979 | **/ |
| 11980 | int |
| 11981 | lpfc_sli_host_down(struct lpfc_vport *vport) |
| 11982 | { |
| 11983 | LIST_HEAD(completions); |
| 11984 | struct lpfc_hba *phba = vport->phba; |
| 11985 | struct lpfc_sli *psli = &phba->sli; |
| 11986 | struct lpfc_queue *qp = NULL; |
| 11987 | struct lpfc_sli_ring *pring; |
| 11988 | struct lpfc_iocbq *iocb, *next_iocb; |
| 11989 | int i; |
| 11990 | unsigned long flags = 0; |
| 11991 | uint16_t prev_pring_flag; |
| 11992 | |
| 11993 | lpfc_cleanup_discovery_resources(vport); |
| 11994 | |
| 11995 | spin_lock_irqsave(&phba->hbalock, flags); |
| 11996 | |
| 11997 | /* |
| 11998 | * Error everything on the txq since these iocbs |
| 11999 | * have not been given to the FW yet. |
| 12000 | * Also issue ABTS for everything on the txcmplq |
| 12001 | */ |
| 12002 | if (phba->sli_rev != LPFC_SLI_REV4) { |
| 12003 | for (i = 0; i < psli->num_rings; i++) { |
| 12004 | pring = &psli->sli3_ring[i]; |
| 12005 | prev_pring_flag = pring->flag; |
| 12006 | /* Only slow rings */ |
| 12007 | if (pring->ringno == LPFC_ELS_RING) { |
| 12008 | pring->flag |= LPFC_DEFERRED_RING_EVENT; |
| 12009 | /* Set the lpfc data pending flag */ |
| 12010 | set_bit(LPFC_DATA_READY, addr: &phba->data_flags); |
| 12011 | } |
| 12012 | list_for_each_entry_safe(iocb, next_iocb, |
| 12013 | &pring->txq, list) { |
| 12014 | if (iocb->vport != vport) |
| 12015 | continue; |
| 12016 | list_move_tail(list: &iocb->list, head: &completions); |
| 12017 | } |
| 12018 | list_for_each_entry_safe(iocb, next_iocb, |
| 12019 | &pring->txcmplq, list) { |
| 12020 | if (iocb->vport != vport) |
| 12021 | continue; |
| 12022 | lpfc_sli_issue_abort_iotag(phba, pring, iocb, |
| 12023 | NULL); |
| 12024 | } |
| 12025 | pring->flag = prev_pring_flag; |
| 12026 | } |
| 12027 | } else { |
| 12028 | list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { |
| 12029 | pring = qp->pring; |
| 12030 | if (!pring) |
| 12031 | continue; |
| 12032 | if (pring == phba->sli4_hba.els_wq->pring) { |
| 12033 | pring->flag |= LPFC_DEFERRED_RING_EVENT; |
| 12034 | /* Set the lpfc data pending flag */ |
| 12035 | set_bit(LPFC_DATA_READY, addr: &phba->data_flags); |
| 12036 | } |
| 12037 | prev_pring_flag = pring->flag; |
| 12038 | spin_lock(lock: &pring->ring_lock); |
| 12039 | list_for_each_entry_safe(iocb, next_iocb, |
| 12040 | &pring->txq, list) { |
| 12041 | if (iocb->vport != vport) |
| 12042 | continue; |
| 12043 | list_move_tail(list: &iocb->list, head: &completions); |
| 12044 | } |
| 12045 | spin_unlock(lock: &pring->ring_lock); |
| 12046 | list_for_each_entry_safe(iocb, next_iocb, |
| 12047 | &pring->txcmplq, list) { |
| 12048 | if (iocb->vport != vport) |
| 12049 | continue; |
| 12050 | lpfc_sli_issue_abort_iotag(phba, pring, iocb, |
| 12051 | NULL); |
| 12052 | } |
| 12053 | pring->flag = prev_pring_flag; |
| 12054 | } |
| 12055 | } |
| 12056 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
| 12057 | |
| 12058 | /* Make sure HBA is alive */ |
| 12059 | lpfc_issue_hb_tmo(phba); |
| 12060 | |
| 12061 | /* Cancel all the IOCBs from the completions list */ |
| 12062 | lpfc_sli_cancel_iocbs(phba, iocblist: &completions, IOSTAT_LOCAL_REJECT, |
| 12063 | IOERR_SLI_DOWN); |
| 12064 | return 1; |
| 12065 | } |
| 12066 | |
| 12067 | /** |
| 12068 | * lpfc_sli_hba_down - Resource cleanup function for the HBA |
| 12069 | * @phba: Pointer to HBA context object. |
| 12070 | * |
| 12071 | * This function cleans up all iocb, buffers, mailbox commands |
| 12072 | * while shutting down the HBA. This function is called with no |
| 12073 | * lock held and always returns 1. |
| 12074 | * This function does the following to cleanup driver resources: |
| 12075 | * - Free discovery resources for each virtual port |
| 12076 | * - Cleanup any pending fabric iocbs |
| 12077 | * - Iterate through the iocb txq and free each entry |
| 12078 | * in the list. |
| 12079 | * - Free up any buffer posted to the HBA |
| 12080 | * - Free mailbox commands in the mailbox queue. |
| 12081 | **/ |
| 12082 | int |
| 12083 | lpfc_sli_hba_down(struct lpfc_hba *phba) |
| 12084 | { |
| 12085 | LIST_HEAD(completions); |
| 12086 | struct lpfc_sli *psli = &phba->sli; |
| 12087 | struct lpfc_queue *qp = NULL; |
| 12088 | struct lpfc_sli_ring *pring; |
| 12089 | struct lpfc_dmabuf *buf_ptr; |
| 12090 | unsigned long flags = 0; |
| 12091 | int i; |
| 12092 | |
| 12093 | /* Shutdown the mailbox command sub-system */ |
| 12094 | lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); |
| 12095 | |
| 12096 | lpfc_hba_down_prep(phba); |
| 12097 | |
| 12098 | /* Disable softirqs, including timers from obtaining phba->hbalock */ |
| 12099 | local_bh_disable(); |
| 12100 | |
| 12101 | lpfc_fabric_abort_hba(phba); |
| 12102 | |
| 12103 | spin_lock_irqsave(&phba->hbalock, flags); |
| 12104 | |
| 12105 | /* |
| 12106 | * Error everything on the txq since these iocbs |
| 12107 | * have not been given to the FW yet. |
| 12108 | */ |
| 12109 | if (phba->sli_rev != LPFC_SLI_REV4) { |
| 12110 | for (i = 0; i < psli->num_rings; i++) { |
| 12111 | pring = &psli->sli3_ring[i]; |
| 12112 | /* Only slow rings */ |
| 12113 | if (pring->ringno == LPFC_ELS_RING) { |
| 12114 | pring->flag |= LPFC_DEFERRED_RING_EVENT; |
| 12115 | /* Set the lpfc data pending flag */ |
| 12116 | set_bit(LPFC_DATA_READY, addr: &phba->data_flags); |
| 12117 | } |
| 12118 | list_splice_init(list: &pring->txq, head: &completions); |
| 12119 | } |
| 12120 | } else { |
| 12121 | list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { |
| 12122 | pring = qp->pring; |
| 12123 | if (!pring) |
| 12124 | continue; |
| 12125 | spin_lock(lock: &pring->ring_lock); |
| 12126 | list_splice_init(list: &pring->txq, head: &completions); |
| 12127 | spin_unlock(lock: &pring->ring_lock); |
| 12128 | if (pring == phba->sli4_hba.els_wq->pring) { |
| 12129 | pring->flag |= LPFC_DEFERRED_RING_EVENT; |
| 12130 | /* Set the lpfc data pending flag */ |
| 12131 | set_bit(LPFC_DATA_READY, addr: &phba->data_flags); |
| 12132 | } |
| 12133 | } |
| 12134 | } |
| 12135 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
| 12136 | |
| 12137 | /* Cancel all the IOCBs from the completions list */ |
| 12138 | lpfc_sli_cancel_iocbs(phba, iocblist: &completions, IOSTAT_LOCAL_REJECT, |
| 12139 | IOERR_SLI_DOWN); |
| 12140 | |
| 12141 | spin_lock_irqsave(&phba->hbalock, flags); |
| 12142 | list_splice_init(list: &phba->elsbuf, head: &completions); |
| 12143 | phba->elsbuf_cnt = 0; |
| 12144 | phba->elsbuf_prev_cnt = 0; |
| 12145 | spin_unlock_irqrestore(lock: &phba->hbalock, flags); |
| 12146 | |
| 12147 | while (!list_empty(head: &completions)) { |
| 12148 | list_remove_head(&completions, buf_ptr, |
| 12149 | struct lpfc_dmabuf, list); |
| 12150 | lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); |
| 12151 | kfree(objp: buf_ptr); |
| 12152 | } |
| 12153 | |
| 12154 | /* Enable softirqs again, done with phba->hbalock */ |
| 12155 | local_bh_enable(); |
| 12156 | |
| 12157 | /* Return any active mbox cmds */ |
| 12158 | timer_delete_sync(timer: &psli->mbox_tmo); |
| 12159 | |
| 12160 | spin_lock_irqsave(&phba->pport->work_port_lock, flags); |
| 12161 | phba->pport->work_port_events &= ~WORKER_MBOX_TMO; |
| 12162 | spin_unlock_irqrestore(lock: &phba->pport->work_port_lock, flags); |
| 12163 | |
| 12164 | return 1; |
| 12165 | } |
| 12166 | |
| 12167 | /** |
| 12168 | * lpfc_sli_pcimem_bcopy - SLI memory copy function |
| 12169 | * @srcp: Source memory pointer. |
| 12170 | * @destp: Destination memory pointer. |
| 12171 | * @cnt: Number of words required to be copied. |
| 12172 | * |
| 12173 | * This function is used for copying data between driver memory |
| 12174 | * and the SLI memory. This function also changes the endianness |
| 12175 | * of each word if native endianness is different from SLI |
| 12176 | * endianness. This function can be called with or without |
| 12177 | * lock. |
| 12178 | **/ |
| 12179 | void |
| 12180 | lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) |
| 12181 | { |
| 12182 | uint32_t *src = srcp; |
| 12183 | uint32_t *dest = destp; |
| 12184 | uint32_t ldata; |
| 12185 | int i; |
| 12186 | |
| 12187 | for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { |
| 12188 | ldata = *src; |
| 12189 | ldata = le32_to_cpu(ldata); |
| 12190 | *dest = ldata; |
| 12191 | src++; |
| 12192 | dest++; |
| 12193 | } |
| 12194 | } |
| 12195 | |
| 12196 | |
| 12197 | /** |
| 12198 | * lpfc_sli_bemem_bcopy - SLI memory copy function |
| 12199 | * @srcp: Source memory pointer. |
| 12200 | * @destp: Destination memory pointer. |
| 12201 | * @cnt: Number of words required to be copied. |
| 12202 | * |
| 12203 | * This function is used for copying data between a data structure |
| 12204 | * with big endian representation to local endianness. |
| 12205 | * This function can be called with or without lock. |
| 12206 | **/ |
| 12207 | void |
| 12208 | lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) |
| 12209 | { |
| 12210 | uint32_t *src = srcp; |
| 12211 | uint32_t *dest = destp; |
| 12212 | uint32_t ldata; |
| 12213 | int i; |
| 12214 | |
| 12215 | for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { |
| 12216 | ldata = *src; |
| 12217 | ldata = be32_to_cpu(ldata); |
| 12218 | *dest = ldata; |
| 12219 | src++; |
| 12220 | dest++; |
| 12221 | } |
| 12222 | } |
| 12223 | |
| 12224 | /** |
| 12225 | * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq |
| 12226 | * @phba: Pointer to HBA context object. |
| 12227 | * @pring: Pointer to driver SLI ring object. |
| 12228 | * @mp: Pointer to driver buffer object. |
| 12229 | * |
| 12230 | * This function is called with no lock held. |
| 12231 | * It always return zero after adding the buffer to the postbufq |
| 12232 | * buffer list. |
| 12233 | **/ |
| 12234 | int |
| 12235 | lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 12236 | struct lpfc_dmabuf *mp) |
| 12237 | { |
| 12238 | /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up |
| 12239 | later */ |
| 12240 | spin_lock_irq(lock: &phba->hbalock); |
| 12241 | list_add_tail(new: &mp->list, head: &pring->postbufq); |
| 12242 | pring->postbufq_cnt++; |
| 12243 | spin_unlock_irq(lock: &phba->hbalock); |
| 12244 | return 0; |
| 12245 | } |
| 12246 | |
| 12247 | /** |
| 12248 | * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer |
| 12249 | * @phba: Pointer to HBA context object. |
| 12250 | * |
| 12251 | * When HBQ is enabled, buffers are searched based on tags. This function |
| 12252 | * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The |
| 12253 | * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag |
| 12254 | * does not conflict with tags of buffer posted for unsolicited events. |
| 12255 | * The function returns the allocated tag. The function is called with |
| 12256 | * no locks held. |
| 12257 | **/ |
| 12258 | uint32_t |
| 12259 | lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) |
| 12260 | { |
| 12261 | spin_lock_irq(lock: &phba->hbalock); |
| 12262 | phba->buffer_tag_count++; |
| 12263 | /* |
| 12264 | * Always set the QUE_BUFTAG_BIT to distiguish between |
| 12265 | * a tag assigned by HBQ. |
| 12266 | */ |
| 12267 | phba->buffer_tag_count |= QUE_BUFTAG_BIT; |
| 12268 | spin_unlock_irq(lock: &phba->hbalock); |
| 12269 | return phba->buffer_tag_count; |
| 12270 | } |
| 12271 | |
| 12272 | /** |
| 12273 | * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag |
| 12274 | * @phba: Pointer to HBA context object. |
| 12275 | * @pring: Pointer to driver SLI ring object. |
| 12276 | * @tag: Buffer tag. |
| 12277 | * |
| 12278 | * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq |
| 12279 | * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX |
| 12280 | * iocb is posted to the response ring with the tag of the buffer. |
| 12281 | * This function searches the pring->postbufq list using the tag |
| 12282 | * to find buffer associated with CMD_IOCB_RET_XRI64_CX |
| 12283 | * iocb. If the buffer is found then lpfc_dmabuf object of the |
| 12284 | * buffer is returned to the caller else NULL is returned. |
| 12285 | * This function is called with no lock held. |
| 12286 | **/ |
| 12287 | struct lpfc_dmabuf * |
| 12288 | lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 12289 | uint32_t tag) |
| 12290 | { |
| 12291 | struct lpfc_dmabuf *mp, *next_mp; |
| 12292 | struct list_head *slp = &pring->postbufq; |
| 12293 | |
| 12294 | /* Search postbufq, from the beginning, looking for a match on tag */ |
| 12295 | spin_lock_irq(lock: &phba->hbalock); |
| 12296 | list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { |
| 12297 | if (mp->buffer_tag == tag) { |
| 12298 | list_del_init(entry: &mp->list); |
| 12299 | pring->postbufq_cnt--; |
| 12300 | spin_unlock_irq(lock: &phba->hbalock); |
| 12301 | return mp; |
| 12302 | } |
| 12303 | } |
| 12304 | |
| 12305 | spin_unlock_irq(lock: &phba->hbalock); |
| 12306 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 12307 | "0402 Cannot find virtual addr for buffer tag on " |
| 12308 | "ring %d Data x%lx x%px x%px x%x\n" , |
| 12309 | pring->ringno, (unsigned long) tag, |
| 12310 | slp->next, slp->prev, pring->postbufq_cnt); |
| 12311 | |
| 12312 | return NULL; |
| 12313 | } |
| 12314 | |
| 12315 | /** |
| 12316 | * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events |
| 12317 | * @phba: Pointer to HBA context object. |
| 12318 | * @pring: Pointer to driver SLI ring object. |
| 12319 | * @phys: DMA address of the buffer. |
| 12320 | * |
| 12321 | * This function searches the buffer list using the dma_address |
| 12322 | * of unsolicited event to find the driver's lpfc_dmabuf object |
| 12323 | * corresponding to the dma_address. The function returns the |
| 12324 | * lpfc_dmabuf object if a buffer is found else it returns NULL. |
| 12325 | * This function is called by the ct and els unsolicited event |
| 12326 | * handlers to get the buffer associated with the unsolicited |
| 12327 | * event. |
| 12328 | * |
| 12329 | * This function is called with no lock held. |
| 12330 | **/ |
| 12331 | struct lpfc_dmabuf * |
| 12332 | lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 12333 | dma_addr_t phys) |
| 12334 | { |
| 12335 | struct lpfc_dmabuf *mp, *next_mp; |
| 12336 | struct list_head *slp = &pring->postbufq; |
| 12337 | |
| 12338 | /* Search postbufq, from the beginning, looking for a match on phys */ |
| 12339 | spin_lock_irq(lock: &phba->hbalock); |
| 12340 | list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { |
| 12341 | if (mp->phys == phys) { |
| 12342 | list_del_init(entry: &mp->list); |
| 12343 | pring->postbufq_cnt--; |
| 12344 | spin_unlock_irq(lock: &phba->hbalock); |
| 12345 | return mp; |
| 12346 | } |
| 12347 | } |
| 12348 | |
| 12349 | spin_unlock_irq(lock: &phba->hbalock); |
| 12350 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 12351 | "0410 Cannot find virtual addr for mapped buf on " |
| 12352 | "ring %d Data x%llx x%px x%px x%x\n" , |
| 12353 | pring->ringno, (unsigned long long)phys, |
| 12354 | slp->next, slp->prev, pring->postbufq_cnt); |
| 12355 | return NULL; |
| 12356 | } |
| 12357 | |
| 12358 | /** |
| 12359 | * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs |
| 12360 | * @phba: Pointer to HBA context object. |
| 12361 | * @cmdiocb: Pointer to driver command iocb object. |
| 12362 | * @rspiocb: Pointer to driver response iocb object. |
| 12363 | * |
| 12364 | * This function is the completion handler for the abort iocbs for |
| 12365 | * ELS commands. This function is called from the ELS ring event |
| 12366 | * handler with no lock held. This function frees memory resources |
| 12367 | * associated with the abort iocb. |
| 12368 | **/ |
| 12369 | static void |
| 12370 | lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| 12371 | struct lpfc_iocbq *rspiocb) |
| 12372 | { |
| 12373 | u32 ulp_status = get_job_ulpstatus(phba, iocbq: rspiocb); |
| 12374 | u32 ulp_word4 = get_job_word4(phba, iocbq: rspiocb); |
| 12375 | u8 cmnd = get_job_cmnd(phba, iocbq: cmdiocb); |
| 12376 | |
| 12377 | if (ulp_status) { |
| 12378 | /* |
| 12379 | * Assume that the port already completed and returned, or |
| 12380 | * will return the iocb. Just Log the message. |
| 12381 | */ |
| 12382 | if (phba->sli_rev < LPFC_SLI_REV4) { |
| 12383 | if (cmnd == CMD_ABORT_XRI_CX && |
| 12384 | ulp_status == IOSTAT_LOCAL_REJECT && |
| 12385 | ulp_word4 == IOERR_ABORT_REQUESTED) { |
| 12386 | goto release_iocb; |
| 12387 | } |
| 12388 | } |
| 12389 | } |
| 12390 | |
| 12391 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, |
| 12392 | "0327 Abort els iocb complete x%px with io cmd xri %x " |
| 12393 | "abort tag x%x abort status %x abort code %x\n" , |
| 12394 | cmdiocb, get_job_abtsiotag(phba, cmdiocb), |
| 12395 | (phba->sli_rev == LPFC_SLI_REV4) ? |
| 12396 | get_wqe_reqtag(cmdiocb) : |
| 12397 | cmdiocb->iocb.ulpIoTag, |
| 12398 | ulp_status, ulp_word4); |
| 12399 | release_iocb: |
| 12400 | lpfc_sli_release_iocbq(phba, iocbq: cmdiocb); |
| 12401 | return; |
| 12402 | } |
| 12403 | |
| 12404 | /** |
| 12405 | * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command |
| 12406 | * @phba: Pointer to HBA context object. |
| 12407 | * @cmdiocb: Pointer to driver command iocb object. |
| 12408 | * @rspiocb: Pointer to driver response iocb object. |
| 12409 | * |
| 12410 | * The function is called from SLI ring event handler with no |
| 12411 | * lock held. This function is the completion handler for ELS commands |
| 12412 | * which are aborted. The function frees memory resources used for |
| 12413 | * the aborted ELS commands. |
| 12414 | **/ |
| 12415 | void |
| 12416 | lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| 12417 | struct lpfc_iocbq *rspiocb) |
| 12418 | { |
| 12419 | struct lpfc_nodelist *ndlp = cmdiocb->ndlp; |
| 12420 | IOCB_t *irsp; |
| 12421 | LPFC_MBOXQ_t *mbox; |
| 12422 | u32 ulp_command, ulp_status, ulp_word4, iotag; |
| 12423 | |
| 12424 | ulp_command = get_job_cmnd(phba, iocbq: cmdiocb); |
| 12425 | ulp_status = get_job_ulpstatus(phba, iocbq: rspiocb); |
| 12426 | ulp_word4 = get_job_word4(phba, iocbq: rspiocb); |
| 12427 | |
| 12428 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 12429 | iotag = get_wqe_reqtag(cmdiocb); |
| 12430 | } else { |
| 12431 | irsp = &rspiocb->iocb; |
| 12432 | iotag = irsp->ulpIoTag; |
| 12433 | |
| 12434 | /* It is possible a PLOGI_RJT for NPIV ports to get aborted. |
| 12435 | * The MBX_REG_LOGIN64 mbox command is freed back to the |
| 12436 | * mbox_mem_pool here. |
| 12437 | */ |
| 12438 | if (cmdiocb->context_un.mbox) { |
| 12439 | mbox = cmdiocb->context_un.mbox; |
| 12440 | lpfc_mbox_rsrc_cleanup(phba, mbox, locked: MBOX_THD_UNLOCKED); |
| 12441 | cmdiocb->context_un.mbox = NULL; |
| 12442 | } |
| 12443 | } |
| 12444 | |
| 12445 | /* ELS cmd tag <ulpIoTag> completes */ |
| 12446 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
| 12447 | "0139 Ignoring ELS cmd code x%x ref cnt x%x Data: " |
| 12448 | "x%x x%x x%x x%px\n" , |
| 12449 | ulp_command, kref_read(&cmdiocb->ndlp->kref), |
| 12450 | ulp_status, ulp_word4, iotag, cmdiocb->ndlp); |
| 12451 | /* |
| 12452 | * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp |
| 12453 | * if exchange is busy. |
| 12454 | */ |
| 12455 | if (ulp_command == CMD_GEN_REQUEST64_CR) |
| 12456 | lpfc_ct_free_iocb(phba, cmdiocb); |
| 12457 | else |
| 12458 | lpfc_els_free_iocb(phba, cmdiocb); |
| 12459 | |
| 12460 | lpfc_nlp_put(ndlp); |
| 12461 | } |
| 12462 | |
| 12463 | /** |
| 12464 | * lpfc_sli_issue_abort_iotag - Abort function for a command iocb |
| 12465 | * @phba: Pointer to HBA context object. |
| 12466 | * @pring: Pointer to driver SLI ring object. |
| 12467 | * @cmdiocb: Pointer to driver command iocb object. |
| 12468 | * @cmpl: completion function. |
| 12469 | * |
| 12470 | * This function issues an abort iocb for the provided command iocb. In case |
| 12471 | * of unloading, the abort iocb will not be issued to commands on the ELS |
| 12472 | * ring. Instead, the callback function shall be changed to those commands |
| 12473 | * so that nothing happens when them finishes. This function is called with |
| 12474 | * hbalock held andno ring_lock held (SLI4). The function returns IOCB_SUCCESS |
| 12475 | * when the command iocb is an abort request. |
| 12476 | * |
| 12477 | **/ |
| 12478 | int |
| 12479 | lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 12480 | struct lpfc_iocbq *cmdiocb, void *cmpl) |
| 12481 | { |
| 12482 | struct lpfc_vport *vport = cmdiocb->vport; |
| 12483 | struct lpfc_iocbq *abtsiocbp; |
| 12484 | int retval = IOCB_ERROR; |
| 12485 | unsigned long iflags; |
| 12486 | struct lpfc_nodelist *ndlp = NULL; |
| 12487 | u32 ulp_command = get_job_cmnd(phba, iocbq: cmdiocb); |
| 12488 | u16 ulp_context, iotag; |
| 12489 | bool ia; |
| 12490 | |
| 12491 | /* |
| 12492 | * There are certain command types we don't want to abort. And we |
| 12493 | * don't want to abort commands that are already in the process of |
| 12494 | * being aborted. |
| 12495 | */ |
| 12496 | if (ulp_command == CMD_ABORT_XRI_WQE || |
| 12497 | ulp_command == CMD_ABORT_XRI_CN || |
| 12498 | ulp_command == CMD_CLOSE_XRI_CN || |
| 12499 | cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED) |
| 12500 | return IOCB_ABORTING; |
| 12501 | |
| 12502 | if (!pring) { |
| 12503 | if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) |
| 12504 | cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl; |
| 12505 | else |
| 12506 | cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl; |
| 12507 | return retval; |
| 12508 | } |
| 12509 | |
| 12510 | /* |
| 12511 | * Always abort the outstanding WQE and set the IA bit correctly |
| 12512 | * for the context. This is necessary for correctly removing |
| 12513 | * outstanding ndlp reference counts when the CQE completes with |
| 12514 | * the XB bit set. |
| 12515 | */ |
| 12516 | abtsiocbp = __lpfc_sli_get_iocbq(phba); |
| 12517 | if (abtsiocbp == NULL) |
| 12518 | return IOCB_NORESOURCE; |
| 12519 | |
| 12520 | /* This signals the response to set the correct status |
| 12521 | * before calling the completion handler |
| 12522 | */ |
| 12523 | cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED; |
| 12524 | |
| 12525 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 12526 | ulp_context = cmdiocb->sli4_xritag; |
| 12527 | iotag = abtsiocbp->iotag; |
| 12528 | } else { |
| 12529 | iotag = cmdiocb->iocb.ulpIoTag; |
| 12530 | if (pring->ringno == LPFC_ELS_RING) { |
| 12531 | ndlp = cmdiocb->ndlp; |
| 12532 | ulp_context = ndlp->nlp_rpi; |
| 12533 | } else { |
| 12534 | ulp_context = cmdiocb->iocb.ulpContext; |
| 12535 | } |
| 12536 | } |
| 12537 | |
| 12538 | /* Just close the exchange under certain conditions. */ |
| 12539 | if (test_bit(FC_UNLOADING, &vport->load_flag) || |
| 12540 | phba->link_state < LPFC_LINK_UP || |
| 12541 | (phba->sli_rev == LPFC_SLI_REV4 && |
| 12542 | phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) || |
| 12543 | (phba->link_flag & LS_EXTERNAL_LOOPBACK)) |
| 12544 | ia = true; |
| 12545 | else |
| 12546 | ia = false; |
| 12547 | |
| 12548 | lpfc_sli_prep_abort_xri(phba, cmdiocbq: abtsiocbp, ulp_context, iotag, |
| 12549 | ulp_class: cmdiocb->iocb.ulpClass, |
| 12550 | LPFC_WQE_CQ_ID_DEFAULT, ia, wqec: false); |
| 12551 | |
| 12552 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ |
| 12553 | abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; |
| 12554 | if (cmdiocb->cmd_flag & LPFC_IO_FCP) |
| 12555 | abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX); |
| 12556 | |
| 12557 | if (cmdiocb->cmd_flag & LPFC_IO_FOF) |
| 12558 | abtsiocbp->cmd_flag |= LPFC_IO_FOF; |
| 12559 | |
| 12560 | if (cmpl) |
| 12561 | abtsiocbp->cmd_cmpl = cmpl; |
| 12562 | else |
| 12563 | abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl; |
| 12564 | abtsiocbp->vport = vport; |
| 12565 | |
| 12566 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 12567 | pring = lpfc_sli4_calc_ring(phba, piocb: abtsiocbp); |
| 12568 | if (unlikely(pring == NULL)) |
| 12569 | goto abort_iotag_exit; |
| 12570 | /* Note: both hbalock and ring_lock need to be set here */ |
| 12571 | spin_lock_irqsave(&pring->ring_lock, iflags); |
| 12572 | retval = __lpfc_sli_issue_iocb(phba, ring_number: pring->ringno, |
| 12573 | piocb: abtsiocbp, flag: 0); |
| 12574 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 12575 | } else { |
| 12576 | retval = __lpfc_sli_issue_iocb(phba, ring_number: pring->ringno, |
| 12577 | piocb: abtsiocbp, flag: 0); |
| 12578 | } |
| 12579 | |
| 12580 | abort_iotag_exit: |
| 12581 | |
| 12582 | lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, |
| 12583 | "0339 Abort IO XRI x%x, Original iotag x%x, " |
| 12584 | "abort tag x%x Cmdjob : x%px Abortjob : x%px " |
| 12585 | "retval x%x : IA %d cmd_cmpl %ps\n" , |
| 12586 | ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ? |
| 12587 | cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp, |
| 12588 | retval, ia, abtsiocbp->cmd_cmpl); |
| 12589 | if (retval) { |
| 12590 | cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; |
| 12591 | __lpfc_sli_release_iocbq(phba, iocbq: abtsiocbp); |
| 12592 | } |
| 12593 | |
| 12594 | /* |
| 12595 | * Caller to this routine should check for IOCB_ERROR |
| 12596 | * and handle it properly. This routine no longer removes |
| 12597 | * iocb off txcmplq and call compl in case of IOCB_ERROR. |
| 12598 | */ |
| 12599 | return retval; |
| 12600 | } |
| 12601 | |
| 12602 | /** |
| 12603 | * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. |
| 12604 | * @phba: pointer to lpfc HBA data structure. |
| 12605 | * |
| 12606 | * This routine will abort all pending and outstanding iocbs to an HBA. |
| 12607 | **/ |
| 12608 | void |
| 12609 | lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) |
| 12610 | { |
| 12611 | struct lpfc_sli *psli = &phba->sli; |
| 12612 | struct lpfc_sli_ring *pring; |
| 12613 | struct lpfc_queue *qp = NULL; |
| 12614 | int i; |
| 12615 | |
| 12616 | if (phba->sli_rev != LPFC_SLI_REV4) { |
| 12617 | for (i = 0; i < psli->num_rings; i++) { |
| 12618 | pring = &psli->sli3_ring[i]; |
| 12619 | lpfc_sli_abort_iocb_ring(phba, pring); |
| 12620 | } |
| 12621 | return; |
| 12622 | } |
| 12623 | list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { |
| 12624 | pring = qp->pring; |
| 12625 | if (!pring) |
| 12626 | continue; |
| 12627 | lpfc_sli_abort_iocb_ring(phba, pring); |
| 12628 | } |
| 12629 | } |
| 12630 | |
| 12631 | /** |
| 12632 | * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts |
| 12633 | * @iocbq: Pointer to iocb object. |
| 12634 | * @vport: Pointer to driver virtual port object. |
| 12635 | * |
| 12636 | * This function acts as an iocb filter for functions which abort FCP iocbs. |
| 12637 | * |
| 12638 | * Return values |
| 12639 | * -ENODEV, if a null iocb or vport ptr is encountered |
| 12640 | * -EINVAL, if the iocb is not an FCP I/O, not on the TX cmpl queue, premarked as |
| 12641 | * driver already started the abort process, or is an abort iocb itself |
| 12642 | * 0, passes criteria for aborting the FCP I/O iocb |
| 12643 | **/ |
| 12644 | static int |
| 12645 | lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq, |
| 12646 | struct lpfc_vport *vport) |
| 12647 | { |
| 12648 | u8 ulp_command; |
| 12649 | |
| 12650 | /* No null ptr vports */ |
| 12651 | if (!iocbq || iocbq->vport != vport) |
| 12652 | return -ENODEV; |
| 12653 | |
| 12654 | /* iocb must be for FCP IO, already exists on the TX cmpl queue, |
| 12655 | * can't be premarked as driver aborted, nor be an ABORT iocb itself |
| 12656 | */ |
| 12657 | ulp_command = get_job_cmnd(phba: vport->phba, iocbq); |
| 12658 | if (!(iocbq->cmd_flag & LPFC_IO_FCP) || |
| 12659 | !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) || |
| 12660 | (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) || |
| 12661 | (ulp_command == CMD_ABORT_XRI_CN || |
| 12662 | ulp_command == CMD_CLOSE_XRI_CN || |
| 12663 | ulp_command == CMD_ABORT_XRI_WQE)) |
| 12664 | return -EINVAL; |
| 12665 | |
| 12666 | return 0; |
| 12667 | } |
| 12668 | |
| 12669 | /** |
| 12670 | * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target |
| 12671 | * @iocbq: Pointer to driver iocb object. |
| 12672 | * @vport: Pointer to driver virtual port object. |
| 12673 | * @tgt_id: SCSI ID of the target. |
| 12674 | * @lun_id: LUN ID of the scsi device. |
| 12675 | * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST |
| 12676 | * |
| 12677 | * This function acts as an iocb filter for validating a lun/SCSI target/SCSI |
| 12678 | * host. |
| 12679 | * |
| 12680 | * It will return |
| 12681 | * 0 if the filtering criteria is met for the given iocb and will return |
| 12682 | * 1 if the filtering criteria is not met. |
| 12683 | * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the |
| 12684 | * given iocb is for the SCSI device specified by vport, tgt_id and |
| 12685 | * lun_id parameter. |
| 12686 | * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the |
| 12687 | * given iocb is for the SCSI target specified by vport and tgt_id |
| 12688 | * parameters. |
| 12689 | * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the |
| 12690 | * given iocb is for the SCSI host associated with the given vport. |
| 12691 | * This function is called with no locks held. |
| 12692 | **/ |
| 12693 | static int |
| 12694 | lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, |
| 12695 | uint16_t tgt_id, uint64_t lun_id, |
| 12696 | lpfc_ctx_cmd ctx_cmd) |
| 12697 | { |
| 12698 | struct lpfc_io_buf *lpfc_cmd; |
| 12699 | int rc = 1; |
| 12700 | |
| 12701 | lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); |
| 12702 | |
| 12703 | if (lpfc_cmd->pCmd == NULL) |
| 12704 | return rc; |
| 12705 | |
| 12706 | switch (ctx_cmd) { |
| 12707 | case LPFC_CTX_LUN: |
| 12708 | if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && |
| 12709 | (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && |
| 12710 | (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) |
| 12711 | rc = 0; |
| 12712 | break; |
| 12713 | case LPFC_CTX_TGT: |
| 12714 | if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && |
| 12715 | (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) |
| 12716 | rc = 0; |
| 12717 | break; |
| 12718 | case LPFC_CTX_HOST: |
| 12719 | rc = 0; |
| 12720 | break; |
| 12721 | default: |
| 12722 | printk(KERN_ERR "%s: Unknown context cmd type, value %d\n" , |
| 12723 | __func__, ctx_cmd); |
| 12724 | break; |
| 12725 | } |
| 12726 | |
| 12727 | return rc; |
| 12728 | } |
| 12729 | |
| 12730 | /** |
| 12731 | * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending |
| 12732 | * @vport: Pointer to virtual port. |
| 12733 | * @tgt_id: SCSI ID of the target. |
| 12734 | * @lun_id: LUN ID of the scsi device. |
| 12735 | * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. |
| 12736 | * |
| 12737 | * This function returns number of FCP commands pending for the vport. |
| 12738 | * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP |
| 12739 | * commands pending on the vport associated with SCSI device specified |
| 12740 | * by tgt_id and lun_id parameters. |
| 12741 | * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP |
| 12742 | * commands pending on the vport associated with SCSI target specified |
| 12743 | * by tgt_id parameter. |
| 12744 | * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP |
| 12745 | * commands pending on the vport. |
| 12746 | * This function returns the number of iocbs which satisfy the filter. |
| 12747 | * This function is called without any lock held. |
| 12748 | **/ |
| 12749 | int |
| 12750 | lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, |
| 12751 | lpfc_ctx_cmd ctx_cmd) |
| 12752 | { |
| 12753 | struct lpfc_hba *phba = vport->phba; |
| 12754 | struct lpfc_iocbq *iocbq; |
| 12755 | int sum, i; |
| 12756 | unsigned long iflags; |
| 12757 | u8 ulp_command; |
| 12758 | |
| 12759 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 12760 | for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { |
| 12761 | iocbq = phba->sli.iocbq_lookup[i]; |
| 12762 | |
| 12763 | if (!iocbq || iocbq->vport != vport) |
| 12764 | continue; |
| 12765 | if (!(iocbq->cmd_flag & LPFC_IO_FCP) || |
| 12766 | !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) |
| 12767 | continue; |
| 12768 | |
| 12769 | /* Include counting outstanding aborts */ |
| 12770 | ulp_command = get_job_cmnd(phba, iocbq); |
| 12771 | if (ulp_command == CMD_ABORT_XRI_CN || |
| 12772 | ulp_command == CMD_CLOSE_XRI_CN || |
| 12773 | ulp_command == CMD_ABORT_XRI_WQE) { |
| 12774 | sum++; |
| 12775 | continue; |
| 12776 | } |
| 12777 | |
| 12778 | if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, |
| 12779 | ctx_cmd) == 0) |
| 12780 | sum++; |
| 12781 | } |
| 12782 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 12783 | |
| 12784 | return sum; |
| 12785 | } |
| 12786 | |
| 12787 | /** |
| 12788 | * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs |
| 12789 | * @phba: Pointer to HBA context object |
| 12790 | * @cmdiocb: Pointer to command iocb object. |
| 12791 | * @rspiocb: Pointer to response iocb object. |
| 12792 | * |
| 12793 | * This function is called when an aborted FCP iocb completes. This |
| 12794 | * function is called by the ring event handler with no lock held. |
| 12795 | * This function frees the iocb. |
| 12796 | **/ |
| 12797 | void |
| 12798 | lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| 12799 | struct lpfc_iocbq *rspiocb) |
| 12800 | { |
| 12801 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 12802 | "3096 ABORT_XRI_CX completing on rpi x%x " |
| 12803 | "original iotag x%x, abort cmd iotag x%x " |
| 12804 | "status 0x%x, reason 0x%x\n" , |
| 12805 | (phba->sli_rev == LPFC_SLI_REV4) ? |
| 12806 | cmdiocb->sli4_xritag : |
| 12807 | cmdiocb->iocb.un.acxri.abortContextTag, |
| 12808 | get_job_abtsiotag(phba, cmdiocb), |
| 12809 | cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb), |
| 12810 | get_job_word4(phba, rspiocb)); |
| 12811 | lpfc_sli_release_iocbq(phba, iocbq: cmdiocb); |
| 12812 | return; |
| 12813 | } |
| 12814 | |
| 12815 | /** |
| 12816 | * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN |
| 12817 | * @vport: Pointer to virtual port. |
| 12818 | * @tgt_id: SCSI ID of the target. |
| 12819 | * @lun_id: LUN ID of the scsi device. |
| 12820 | * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. |
| 12821 | * |
| 12822 | * This function sends an abort command for every SCSI command |
| 12823 | * associated with the given virtual port pending on the ring |
| 12824 | * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then |
| 12825 | * lpfc_sli_validate_fcp_iocb function. The ordering for validation before |
| 12826 | * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort |
| 12827 | * followed by lpfc_sli_validate_fcp_iocb. |
| 12828 | * |
| 12829 | * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the |
| 12830 | * FCP iocbs associated with lun specified by tgt_id and lun_id |
| 12831 | * parameters |
| 12832 | * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the |
| 12833 | * FCP iocbs associated with SCSI target specified by tgt_id parameter. |
| 12834 | * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all |
| 12835 | * FCP iocbs associated with virtual port. |
| 12836 | * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4 |
| 12837 | * lpfc_sli4_calc_ring is used. |
| 12838 | * This function returns number of iocbs it failed to abort. |
| 12839 | * This function is called with no locks held. |
| 12840 | **/ |
| 12841 | int |
| 12842 | lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id, |
| 12843 | lpfc_ctx_cmd abort_cmd) |
| 12844 | { |
| 12845 | struct lpfc_hba *phba = vport->phba; |
| 12846 | struct lpfc_sli_ring *pring = NULL; |
| 12847 | struct lpfc_iocbq *iocbq; |
| 12848 | int errcnt = 0, ret_val = 0; |
| 12849 | unsigned long iflags; |
| 12850 | int i; |
| 12851 | |
| 12852 | /* all I/Os are in process of being flushed */ |
| 12853 | if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) |
| 12854 | return errcnt; |
| 12855 | |
| 12856 | for (i = 1; i <= phba->sli.last_iotag; i++) { |
| 12857 | iocbq = phba->sli.iocbq_lookup[i]; |
| 12858 | |
| 12859 | if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport)) |
| 12860 | continue; |
| 12861 | |
| 12862 | if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, |
| 12863 | ctx_cmd: abort_cmd) != 0) |
| 12864 | continue; |
| 12865 | |
| 12866 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 12867 | if (phba->sli_rev == LPFC_SLI_REV3) { |
| 12868 | pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; |
| 12869 | } else if (phba->sli_rev == LPFC_SLI_REV4) { |
| 12870 | pring = lpfc_sli4_calc_ring(phba, piocb: iocbq); |
| 12871 | } |
| 12872 | ret_val = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb: iocbq, |
| 12873 | cmpl: lpfc_sli_abort_fcp_cmpl); |
| 12874 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 12875 | if (ret_val != IOCB_SUCCESS) |
| 12876 | errcnt++; |
| 12877 | } |
| 12878 | |
| 12879 | return errcnt; |
| 12880 | } |
| 12881 | |
| 12882 | /** |
| 12883 | * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN |
| 12884 | * @vport: Pointer to virtual port. |
| 12885 | * @pring: Pointer to driver SLI ring object. |
| 12886 | * @tgt_id: SCSI ID of the target. |
| 12887 | * @lun_id: LUN ID of the scsi device. |
| 12888 | * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. |
| 12889 | * |
| 12890 | * This function sends an abort command for every SCSI command |
| 12891 | * associated with the given virtual port pending on the ring |
| 12892 | * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then |
| 12893 | * lpfc_sli_validate_fcp_iocb function. The ordering for validation before |
| 12894 | * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort |
| 12895 | * followed by lpfc_sli_validate_fcp_iocb. |
| 12896 | * |
| 12897 | * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the |
| 12898 | * FCP iocbs associated with lun specified by tgt_id and lun_id |
| 12899 | * parameters |
| 12900 | * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the |
| 12901 | * FCP iocbs associated with SCSI target specified by tgt_id parameter. |
| 12902 | * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all |
| 12903 | * FCP iocbs associated with virtual port. |
| 12904 | * This function returns number of iocbs it aborted . |
| 12905 | * This function is called with no locks held right after a taskmgmt |
| 12906 | * command is sent. |
| 12907 | **/ |
| 12908 | int |
| 12909 | lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, |
| 12910 | uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) |
| 12911 | { |
| 12912 | struct lpfc_hba *phba = vport->phba; |
| 12913 | struct lpfc_io_buf *lpfc_cmd; |
| 12914 | struct lpfc_iocbq *abtsiocbq; |
| 12915 | struct lpfc_nodelist *ndlp = NULL; |
| 12916 | struct lpfc_iocbq *iocbq; |
| 12917 | int sum, i, ret_val; |
| 12918 | unsigned long iflags; |
| 12919 | struct lpfc_sli_ring *pring_s4 = NULL; |
| 12920 | u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT; |
| 12921 | bool ia; |
| 12922 | |
| 12923 | /* all I/Os are in process of being flushed */ |
| 12924 | if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) |
| 12925 | return 0; |
| 12926 | |
| 12927 | sum = 0; |
| 12928 | |
| 12929 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 12930 | for (i = 1; i <= phba->sli.last_iotag; i++) { |
| 12931 | iocbq = phba->sli.iocbq_lookup[i]; |
| 12932 | |
| 12933 | if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport)) |
| 12934 | continue; |
| 12935 | |
| 12936 | if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, |
| 12937 | ctx_cmd: cmd) != 0) |
| 12938 | continue; |
| 12939 | |
| 12940 | /* Guard against IO completion being called at same time */ |
| 12941 | lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); |
| 12942 | spin_lock(lock: &lpfc_cmd->buf_lock); |
| 12943 | |
| 12944 | if (!lpfc_cmd->pCmd) { |
| 12945 | spin_unlock(lock: &lpfc_cmd->buf_lock); |
| 12946 | continue; |
| 12947 | } |
| 12948 | |
| 12949 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 12950 | pring_s4 = |
| 12951 | phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring; |
| 12952 | if (!pring_s4) { |
| 12953 | spin_unlock(lock: &lpfc_cmd->buf_lock); |
| 12954 | continue; |
| 12955 | } |
| 12956 | /* Note: both hbalock and ring_lock must be set here */ |
| 12957 | spin_lock(lock: &pring_s4->ring_lock); |
| 12958 | } |
| 12959 | |
| 12960 | /* |
| 12961 | * If the iocbq is already being aborted, don't take a second |
| 12962 | * action, but do count it. |
| 12963 | */ |
| 12964 | if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) || |
| 12965 | !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { |
| 12966 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 12967 | spin_unlock(lock: &pring_s4->ring_lock); |
| 12968 | spin_unlock(lock: &lpfc_cmd->buf_lock); |
| 12969 | continue; |
| 12970 | } |
| 12971 | |
| 12972 | /* issue ABTS for this IOCB based on iotag */ |
| 12973 | abtsiocbq = __lpfc_sli_get_iocbq(phba); |
| 12974 | if (!abtsiocbq) { |
| 12975 | if (phba->sli_rev == LPFC_SLI_REV4) |
| 12976 | spin_unlock(lock: &pring_s4->ring_lock); |
| 12977 | spin_unlock(lock: &lpfc_cmd->buf_lock); |
| 12978 | continue; |
| 12979 | } |
| 12980 | |
| 12981 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 12982 | iotag = abtsiocbq->iotag; |
| 12983 | ulp_context = iocbq->sli4_xritag; |
| 12984 | cqid = lpfc_cmd->hdwq->io_cq_map; |
| 12985 | } else { |
| 12986 | iotag = iocbq->iocb.ulpIoTag; |
| 12987 | if (pring->ringno == LPFC_ELS_RING) { |
| 12988 | ndlp = iocbq->ndlp; |
| 12989 | ulp_context = ndlp->nlp_rpi; |
| 12990 | } else { |
| 12991 | ulp_context = iocbq->iocb.ulpContext; |
| 12992 | } |
| 12993 | } |
| 12994 | |
| 12995 | ndlp = lpfc_cmd->rdata->pnode; |
| 12996 | |
| 12997 | if (lpfc_is_link_up(phba) && |
| 12998 | (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) && |
| 12999 | !(phba->link_flag & LS_EXTERNAL_LOOPBACK)) |
| 13000 | ia = false; |
| 13001 | else |
| 13002 | ia = true; |
| 13003 | |
| 13004 | lpfc_sli_prep_abort_xri(phba, cmdiocbq: abtsiocbq, ulp_context, iotag, |
| 13005 | ulp_class: iocbq->iocb.ulpClass, cqid, |
| 13006 | ia, wqec: false); |
| 13007 | |
| 13008 | abtsiocbq->vport = vport; |
| 13009 | |
| 13010 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ |
| 13011 | abtsiocbq->hba_wqidx = iocbq->hba_wqidx; |
| 13012 | if (iocbq->cmd_flag & LPFC_IO_FCP) |
| 13013 | abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX; |
| 13014 | if (iocbq->cmd_flag & LPFC_IO_FOF) |
| 13015 | abtsiocbq->cmd_flag |= LPFC_IO_FOF; |
| 13016 | |
| 13017 | /* Setup callback routine and issue the command. */ |
| 13018 | abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl; |
| 13019 | |
| 13020 | /* |
| 13021 | * Indicate the IO is being aborted by the driver and set |
| 13022 | * the caller's flag into the aborted IO. |
| 13023 | */ |
| 13024 | iocbq->cmd_flag |= LPFC_DRIVER_ABORTED; |
| 13025 | |
| 13026 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 13027 | ret_val = __lpfc_sli_issue_iocb(phba, ring_number: pring_s4->ringno, |
| 13028 | piocb: abtsiocbq, flag: 0); |
| 13029 | spin_unlock(lock: &pring_s4->ring_lock); |
| 13030 | } else { |
| 13031 | ret_val = __lpfc_sli_issue_iocb(phba, ring_number: pring->ringno, |
| 13032 | piocb: abtsiocbq, flag: 0); |
| 13033 | } |
| 13034 | |
| 13035 | spin_unlock(lock: &lpfc_cmd->buf_lock); |
| 13036 | |
| 13037 | if (ret_val == IOCB_ERROR) |
| 13038 | __lpfc_sli_release_iocbq(phba, iocbq: abtsiocbq); |
| 13039 | else |
| 13040 | sum++; |
| 13041 | } |
| 13042 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 13043 | return sum; |
| 13044 | } |
| 13045 | |
| 13046 | /** |
| 13047 | * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler |
| 13048 | * @phba: Pointer to HBA context object. |
| 13049 | * @cmdiocbq: Pointer to command iocb. |
| 13050 | * @rspiocbq: Pointer to response iocb. |
| 13051 | * |
| 13052 | * This function is the completion handler for iocbs issued using |
| 13053 | * lpfc_sli_issue_iocb_wait function. This function is called by the |
| 13054 | * ring event handler function without any lock held. This function |
| 13055 | * can be called from both worker thread context and interrupt |
| 13056 | * context. This function also can be called from other thread which |
| 13057 | * cleans up the SLI layer objects. |
| 13058 | * This function copy the contents of the response iocb to the |
| 13059 | * response iocb memory object provided by the caller of |
| 13060 | * lpfc_sli_issue_iocb_wait and then wakes up the thread which |
| 13061 | * sleeps for the iocb completion. |
| 13062 | **/ |
| 13063 | static void |
| 13064 | lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, |
| 13065 | struct lpfc_iocbq *cmdiocbq, |
| 13066 | struct lpfc_iocbq *rspiocbq) |
| 13067 | { |
| 13068 | wait_queue_head_t *pdone_q; |
| 13069 | unsigned long iflags; |
| 13070 | struct lpfc_io_buf *lpfc_cmd; |
| 13071 | size_t offset = offsetof(struct lpfc_iocbq, wqe); |
| 13072 | |
| 13073 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 13074 | if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) { |
| 13075 | |
| 13076 | /* |
| 13077 | * A time out has occurred for the iocb. If a time out |
| 13078 | * completion handler has been supplied, call it. Otherwise, |
| 13079 | * just free the iocbq. |
| 13080 | */ |
| 13081 | |
| 13082 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 13083 | cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl; |
| 13084 | cmdiocbq->wait_cmd_cmpl = NULL; |
| 13085 | if (cmdiocbq->cmd_cmpl) |
| 13086 | cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL); |
| 13087 | else |
| 13088 | lpfc_sli_release_iocbq(phba, iocbq: cmdiocbq); |
| 13089 | return; |
| 13090 | } |
| 13091 | |
| 13092 | /* Copy the contents of the local rspiocb into the caller's buffer. */ |
| 13093 | cmdiocbq->cmd_flag |= LPFC_IO_WAKE; |
| 13094 | if (cmdiocbq->rsp_iocb && rspiocbq) |
| 13095 | memcpy((char *)cmdiocbq->rsp_iocb + offset, |
| 13096 | (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset); |
| 13097 | |
| 13098 | /* Set the exchange busy flag for task management commands */ |
| 13099 | if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) && |
| 13100 | !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { |
| 13101 | lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, |
| 13102 | cur_iocbq); |
| 13103 | if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY)) |
| 13104 | lpfc_cmd->flags |= LPFC_SBUF_XBUSY; |
| 13105 | else |
| 13106 | lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; |
| 13107 | } |
| 13108 | |
| 13109 | pdone_q = cmdiocbq->context_un.wait_queue; |
| 13110 | if (pdone_q) |
| 13111 | wake_up(pdone_q); |
| 13112 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 13113 | return; |
| 13114 | } |
| 13115 | |
| 13116 | /** |
| 13117 | * lpfc_chk_iocb_flg - Test IOCB flag with lock held. |
| 13118 | * @phba: Pointer to HBA context object.. |
| 13119 | * @piocbq: Pointer to command iocb. |
| 13120 | * @flag: Flag to test. |
| 13121 | * |
| 13122 | * This routine grabs the hbalock and then test the cmd_flag to |
| 13123 | * see if the passed in flag is set. |
| 13124 | * Returns: |
| 13125 | * 1 if flag is set. |
| 13126 | * 0 if flag is not set. |
| 13127 | **/ |
| 13128 | static int |
| 13129 | lpfc_chk_iocb_flg(struct lpfc_hba *phba, |
| 13130 | struct lpfc_iocbq *piocbq, uint32_t flag) |
| 13131 | { |
| 13132 | unsigned long iflags; |
| 13133 | int ret; |
| 13134 | |
| 13135 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 13136 | ret = piocbq->cmd_flag & flag; |
| 13137 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 13138 | return ret; |
| 13139 | |
| 13140 | } |
| 13141 | |
| 13142 | /** |
| 13143 | * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands |
| 13144 | * @phba: Pointer to HBA context object.. |
| 13145 | * @ring_number: Ring number |
| 13146 | * @piocb: Pointer to command iocb. |
| 13147 | * @prspiocbq: Pointer to response iocb. |
| 13148 | * @timeout: Timeout in number of seconds. |
| 13149 | * |
| 13150 | * This function issues the iocb to firmware and waits for the |
| 13151 | * iocb to complete. The cmd_cmpl field of the shall be used |
| 13152 | * to handle iocbs which time out. If the field is NULL, the |
| 13153 | * function shall free the iocbq structure. If more clean up is |
| 13154 | * needed, the caller is expected to provide a completion function |
| 13155 | * that will provide the needed clean up. If the iocb command is |
| 13156 | * not completed within timeout seconds, the function will either |
| 13157 | * free the iocbq structure (if cmd_cmpl == NULL) or execute the |
| 13158 | * completion function set in the cmd_cmpl field and then return |
| 13159 | * a status of IOCB_TIMEDOUT. The caller should not free the iocb |
| 13160 | * resources if this function returns IOCB_TIMEDOUT. |
| 13161 | * The function waits for the iocb completion using an |
| 13162 | * non-interruptible wait. |
| 13163 | * This function will sleep while waiting for iocb completion. |
| 13164 | * So, this function should not be called from any context which |
| 13165 | * does not allow sleeping. Due to the same reason, this function |
| 13166 | * cannot be called with interrupt disabled. |
| 13167 | * This function assumes that the iocb completions occur while |
| 13168 | * this function sleep. So, this function cannot be called from |
| 13169 | * the thread which process iocb completion for this ring. |
| 13170 | * This function clears the cmd_flag of the iocb object before |
| 13171 | * issuing the iocb and the iocb completion handler sets this |
| 13172 | * flag and wakes this thread when the iocb completes. |
| 13173 | * The contents of the response iocb will be copied to prspiocbq |
| 13174 | * by the completion handler when the command completes. |
| 13175 | * This function returns IOCB_SUCCESS when success. |
| 13176 | * This function is called with no lock held. |
| 13177 | **/ |
| 13178 | int |
| 13179 | lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, |
| 13180 | uint32_t ring_number, |
| 13181 | struct lpfc_iocbq *piocb, |
| 13182 | struct lpfc_iocbq *prspiocbq, |
| 13183 | uint32_t timeout) |
| 13184 | { |
| 13185 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); |
| 13186 | long timeleft, timeout_req = 0; |
| 13187 | int retval = IOCB_SUCCESS; |
| 13188 | uint32_t creg_val; |
| 13189 | struct lpfc_iocbq *iocb; |
| 13190 | int txq_cnt = 0; |
| 13191 | int txcmplq_cnt = 0; |
| 13192 | struct lpfc_sli_ring *pring; |
| 13193 | unsigned long iflags; |
| 13194 | bool iocb_completed = true; |
| 13195 | |
| 13196 | if (phba->sli_rev >= LPFC_SLI_REV4) { |
| 13197 | lpfc_sli_prep_wqe(phba, job: piocb); |
| 13198 | |
| 13199 | pring = lpfc_sli4_calc_ring(phba, piocb); |
| 13200 | } else |
| 13201 | pring = &phba->sli.sli3_ring[ring_number]; |
| 13202 | /* |
| 13203 | * If the caller has provided a response iocbq buffer, then rsp_iocb |
| 13204 | * is NULL or its an error. |
| 13205 | */ |
| 13206 | if (prspiocbq) { |
| 13207 | if (piocb->rsp_iocb) |
| 13208 | return IOCB_ERROR; |
| 13209 | piocb->rsp_iocb = prspiocbq; |
| 13210 | } |
| 13211 | |
| 13212 | piocb->wait_cmd_cmpl = piocb->cmd_cmpl; |
| 13213 | piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait; |
| 13214 | piocb->context_un.wait_queue = &done_q; |
| 13215 | piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); |
| 13216 | |
| 13217 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
| 13218 | if (lpfc_readl(addr: phba->HCregaddr, data: &creg_val)) |
| 13219 | return IOCB_ERROR; |
| 13220 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); |
| 13221 | writel(val: creg_val, addr: phba->HCregaddr); |
| 13222 | readl(addr: phba->HCregaddr); /* flush */ |
| 13223 | } |
| 13224 | |
| 13225 | retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, |
| 13226 | SLI_IOCB_RET_IOCB); |
| 13227 | if (retval == IOCB_SUCCESS) { |
| 13228 | timeout_req = secs_to_jiffies(timeout); |
| 13229 | timeleft = wait_event_timeout(done_q, |
| 13230 | lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), |
| 13231 | timeout_req); |
| 13232 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 13233 | if (!(piocb->cmd_flag & LPFC_IO_WAKE)) { |
| 13234 | |
| 13235 | /* |
| 13236 | * IOCB timed out. Inform the wake iocb wait |
| 13237 | * completion function and set local status |
| 13238 | */ |
| 13239 | |
| 13240 | iocb_completed = false; |
| 13241 | piocb->cmd_flag |= LPFC_IO_WAKE_TMO; |
| 13242 | } |
| 13243 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 13244 | if (iocb_completed) { |
| 13245 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 13246 | "0331 IOCB wake signaled\n" ); |
| 13247 | /* Note: we are not indicating if the IOCB has a success |
| 13248 | * status or not - that's for the caller to check. |
| 13249 | * IOCB_SUCCESS means just that the command was sent and |
| 13250 | * completed. Not that it completed successfully. |
| 13251 | * */ |
| 13252 | } else if (timeleft == 0) { |
| 13253 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 13254 | "0338 IOCB wait timeout error - no " |
| 13255 | "wake response Data x%x\n" , timeout); |
| 13256 | retval = IOCB_TIMEDOUT; |
| 13257 | } else { |
| 13258 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 13259 | "0330 IOCB wake NOT set, " |
| 13260 | "Data x%x x%lx\n" , |
| 13261 | timeout, (timeleft / jiffies)); |
| 13262 | retval = IOCB_TIMEDOUT; |
| 13263 | } |
| 13264 | } else if (retval == IOCB_BUSY) { |
| 13265 | if (phba->cfg_log_verbose & LOG_SLI) { |
| 13266 | list_for_each_entry(iocb, &pring->txq, list) { |
| 13267 | txq_cnt++; |
| 13268 | } |
| 13269 | list_for_each_entry(iocb, &pring->txcmplq, list) { |
| 13270 | txcmplq_cnt++; |
| 13271 | } |
| 13272 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 13273 | "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n" , |
| 13274 | phba->iocb_cnt, txq_cnt, txcmplq_cnt); |
| 13275 | } |
| 13276 | return retval; |
| 13277 | } else { |
| 13278 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 13279 | "0332 IOCB wait issue failed, Data x%x\n" , |
| 13280 | retval); |
| 13281 | retval = IOCB_ERROR; |
| 13282 | } |
| 13283 | |
| 13284 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
| 13285 | if (lpfc_readl(addr: phba->HCregaddr, data: &creg_val)) |
| 13286 | return IOCB_ERROR; |
| 13287 | creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); |
| 13288 | writel(val: creg_val, addr: phba->HCregaddr); |
| 13289 | readl(addr: phba->HCregaddr); /* flush */ |
| 13290 | } |
| 13291 | |
| 13292 | if (prspiocbq) |
| 13293 | piocb->rsp_iocb = NULL; |
| 13294 | |
| 13295 | piocb->context_un.wait_queue = NULL; |
| 13296 | piocb->cmd_cmpl = NULL; |
| 13297 | return retval; |
| 13298 | } |
| 13299 | |
| 13300 | /** |
| 13301 | * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox |
| 13302 | * @phba: Pointer to HBA context object. |
| 13303 | * @pmboxq: Pointer to driver mailbox object. |
| 13304 | * @timeout: Timeout in number of seconds. |
| 13305 | * |
| 13306 | * This function issues the mailbox to firmware and waits for the |
| 13307 | * mailbox command to complete. If the mailbox command is not |
| 13308 | * completed within timeout seconds, it returns MBX_TIMEOUT. |
| 13309 | * The function waits for the mailbox completion using an |
| 13310 | * interruptible wait. If the thread is woken up due to a |
| 13311 | * signal, MBX_TIMEOUT error is returned to the caller. Caller |
| 13312 | * should not free the mailbox resources, if this function returns |
| 13313 | * MBX_TIMEOUT. |
| 13314 | * This function will sleep while waiting for mailbox completion. |
| 13315 | * So, this function should not be called from any context which |
| 13316 | * does not allow sleeping. Due to the same reason, this function |
| 13317 | * cannot be called with interrupt disabled. |
| 13318 | * This function assumes that the mailbox completion occurs while |
| 13319 | * this function sleep. So, this function cannot be called from |
| 13320 | * the worker thread which processes mailbox completion. |
| 13321 | * This function is called in the context of HBA management |
| 13322 | * applications. |
| 13323 | * This function returns MBX_SUCCESS when successful. |
| 13324 | * This function is called with no lock held. |
| 13325 | **/ |
| 13326 | int |
| 13327 | lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, |
| 13328 | uint32_t timeout) |
| 13329 | { |
| 13330 | struct completion mbox_done; |
| 13331 | int retval; |
| 13332 | unsigned long flag; |
| 13333 | |
| 13334 | pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; |
| 13335 | /* setup wake call as IOCB callback */ |
| 13336 | pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; |
| 13337 | |
| 13338 | /* setup ctx_u field to pass wait_queue pointer to wake function */ |
| 13339 | init_completion(x: &mbox_done); |
| 13340 | pmboxq->ctx_u.mbox_wait = &mbox_done; |
| 13341 | /* now issue the command */ |
| 13342 | retval = lpfc_sli_issue_mbox(phba, pmbox: pmboxq, MBX_NOWAIT); |
| 13343 | if (retval == MBX_BUSY || retval == MBX_SUCCESS) { |
| 13344 | wait_for_completion_timeout(x: &mbox_done, secs_to_jiffies(timeout)); |
| 13345 | |
| 13346 | spin_lock_irqsave(&phba->hbalock, flag); |
| 13347 | pmboxq->ctx_u.mbox_wait = NULL; |
| 13348 | /* |
| 13349 | * if LPFC_MBX_WAKE flag is set the mailbox is completed |
| 13350 | * else do not free the resources. |
| 13351 | */ |
| 13352 | if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { |
| 13353 | retval = MBX_SUCCESS; |
| 13354 | } else { |
| 13355 | retval = MBX_TIMEOUT; |
| 13356 | pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 13357 | } |
| 13358 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: flag); |
| 13359 | } |
| 13360 | return retval; |
| 13361 | } |
| 13362 | |
| 13363 | /** |
| 13364 | * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system |
| 13365 | * @phba: Pointer to HBA context. |
| 13366 | * @mbx_action: Mailbox shutdown options. |
| 13367 | * |
| 13368 | * This function is called to shutdown the driver's mailbox sub-system. |
| 13369 | * It first marks the mailbox sub-system is in a block state to prevent |
| 13370 | * the asynchronous mailbox command from issued off the pending mailbox |
| 13371 | * command queue. If the mailbox command sub-system shutdown is due to |
| 13372 | * HBA error conditions such as EEH or ERATT, this routine shall invoke |
| 13373 | * the mailbox sub-system flush routine to forcefully bring down the |
| 13374 | * mailbox sub-system. Otherwise, if it is due to normal condition (such |
| 13375 | * as with offline or HBA function reset), this routine will wait for the |
| 13376 | * outstanding mailbox command to complete before invoking the mailbox |
| 13377 | * sub-system flush routine to gracefully bring down mailbox sub-system. |
| 13378 | **/ |
| 13379 | void |
| 13380 | lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) |
| 13381 | { |
| 13382 | struct lpfc_sli *psli = &phba->sli; |
| 13383 | unsigned long timeout; |
| 13384 | |
| 13385 | if (mbx_action == LPFC_MBX_NO_WAIT) { |
| 13386 | /* delay 100ms for port state */ |
| 13387 | msleep(msecs: 100); |
| 13388 | lpfc_sli_mbox_sys_flush(phba); |
| 13389 | return; |
| 13390 | } |
| 13391 | timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies; |
| 13392 | |
| 13393 | /* Disable softirqs, including timers from obtaining phba->hbalock */ |
| 13394 | local_bh_disable(); |
| 13395 | |
| 13396 | spin_lock_irq(lock: &phba->hbalock); |
| 13397 | psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; |
| 13398 | |
| 13399 | if (psli->sli_flag & LPFC_SLI_ACTIVE) { |
| 13400 | /* Determine how long we might wait for the active mailbox |
| 13401 | * command to be gracefully completed by firmware. |
| 13402 | */ |
| 13403 | if (phba->sli.mbox_active) |
| 13404 | timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, |
| 13405 | phba->sli.mbox_active)) + jiffies; |
| 13406 | spin_unlock_irq(lock: &phba->hbalock); |
| 13407 | |
| 13408 | /* Enable softirqs again, done with phba->hbalock */ |
| 13409 | local_bh_enable(); |
| 13410 | |
| 13411 | while (phba->sli.mbox_active) { |
| 13412 | /* Check active mailbox complete status every 2ms */ |
| 13413 | msleep(msecs: 2); |
| 13414 | if (time_after(jiffies, timeout)) |
| 13415 | /* Timeout, let the mailbox flush routine to |
| 13416 | * forcefully release active mailbox command |
| 13417 | */ |
| 13418 | break; |
| 13419 | } |
| 13420 | } else { |
| 13421 | spin_unlock_irq(lock: &phba->hbalock); |
| 13422 | |
| 13423 | /* Enable softirqs again, done with phba->hbalock */ |
| 13424 | local_bh_enable(); |
| 13425 | } |
| 13426 | |
| 13427 | lpfc_sli_mbox_sys_flush(phba); |
| 13428 | } |
| 13429 | |
| 13430 | /** |
| 13431 | * lpfc_sli_eratt_read - read sli-3 error attention events |
| 13432 | * @phba: Pointer to HBA context. |
| 13433 | * |
| 13434 | * This function is called to read the SLI3 device error attention registers |
| 13435 | * for possible error attention events. The caller must hold the hostlock |
| 13436 | * with spin_lock_irq(). |
| 13437 | * |
| 13438 | * This function returns 1 when there is Error Attention in the Host Attention |
| 13439 | * Register and returns 0 otherwise. |
| 13440 | **/ |
| 13441 | static int |
| 13442 | lpfc_sli_eratt_read(struct lpfc_hba *phba) |
| 13443 | { |
| 13444 | uint32_t ha_copy; |
| 13445 | |
| 13446 | /* Read chip Host Attention (HA) register */ |
| 13447 | if (lpfc_readl(addr: phba->HAregaddr, data: &ha_copy)) |
| 13448 | goto unplug_err; |
| 13449 | |
| 13450 | if (ha_copy & HA_ERATT) { |
| 13451 | /* Read host status register to retrieve error event */ |
| 13452 | if (lpfc_sli_read_hs(phba)) |
| 13453 | goto unplug_err; |
| 13454 | |
| 13455 | /* Check if there is a deferred error condition is active */ |
| 13456 | if ((HS_FFER1 & phba->work_hs) && |
| 13457 | ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | |
| 13458 | HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { |
| 13459 | set_bit(nr: DEFER_ERATT, addr: &phba->hba_flag); |
| 13460 | /* Clear all interrupt enable conditions */ |
| 13461 | writel(val: 0, addr: phba->HCregaddr); |
| 13462 | readl(addr: phba->HCregaddr); |
| 13463 | } |
| 13464 | |
| 13465 | /* Set the driver HA work bitmap */ |
| 13466 | phba->work_ha |= HA_ERATT; |
| 13467 | /* Indicate polling handles this ERATT */ |
| 13468 | set_bit(nr: HBA_ERATT_HANDLED, addr: &phba->hba_flag); |
| 13469 | return 1; |
| 13470 | } |
| 13471 | return 0; |
| 13472 | |
| 13473 | unplug_err: |
| 13474 | /* Set the driver HS work bitmap */ |
| 13475 | phba->work_hs |= UNPLUG_ERR; |
| 13476 | /* Set the driver HA work bitmap */ |
| 13477 | phba->work_ha |= HA_ERATT; |
| 13478 | /* Indicate polling handles this ERATT */ |
| 13479 | set_bit(nr: HBA_ERATT_HANDLED, addr: &phba->hba_flag); |
| 13480 | return 1; |
| 13481 | } |
| 13482 | |
| 13483 | /** |
| 13484 | * lpfc_sli4_eratt_read - read sli-4 error attention events |
| 13485 | * @phba: Pointer to HBA context. |
| 13486 | * |
| 13487 | * This function is called to read the SLI4 device error attention registers |
| 13488 | * for possible error attention events. The caller must hold the hostlock |
| 13489 | * with spin_lock_irq(). |
| 13490 | * |
| 13491 | * This function returns 1 when there is Error Attention in the Host Attention |
| 13492 | * Register and returns 0 otherwise. |
| 13493 | **/ |
| 13494 | static int |
| 13495 | lpfc_sli4_eratt_read(struct lpfc_hba *phba) |
| 13496 | { |
| 13497 | uint32_t uerr_sta_hi, uerr_sta_lo; |
| 13498 | uint32_t if_type, portsmphr; |
| 13499 | struct lpfc_register portstat_reg; |
| 13500 | u32 logmask; |
| 13501 | |
| 13502 | /* |
| 13503 | * For now, use the SLI4 device internal unrecoverable error |
| 13504 | * registers for error attention. This can be changed later. |
| 13505 | */ |
| 13506 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
| 13507 | switch (if_type) { |
| 13508 | case LPFC_SLI_INTF_IF_TYPE_0: |
| 13509 | if (lpfc_readl(addr: phba->sli4_hba.u.if_type0.UERRLOregaddr, |
| 13510 | data: &uerr_sta_lo) || |
| 13511 | lpfc_readl(addr: phba->sli4_hba.u.if_type0.UERRHIregaddr, |
| 13512 | data: &uerr_sta_hi)) { |
| 13513 | phba->work_hs |= UNPLUG_ERR; |
| 13514 | phba->work_ha |= HA_ERATT; |
| 13515 | set_bit(nr: HBA_ERATT_HANDLED, addr: &phba->hba_flag); |
| 13516 | return 1; |
| 13517 | } |
| 13518 | if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || |
| 13519 | (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { |
| 13520 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 13521 | "1423 HBA Unrecoverable error: " |
| 13522 | "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " |
| 13523 | "ue_mask_lo_reg=0x%x, " |
| 13524 | "ue_mask_hi_reg=0x%x\n" , |
| 13525 | uerr_sta_lo, uerr_sta_hi, |
| 13526 | phba->sli4_hba.ue_mask_lo, |
| 13527 | phba->sli4_hba.ue_mask_hi); |
| 13528 | phba->work_status[0] = uerr_sta_lo; |
| 13529 | phba->work_status[1] = uerr_sta_hi; |
| 13530 | phba->work_ha |= HA_ERATT; |
| 13531 | set_bit(nr: HBA_ERATT_HANDLED, addr: &phba->hba_flag); |
| 13532 | return 1; |
| 13533 | } |
| 13534 | break; |
| 13535 | case LPFC_SLI_INTF_IF_TYPE_2: |
| 13536 | case LPFC_SLI_INTF_IF_TYPE_6: |
| 13537 | if (lpfc_readl(addr: phba->sli4_hba.u.if_type2.STATUSregaddr, |
| 13538 | data: &portstat_reg.word0) || |
| 13539 | lpfc_readl(addr: phba->sli4_hba.PSMPHRregaddr, |
| 13540 | data: &portsmphr)){ |
| 13541 | phba->work_hs |= UNPLUG_ERR; |
| 13542 | phba->work_ha |= HA_ERATT; |
| 13543 | set_bit(nr: HBA_ERATT_HANDLED, addr: &phba->hba_flag); |
| 13544 | return 1; |
| 13545 | } |
| 13546 | if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { |
| 13547 | phba->work_status[0] = |
| 13548 | readl(addr: phba->sli4_hba.u.if_type2.ERR1regaddr); |
| 13549 | phba->work_status[1] = |
| 13550 | readl(addr: phba->sli4_hba.u.if_type2.ERR2regaddr); |
| 13551 | logmask = LOG_TRACE_EVENT; |
| 13552 | if (phba->work_status[0] == |
| 13553 | SLIPORT_ERR1_REG_ERR_CODE_2 && |
| 13554 | phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART) |
| 13555 | logmask = LOG_SLI; |
| 13556 | lpfc_printf_log(phba, KERN_ERR, logmask, |
| 13557 | "2885 Port Status Event: " |
| 13558 | "port status reg 0x%x, " |
| 13559 | "port smphr reg 0x%x, " |
| 13560 | "error 1=0x%x, error 2=0x%x\n" , |
| 13561 | portstat_reg.word0, |
| 13562 | portsmphr, |
| 13563 | phba->work_status[0], |
| 13564 | phba->work_status[1]); |
| 13565 | phba->work_ha |= HA_ERATT; |
| 13566 | set_bit(nr: HBA_ERATT_HANDLED, addr: &phba->hba_flag); |
| 13567 | return 1; |
| 13568 | } |
| 13569 | break; |
| 13570 | case LPFC_SLI_INTF_IF_TYPE_1: |
| 13571 | default: |
| 13572 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 13573 | "2886 HBA Error Attention on unsupported " |
| 13574 | "if type %d." , if_type); |
| 13575 | return 1; |
| 13576 | } |
| 13577 | |
| 13578 | return 0; |
| 13579 | } |
| 13580 | |
| 13581 | /** |
| 13582 | * lpfc_sli_check_eratt - check error attention events |
| 13583 | * @phba: Pointer to HBA context. |
| 13584 | * |
| 13585 | * This function is called from timer soft interrupt context to check HBA's |
| 13586 | * error attention register bit for error attention events. |
| 13587 | * |
| 13588 | * This function returns 1 when there is Error Attention in the Host Attention |
| 13589 | * Register and returns 0 otherwise. |
| 13590 | **/ |
| 13591 | int |
| 13592 | lpfc_sli_check_eratt(struct lpfc_hba *phba) |
| 13593 | { |
| 13594 | uint32_t ha_copy; |
| 13595 | |
| 13596 | /* If somebody is waiting to handle an eratt, don't process it |
| 13597 | * here. The brdkill function will do this. |
| 13598 | */ |
| 13599 | if (phba->link_flag & LS_IGNORE_ERATT) |
| 13600 | return 0; |
| 13601 | |
| 13602 | /* Check if interrupt handler handles this ERATT */ |
| 13603 | if (test_bit(HBA_ERATT_HANDLED, &phba->hba_flag)) |
| 13604 | /* Interrupt handler has handled ERATT */ |
| 13605 | return 0; |
| 13606 | |
| 13607 | /* |
| 13608 | * If there is deferred error attention, do not check for error |
| 13609 | * attention |
| 13610 | */ |
| 13611 | if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) |
| 13612 | return 0; |
| 13613 | |
| 13614 | spin_lock_irq(lock: &phba->hbalock); |
| 13615 | /* If PCI channel is offline, don't process it */ |
| 13616 | if (unlikely(pci_channel_offline(phba->pcidev))) { |
| 13617 | spin_unlock_irq(lock: &phba->hbalock); |
| 13618 | return 0; |
| 13619 | } |
| 13620 | |
| 13621 | switch (phba->sli_rev) { |
| 13622 | case LPFC_SLI_REV2: |
| 13623 | case LPFC_SLI_REV3: |
| 13624 | /* Read chip Host Attention (HA) register */ |
| 13625 | ha_copy = lpfc_sli_eratt_read(phba); |
| 13626 | break; |
| 13627 | case LPFC_SLI_REV4: |
| 13628 | /* Read device Uncoverable Error (UERR) registers */ |
| 13629 | ha_copy = lpfc_sli4_eratt_read(phba); |
| 13630 | break; |
| 13631 | default: |
| 13632 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 13633 | "0299 Invalid SLI revision (%d)\n" , |
| 13634 | phba->sli_rev); |
| 13635 | ha_copy = 0; |
| 13636 | break; |
| 13637 | } |
| 13638 | spin_unlock_irq(lock: &phba->hbalock); |
| 13639 | |
| 13640 | return ha_copy; |
| 13641 | } |
| 13642 | |
| 13643 | /** |
| 13644 | * lpfc_intr_state_check - Check device state for interrupt handling |
| 13645 | * @phba: Pointer to HBA context. |
| 13646 | * |
| 13647 | * This inline routine checks whether a device or its PCI slot is in a state |
| 13648 | * that the interrupt should be handled. |
| 13649 | * |
| 13650 | * This function returns 0 if the device or the PCI slot is in a state that |
| 13651 | * interrupt should be handled, otherwise -EIO. |
| 13652 | */ |
| 13653 | static inline int |
| 13654 | lpfc_intr_state_check(struct lpfc_hba *phba) |
| 13655 | { |
| 13656 | /* If the pci channel is offline, ignore all the interrupts */ |
| 13657 | if (unlikely(pci_channel_offline(phba->pcidev))) |
| 13658 | return -EIO; |
| 13659 | |
| 13660 | /* Update device level interrupt statistics */ |
| 13661 | phba->sli.slistat.sli_intr++; |
| 13662 | |
| 13663 | /* Ignore all interrupts during initialization. */ |
| 13664 | if (unlikely(phba->link_state < LPFC_LINK_DOWN)) |
| 13665 | return -EIO; |
| 13666 | |
| 13667 | return 0; |
| 13668 | } |
| 13669 | |
| 13670 | /** |
| 13671 | * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device |
| 13672 | * @irq: Interrupt number. |
| 13673 | * @dev_id: The device context pointer. |
| 13674 | * |
| 13675 | * This function is directly called from the PCI layer as an interrupt |
| 13676 | * service routine when device with SLI-3 interface spec is enabled with |
| 13677 | * MSI-X multi-message interrupt mode and there are slow-path events in |
| 13678 | * the HBA. However, when the device is enabled with either MSI or Pin-IRQ |
| 13679 | * interrupt mode, this function is called as part of the device-level |
| 13680 | * interrupt handler. When the PCI slot is in error recovery or the HBA |
| 13681 | * is undergoing initialization, the interrupt handler will not process |
| 13682 | * the interrupt. The link attention and ELS ring attention events are |
| 13683 | * handled by the worker thread. The interrupt handler signals the worker |
| 13684 | * thread and returns for these events. This function is called without |
| 13685 | * any lock held. It gets the hbalock to access and update SLI data |
| 13686 | * structures. |
| 13687 | * |
| 13688 | * This function returns IRQ_HANDLED when interrupt is handled else it |
| 13689 | * returns IRQ_NONE. |
| 13690 | **/ |
| 13691 | irqreturn_t |
| 13692 | lpfc_sli_sp_intr_handler(int irq, void *dev_id) |
| 13693 | { |
| 13694 | struct lpfc_hba *phba; |
| 13695 | uint32_t ha_copy, hc_copy; |
| 13696 | uint32_t work_ha_copy; |
| 13697 | unsigned long status; |
| 13698 | unsigned long iflag; |
| 13699 | uint32_t control; |
| 13700 | |
| 13701 | MAILBOX_t *mbox, *pmbox; |
| 13702 | struct lpfc_vport *vport; |
| 13703 | struct lpfc_nodelist *ndlp; |
| 13704 | struct lpfc_dmabuf *mp; |
| 13705 | LPFC_MBOXQ_t *pmb; |
| 13706 | int rc; |
| 13707 | |
| 13708 | /* |
| 13709 | * Get the driver's phba structure from the dev_id and |
| 13710 | * assume the HBA is not interrupting. |
| 13711 | */ |
| 13712 | phba = (struct lpfc_hba *)dev_id; |
| 13713 | |
| 13714 | if (unlikely(!phba)) |
| 13715 | return IRQ_NONE; |
| 13716 | |
| 13717 | /* |
| 13718 | * Stuff needs to be attented to when this function is invoked as an |
| 13719 | * individual interrupt handler in MSI-X multi-message interrupt mode |
| 13720 | */ |
| 13721 | if (phba->intr_type == MSIX) { |
| 13722 | /* Check device state for handling interrupt */ |
| 13723 | if (lpfc_intr_state_check(phba)) |
| 13724 | return IRQ_NONE; |
| 13725 | /* Need to read HA REG for slow-path events */ |
| 13726 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 13727 | if (lpfc_readl(addr: phba->HAregaddr, data: &ha_copy)) |
| 13728 | goto unplug_error; |
| 13729 | /* If somebody is waiting to handle an eratt don't process it |
| 13730 | * here. The brdkill function will do this. |
| 13731 | */ |
| 13732 | if (phba->link_flag & LS_IGNORE_ERATT) |
| 13733 | ha_copy &= ~HA_ERATT; |
| 13734 | /* Check the need for handling ERATT in interrupt handler */ |
| 13735 | if (ha_copy & HA_ERATT) { |
| 13736 | if (test_and_set_bit(nr: HBA_ERATT_HANDLED, |
| 13737 | addr: &phba->hba_flag)) |
| 13738 | /* ERATT polling has handled ERATT */ |
| 13739 | ha_copy &= ~HA_ERATT; |
| 13740 | } |
| 13741 | |
| 13742 | /* |
| 13743 | * If there is deferred error attention, do not check for any |
| 13744 | * interrupt. |
| 13745 | */ |
| 13746 | if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { |
| 13747 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 13748 | return IRQ_NONE; |
| 13749 | } |
| 13750 | |
| 13751 | /* Clear up only attention source related to slow-path */ |
| 13752 | if (lpfc_readl(addr: phba->HCregaddr, data: &hc_copy)) |
| 13753 | goto unplug_error; |
| 13754 | |
| 13755 | writel(val: hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | |
| 13756 | HC_LAINT_ENA | HC_ERINT_ENA), |
| 13757 | addr: phba->HCregaddr); |
| 13758 | writel(val: (ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), |
| 13759 | addr: phba->HAregaddr); |
| 13760 | writel(val: hc_copy, addr: phba->HCregaddr); |
| 13761 | readl(addr: phba->HAregaddr); /* flush */ |
| 13762 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 13763 | } else |
| 13764 | ha_copy = phba->ha_copy; |
| 13765 | |
| 13766 | work_ha_copy = ha_copy & phba->work_ha_mask; |
| 13767 | |
| 13768 | if (work_ha_copy) { |
| 13769 | if (work_ha_copy & HA_LATT) { |
| 13770 | if (phba->sli.sli_flag & LPFC_PROCESS_LA) { |
| 13771 | /* |
| 13772 | * Turn off Link Attention interrupts |
| 13773 | * until CLEAR_LA done |
| 13774 | */ |
| 13775 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 13776 | phba->sli.sli_flag &= ~LPFC_PROCESS_LA; |
| 13777 | if (lpfc_readl(addr: phba->HCregaddr, data: &control)) |
| 13778 | goto unplug_error; |
| 13779 | control &= ~HC_LAINT_ENA; |
| 13780 | writel(val: control, addr: phba->HCregaddr); |
| 13781 | readl(addr: phba->HCregaddr); /* flush */ |
| 13782 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 13783 | } |
| 13784 | else |
| 13785 | work_ha_copy &= ~HA_LATT; |
| 13786 | } |
| 13787 | |
| 13788 | if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { |
| 13789 | /* |
| 13790 | * Turn off Slow Rings interrupts, LPFC_ELS_RING is |
| 13791 | * the only slow ring. |
| 13792 | */ |
| 13793 | status = (work_ha_copy & |
| 13794 | (HA_RXMASK << (4*LPFC_ELS_RING))); |
| 13795 | status >>= (4*LPFC_ELS_RING); |
| 13796 | if (status & HA_RXMASK) { |
| 13797 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 13798 | if (lpfc_readl(addr: phba->HCregaddr, data: &control)) |
| 13799 | goto unplug_error; |
| 13800 | |
| 13801 | lpfc_debugfs_slow_ring_trc(phba, |
| 13802 | "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x" , |
| 13803 | control, status, |
| 13804 | (uint32_t)phba->sli.slistat.sli_intr); |
| 13805 | |
| 13806 | if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { |
| 13807 | lpfc_debugfs_slow_ring_trc(phba, |
| 13808 | "ISR Disable ring:" |
| 13809 | "pwork:x%x hawork:x%x wait:x%x" , |
| 13810 | phba->work_ha, work_ha_copy, |
| 13811 | (uint32_t)((unsigned long) |
| 13812 | &phba->work_waitq)); |
| 13813 | |
| 13814 | control &= |
| 13815 | ~(HC_R0INT_ENA << LPFC_ELS_RING); |
| 13816 | writel(val: control, addr: phba->HCregaddr); |
| 13817 | readl(addr: phba->HCregaddr); /* flush */ |
| 13818 | } |
| 13819 | else { |
| 13820 | lpfc_debugfs_slow_ring_trc(phba, |
| 13821 | "ISR slow ring: pwork:" |
| 13822 | "x%x hawork:x%x wait:x%x" , |
| 13823 | phba->work_ha, work_ha_copy, |
| 13824 | (uint32_t)((unsigned long) |
| 13825 | &phba->work_waitq)); |
| 13826 | } |
| 13827 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 13828 | } |
| 13829 | } |
| 13830 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 13831 | if (work_ha_copy & HA_ERATT) { |
| 13832 | if (lpfc_sli_read_hs(phba)) |
| 13833 | goto unplug_error; |
| 13834 | /* |
| 13835 | * Check if there is a deferred error condition |
| 13836 | * is active |
| 13837 | */ |
| 13838 | if ((HS_FFER1 & phba->work_hs) && |
| 13839 | ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | |
| 13840 | HS_FFER6 | HS_FFER7 | HS_FFER8) & |
| 13841 | phba->work_hs)) { |
| 13842 | set_bit(nr: DEFER_ERATT, addr: &phba->hba_flag); |
| 13843 | /* Clear all interrupt enable conditions */ |
| 13844 | writel(val: 0, addr: phba->HCregaddr); |
| 13845 | readl(addr: phba->HCregaddr); |
| 13846 | } |
| 13847 | } |
| 13848 | |
| 13849 | if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { |
| 13850 | pmb = phba->sli.mbox_active; |
| 13851 | pmbox = &pmb->u.mb; |
| 13852 | mbox = phba->mbox; |
| 13853 | vport = pmb->vport; |
| 13854 | |
| 13855 | /* First check out the status word */ |
| 13856 | lpfc_sli_pcimem_bcopy(srcp: mbox, destp: pmbox, cnt: sizeof(uint32_t)); |
| 13857 | if (pmbox->mbxOwner != OWN_HOST) { |
| 13858 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 13859 | /* |
| 13860 | * Stray Mailbox Interrupt, mbxCommand <cmd> |
| 13861 | * mbxStatus <status> |
| 13862 | */ |
| 13863 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 13864 | "(%d):0304 Stray Mailbox " |
| 13865 | "Interrupt mbxCommand x%x " |
| 13866 | "mbxStatus x%x\n" , |
| 13867 | (vport ? vport->vpi : 0), |
| 13868 | pmbox->mbxCommand, |
| 13869 | pmbox->mbxStatus); |
| 13870 | /* clear mailbox attention bit */ |
| 13871 | work_ha_copy &= ~HA_MBATT; |
| 13872 | } else { |
| 13873 | phba->sli.mbox_active = NULL; |
| 13874 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 13875 | phba->last_completion_time = jiffies; |
| 13876 | timer_delete(timer: &phba->sli.mbox_tmo); |
| 13877 | if (pmb->mbox_cmpl) { |
| 13878 | lpfc_sli_pcimem_bcopy(srcp: mbox, destp: pmbox, |
| 13879 | MAILBOX_CMD_SIZE); |
| 13880 | if (pmb->out_ext_byte_len && |
| 13881 | pmb->ext_buf) |
| 13882 | lpfc_sli_pcimem_bcopy( |
| 13883 | srcp: phba->mbox_ext, |
| 13884 | destp: pmb->ext_buf, |
| 13885 | cnt: pmb->out_ext_byte_len); |
| 13886 | } |
| 13887 | if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { |
| 13888 | pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; |
| 13889 | |
| 13890 | lpfc_debugfs_disc_trc(vport, |
| 13891 | LPFC_DISC_TRC_MBOX_VPORT, |
| 13892 | "MBOX dflt rpi: : " |
| 13893 | "status:x%x rpi:x%x" , |
| 13894 | (uint32_t)pmbox->mbxStatus, |
| 13895 | pmbox->un.varWords[0], 0); |
| 13896 | |
| 13897 | if (!pmbox->mbxStatus) { |
| 13898 | mp = pmb->ctx_buf; |
| 13899 | ndlp = pmb->ctx_ndlp; |
| 13900 | |
| 13901 | /* Reg_LOGIN of dflt RPI was |
| 13902 | * successful. new lets get |
| 13903 | * rid of the RPI using the |
| 13904 | * same mbox buffer. |
| 13905 | */ |
| 13906 | lpfc_unreg_login(phba, |
| 13907 | vport->vpi, |
| 13908 | pmbox->un.varWords[0], |
| 13909 | pmb); |
| 13910 | pmb->mbox_cmpl = |
| 13911 | lpfc_mbx_cmpl_dflt_rpi; |
| 13912 | pmb->ctx_buf = mp; |
| 13913 | pmb->ctx_ndlp = ndlp; |
| 13914 | pmb->vport = vport; |
| 13915 | rc = lpfc_sli_issue_mbox(phba, |
| 13916 | pmbox: pmb, |
| 13917 | MBX_NOWAIT); |
| 13918 | if (rc != MBX_BUSY) |
| 13919 | lpfc_printf_log(phba, |
| 13920 | KERN_ERR, |
| 13921 | LOG_TRACE_EVENT, |
| 13922 | "0350 rc should have" |
| 13923 | "been MBX_BUSY\n" ); |
| 13924 | if (rc != MBX_NOT_FINISHED) |
| 13925 | goto send_current_mbox; |
| 13926 | } |
| 13927 | } |
| 13928 | spin_lock_irqsave( |
| 13929 | &phba->pport->work_port_lock, |
| 13930 | iflag); |
| 13931 | phba->pport->work_port_events &= |
| 13932 | ~WORKER_MBOX_TMO; |
| 13933 | spin_unlock_irqrestore( |
| 13934 | lock: &phba->pport->work_port_lock, |
| 13935 | flags: iflag); |
| 13936 | |
| 13937 | /* Do NOT queue MBX_HEARTBEAT to the worker |
| 13938 | * thread for processing. |
| 13939 | */ |
| 13940 | if (pmbox->mbxCommand == MBX_HEARTBEAT) { |
| 13941 | /* Process mbox now */ |
| 13942 | phba->sli.mbox_active = NULL; |
| 13943 | phba->sli.sli_flag &= |
| 13944 | ~LPFC_SLI_MBOX_ACTIVE; |
| 13945 | if (pmb->mbox_cmpl) |
| 13946 | pmb->mbox_cmpl(phba, pmb); |
| 13947 | } else { |
| 13948 | /* Queue to worker thread to process */ |
| 13949 | lpfc_mbox_cmpl_put(phba, pmb); |
| 13950 | } |
| 13951 | } |
| 13952 | } else |
| 13953 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 13954 | |
| 13955 | if ((work_ha_copy & HA_MBATT) && |
| 13956 | (phba->sli.mbox_active == NULL)) { |
| 13957 | send_current_mbox: |
| 13958 | /* Process next mailbox command if there is one */ |
| 13959 | do { |
| 13960 | rc = lpfc_sli_issue_mbox(phba, NULL, |
| 13961 | MBX_NOWAIT); |
| 13962 | } while (rc == MBX_NOT_FINISHED); |
| 13963 | if (rc != MBX_SUCCESS) |
| 13964 | lpfc_printf_log(phba, KERN_ERR, |
| 13965 | LOG_TRACE_EVENT, |
| 13966 | "0349 rc should be " |
| 13967 | "MBX_SUCCESS\n" ); |
| 13968 | } |
| 13969 | |
| 13970 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 13971 | phba->work_ha |= work_ha_copy; |
| 13972 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 13973 | lpfc_worker_wake_up(phba); |
| 13974 | } |
| 13975 | return IRQ_HANDLED; |
| 13976 | unplug_error: |
| 13977 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 13978 | return IRQ_HANDLED; |
| 13979 | |
| 13980 | } /* lpfc_sli_sp_intr_handler */ |
| 13981 | |
| 13982 | /** |
| 13983 | * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. |
| 13984 | * @irq: Interrupt number. |
| 13985 | * @dev_id: The device context pointer. |
| 13986 | * |
| 13987 | * This function is directly called from the PCI layer as an interrupt |
| 13988 | * service routine when device with SLI-3 interface spec is enabled with |
| 13989 | * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB |
| 13990 | * ring event in the HBA. However, when the device is enabled with either |
| 13991 | * MSI or Pin-IRQ interrupt mode, this function is called as part of the |
| 13992 | * device-level interrupt handler. When the PCI slot is in error recovery |
| 13993 | * or the HBA is undergoing initialization, the interrupt handler will not |
| 13994 | * process the interrupt. The SCSI FCP fast-path ring event are handled in |
| 13995 | * the intrrupt context. This function is called without any lock held. |
| 13996 | * It gets the hbalock to access and update SLI data structures. |
| 13997 | * |
| 13998 | * This function returns IRQ_HANDLED when interrupt is handled else it |
| 13999 | * returns IRQ_NONE. |
| 14000 | **/ |
| 14001 | irqreturn_t |
| 14002 | lpfc_sli_fp_intr_handler(int irq, void *dev_id) |
| 14003 | { |
| 14004 | struct lpfc_hba *phba; |
| 14005 | uint32_t ha_copy; |
| 14006 | unsigned long status; |
| 14007 | unsigned long iflag; |
| 14008 | struct lpfc_sli_ring *pring; |
| 14009 | |
| 14010 | /* Get the driver's phba structure from the dev_id and |
| 14011 | * assume the HBA is not interrupting. |
| 14012 | */ |
| 14013 | phba = (struct lpfc_hba *) dev_id; |
| 14014 | |
| 14015 | if (unlikely(!phba)) |
| 14016 | return IRQ_NONE; |
| 14017 | |
| 14018 | /* |
| 14019 | * Stuff needs to be attented to when this function is invoked as an |
| 14020 | * individual interrupt handler in MSI-X multi-message interrupt mode |
| 14021 | */ |
| 14022 | if (phba->intr_type == MSIX) { |
| 14023 | /* Check device state for handling interrupt */ |
| 14024 | if (lpfc_intr_state_check(phba)) |
| 14025 | return IRQ_NONE; |
| 14026 | /* Need to read HA REG for FCP ring and other ring events */ |
| 14027 | if (lpfc_readl(addr: phba->HAregaddr, data: &ha_copy)) |
| 14028 | return IRQ_HANDLED; |
| 14029 | |
| 14030 | /* |
| 14031 | * If there is deferred error attention, do not check for |
| 14032 | * any interrupt. |
| 14033 | */ |
| 14034 | if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) |
| 14035 | return IRQ_NONE; |
| 14036 | |
| 14037 | /* Clear up only attention source related to fast-path */ |
| 14038 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 14039 | writel(val: (ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), |
| 14040 | addr: phba->HAregaddr); |
| 14041 | readl(addr: phba->HAregaddr); /* flush */ |
| 14042 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 14043 | } else |
| 14044 | ha_copy = phba->ha_copy; |
| 14045 | |
| 14046 | /* |
| 14047 | * Process all events on FCP ring. Take the optimized path for FCP IO. |
| 14048 | */ |
| 14049 | ha_copy &= ~(phba->work_ha_mask); |
| 14050 | |
| 14051 | status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); |
| 14052 | status >>= (4*LPFC_FCP_RING); |
| 14053 | pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; |
| 14054 | if (status & HA_RXMASK) |
| 14055 | lpfc_sli_handle_fast_ring_event(phba, pring, mask: status); |
| 14056 | |
| 14057 | if (phba->cfg_multi_ring_support == 2) { |
| 14058 | /* |
| 14059 | * Process all events on extra ring. Take the optimized path |
| 14060 | * for extra ring IO. |
| 14061 | */ |
| 14062 | status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); |
| 14063 | status >>= (4*LPFC_EXTRA_RING); |
| 14064 | if (status & HA_RXMASK) { |
| 14065 | lpfc_sli_handle_fast_ring_event(phba, |
| 14066 | pring: &phba->sli.sli3_ring[LPFC_EXTRA_RING], |
| 14067 | mask: status); |
| 14068 | } |
| 14069 | } |
| 14070 | return IRQ_HANDLED; |
| 14071 | } /* lpfc_sli_fp_intr_handler */ |
| 14072 | |
| 14073 | /** |
| 14074 | * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device |
| 14075 | * @irq: Interrupt number. |
| 14076 | * @dev_id: The device context pointer. |
| 14077 | * |
| 14078 | * This function is the HBA device-level interrupt handler to device with |
| 14079 | * SLI-3 interface spec, called from the PCI layer when either MSI or |
| 14080 | * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which |
| 14081 | * requires driver attention. This function invokes the slow-path interrupt |
| 14082 | * attention handling function and fast-path interrupt attention handling |
| 14083 | * function in turn to process the relevant HBA attention events. This |
| 14084 | * function is called without any lock held. It gets the hbalock to access |
| 14085 | * and update SLI data structures. |
| 14086 | * |
| 14087 | * This function returns IRQ_HANDLED when interrupt is handled, else it |
| 14088 | * returns IRQ_NONE. |
| 14089 | **/ |
| 14090 | irqreturn_t |
| 14091 | lpfc_sli_intr_handler(int irq, void *dev_id) |
| 14092 | { |
| 14093 | struct lpfc_hba *phba; |
| 14094 | irqreturn_t sp_irq_rc, fp_irq_rc; |
| 14095 | unsigned long status1, status2; |
| 14096 | uint32_t hc_copy; |
| 14097 | |
| 14098 | /* |
| 14099 | * Get the driver's phba structure from the dev_id and |
| 14100 | * assume the HBA is not interrupting. |
| 14101 | */ |
| 14102 | phba = (struct lpfc_hba *) dev_id; |
| 14103 | |
| 14104 | if (unlikely(!phba)) |
| 14105 | return IRQ_NONE; |
| 14106 | |
| 14107 | /* Check device state for handling interrupt */ |
| 14108 | if (lpfc_intr_state_check(phba)) |
| 14109 | return IRQ_NONE; |
| 14110 | |
| 14111 | spin_lock(lock: &phba->hbalock); |
| 14112 | if (lpfc_readl(addr: phba->HAregaddr, data: &phba->ha_copy)) { |
| 14113 | spin_unlock(lock: &phba->hbalock); |
| 14114 | return IRQ_HANDLED; |
| 14115 | } |
| 14116 | |
| 14117 | if (unlikely(!phba->ha_copy)) { |
| 14118 | spin_unlock(lock: &phba->hbalock); |
| 14119 | return IRQ_NONE; |
| 14120 | } else if (phba->ha_copy & HA_ERATT) { |
| 14121 | if (test_and_set_bit(nr: HBA_ERATT_HANDLED, addr: &phba->hba_flag)) |
| 14122 | /* ERATT polling has handled ERATT */ |
| 14123 | phba->ha_copy &= ~HA_ERATT; |
| 14124 | } |
| 14125 | |
| 14126 | /* |
| 14127 | * If there is deferred error attention, do not check for any interrupt. |
| 14128 | */ |
| 14129 | if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) { |
| 14130 | spin_unlock(lock: &phba->hbalock); |
| 14131 | return IRQ_NONE; |
| 14132 | } |
| 14133 | |
| 14134 | /* Clear attention sources except link and error attentions */ |
| 14135 | if (lpfc_readl(addr: phba->HCregaddr, data: &hc_copy)) { |
| 14136 | spin_unlock(lock: &phba->hbalock); |
| 14137 | return IRQ_HANDLED; |
| 14138 | } |
| 14139 | writel(val: hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA |
| 14140 | | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), |
| 14141 | addr: phba->HCregaddr); |
| 14142 | writel(val: (phba->ha_copy & ~(HA_LATT | HA_ERATT)), addr: phba->HAregaddr); |
| 14143 | writel(val: hc_copy, addr: phba->HCregaddr); |
| 14144 | readl(addr: phba->HAregaddr); /* flush */ |
| 14145 | spin_unlock(lock: &phba->hbalock); |
| 14146 | |
| 14147 | /* |
| 14148 | * Invokes slow-path host attention interrupt handling as appropriate. |
| 14149 | */ |
| 14150 | |
| 14151 | /* status of events with mailbox and link attention */ |
| 14152 | status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); |
| 14153 | |
| 14154 | /* status of events with ELS ring */ |
| 14155 | status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); |
| 14156 | status2 >>= (4*LPFC_ELS_RING); |
| 14157 | |
| 14158 | if (status1 || (status2 & HA_RXMASK)) |
| 14159 | sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); |
| 14160 | else |
| 14161 | sp_irq_rc = IRQ_NONE; |
| 14162 | |
| 14163 | /* |
| 14164 | * Invoke fast-path host attention interrupt handling as appropriate. |
| 14165 | */ |
| 14166 | |
| 14167 | /* status of events with FCP ring */ |
| 14168 | status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); |
| 14169 | status1 >>= (4*LPFC_FCP_RING); |
| 14170 | |
| 14171 | /* status of events with extra ring */ |
| 14172 | if (phba->cfg_multi_ring_support == 2) { |
| 14173 | status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); |
| 14174 | status2 >>= (4*LPFC_EXTRA_RING); |
| 14175 | } else |
| 14176 | status2 = 0; |
| 14177 | |
| 14178 | if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) |
| 14179 | fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); |
| 14180 | else |
| 14181 | fp_irq_rc = IRQ_NONE; |
| 14182 | |
| 14183 | /* Return device-level interrupt handling status */ |
| 14184 | return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; |
| 14185 | } /* lpfc_sli_intr_handler */ |
| 14186 | |
| 14187 | /** |
| 14188 | * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event |
| 14189 | * @phba: pointer to lpfc hba data structure. |
| 14190 | * |
| 14191 | * This routine is invoked by the worker thread to process all the pending |
| 14192 | * SLI4 els abort xri events. |
| 14193 | **/ |
| 14194 | void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) |
| 14195 | { |
| 14196 | struct lpfc_cq_event *cq_event; |
| 14197 | unsigned long iflags; |
| 14198 | |
| 14199 | /* First, declare the els xri abort event has been handled */ |
| 14200 | clear_bit(nr: ELS_XRI_ABORT_EVENT, addr: &phba->hba_flag); |
| 14201 | |
| 14202 | /* Now, handle all the els xri abort events */ |
| 14203 | spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); |
| 14204 | while (!list_empty(head: &phba->sli4_hba.sp_els_xri_aborted_work_queue)) { |
| 14205 | /* Get the first event from the head of the event queue */ |
| 14206 | list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, |
| 14207 | cq_event, struct lpfc_cq_event, list); |
| 14208 | spin_unlock_irqrestore(lock: &phba->sli4_hba.els_xri_abrt_list_lock, |
| 14209 | flags: iflags); |
| 14210 | /* Notify aborted XRI for ELS work queue */ |
| 14211 | lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); |
| 14212 | |
| 14213 | /* Free the event processed back to the free pool */ |
| 14214 | lpfc_sli4_cq_event_release(phba, cq_event); |
| 14215 | spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, |
| 14216 | iflags); |
| 14217 | } |
| 14218 | spin_unlock_irqrestore(lock: &phba->sli4_hba.els_xri_abrt_list_lock, flags: iflags); |
| 14219 | } |
| 14220 | |
| 14221 | /** |
| 14222 | * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe |
| 14223 | * @phba: Pointer to HBA context object. |
| 14224 | * @irspiocbq: Pointer to work-queue completion queue entry. |
| 14225 | * |
| 14226 | * This routine handles an ELS work-queue completion event and construct |
| 14227 | * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common |
| 14228 | * discovery engine to handle. |
| 14229 | * |
| 14230 | * Return: Pointer to the receive IOCBQ, NULL otherwise. |
| 14231 | **/ |
| 14232 | static struct lpfc_iocbq * |
| 14233 | lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, |
| 14234 | struct lpfc_iocbq *irspiocbq) |
| 14235 | { |
| 14236 | struct lpfc_sli_ring *pring; |
| 14237 | struct lpfc_iocbq *cmdiocbq; |
| 14238 | struct lpfc_wcqe_complete *wcqe; |
| 14239 | unsigned long iflags; |
| 14240 | |
| 14241 | pring = lpfc_phba_elsring(phba); |
| 14242 | if (unlikely(!pring)) |
| 14243 | return NULL; |
| 14244 | |
| 14245 | wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; |
| 14246 | spin_lock_irqsave(&pring->ring_lock, iflags); |
| 14247 | pring->stats.iocb_event++; |
| 14248 | /* Look up the ELS command IOCB and create pseudo response IOCB */ |
| 14249 | cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, |
| 14250 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); |
| 14251 | if (unlikely(!cmdiocbq)) { |
| 14252 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 14253 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 14254 | "0386 ELS complete with no corresponding " |
| 14255 | "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n" , |
| 14256 | wcqe->word0, wcqe->total_data_placed, |
| 14257 | wcqe->parameter, wcqe->word3); |
| 14258 | lpfc_sli_release_iocbq(phba, iocbq: irspiocbq); |
| 14259 | return NULL; |
| 14260 | } |
| 14261 | |
| 14262 | memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128)); |
| 14263 | memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe)); |
| 14264 | |
| 14265 | /* Put the iocb back on the txcmplq */ |
| 14266 | lpfc_sli_ringtxcmpl_put(phba, pring, piocb: cmdiocbq); |
| 14267 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 14268 | |
| 14269 | if (bf_get(lpfc_wcqe_c_xb, wcqe)) { |
| 14270 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 14271 | irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; |
| 14272 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 14273 | } |
| 14274 | |
| 14275 | return irspiocbq; |
| 14276 | } |
| 14277 | |
| 14278 | inline struct lpfc_cq_event * |
| 14279 | lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) |
| 14280 | { |
| 14281 | struct lpfc_cq_event *cq_event; |
| 14282 | |
| 14283 | /* Allocate a new internal CQ_EVENT entry */ |
| 14284 | cq_event = lpfc_sli4_cq_event_alloc(phba); |
| 14285 | if (!cq_event) { |
| 14286 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 14287 | "0602 Failed to alloc CQ_EVENT entry\n" ); |
| 14288 | return NULL; |
| 14289 | } |
| 14290 | |
| 14291 | /* Move the CQE into the event */ |
| 14292 | memcpy(&cq_event->cqe, entry, size); |
| 14293 | return cq_event; |
| 14294 | } |
| 14295 | |
| 14296 | /** |
| 14297 | * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event |
| 14298 | * @phba: Pointer to HBA context object. |
| 14299 | * @mcqe: Pointer to mailbox completion queue entry. |
| 14300 | * |
| 14301 | * This routine process a mailbox completion queue entry with asynchronous |
| 14302 | * event. |
| 14303 | * |
| 14304 | * Return: true if work posted to worker thread, otherwise false. |
| 14305 | **/ |
| 14306 | static bool |
| 14307 | lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) |
| 14308 | { |
| 14309 | struct lpfc_cq_event *cq_event; |
| 14310 | unsigned long iflags; |
| 14311 | |
| 14312 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 14313 | "0392 Async Event: word0:x%x, word1:x%x, " |
| 14314 | "word2:x%x, word3:x%x\n" , mcqe->word0, |
| 14315 | mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); |
| 14316 | |
| 14317 | cq_event = lpfc_cq_event_setup(phba, entry: mcqe, size: sizeof(struct lpfc_mcqe)); |
| 14318 | if (!cq_event) |
| 14319 | return false; |
| 14320 | |
| 14321 | spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); |
| 14322 | list_add_tail(new: &cq_event->list, head: &phba->sli4_hba.sp_asynce_work_queue); |
| 14323 | spin_unlock_irqrestore(lock: &phba->sli4_hba.asynce_list_lock, flags: iflags); |
| 14324 | |
| 14325 | /* Set the async event flag */ |
| 14326 | set_bit(nr: ASYNC_EVENT, addr: &phba->hba_flag); |
| 14327 | |
| 14328 | return true; |
| 14329 | } |
| 14330 | |
| 14331 | /** |
| 14332 | * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event |
| 14333 | * @phba: Pointer to HBA context object. |
| 14334 | * @mcqe: Pointer to mailbox completion queue entry. |
| 14335 | * |
| 14336 | * This routine process a mailbox completion queue entry with mailbox |
| 14337 | * completion event. |
| 14338 | * |
| 14339 | * Return: true if work posted to worker thread, otherwise false. |
| 14340 | **/ |
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1832 No pending MBOX command to handle\n" );
		spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
	/* Cache views of the completed command: MQE form, legacy MAILBOX_t
	 * form, the HBA's mailbox region, and the owning vport.
	 */
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	timer_delete(timer: &phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli4_pcimem_bcopy(srcp: mbox, destp: mqe, cnt: sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	/* Immediate-unreg cycle: a default-RPI REG_LOGIN just completed and
	 * must be followed by an UNREG_LOGIN issued from this context.
	 */
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x" ,
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = pmb->ctx_buf;
			ndlp = pmb->ctx_ndlp;

			/* Reg_LOGIN of dflt RPI was successful. Mark the
			 * node as having an UNREG_LOGIN in progress to stop
			 * an unsolicited PLOGI from the same NPortId from
			 * starting another mailbox transaction.
			 */
			set_bit(nr: NLP_UNREG_INP, addr: &ndlp->nlp_flag);
			/* Reuse pmb to issue the UNREG_LOGIN for the RPI
			 * that was just registered (varWords[0]).
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->ctx_buf = mp;

			/* No reference taken here.  This is a default
			 * RPI reg/immediate unreg cycle. The reference was
			 * taken in the reg rpi path and is released when
			 * this mailbox completes.
			 */
			pmb->ctx_ndlp = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmbox: pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0385 rc should "
						"have been MBX_BUSY\n" );
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	/* Mailbox completed; cancel any pending mailbox-timeout worker event */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(lock: &phba->pport->work_port_lock, flags: iflags);

	/* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
	if (pmbox->mbxCommand == MBX_HEARTBEAT) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		/* Release the mailbox command posting token */
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		if (bf_get(lpfc_trailer_consumed, mcqe))
			lpfc_sli4_mq_release(q: phba->sli4_hba.mbx_wq);
		spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);

		/* Post the next mbox command, if there is one */
		lpfc_sli4_post_async_mbox(phba);

		/* Process cmpl now */
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
		return false;
	}

	/* There is mailbox completion work to queue to the worker thread */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(q: phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
	return workposted;

out_no_mqe_complete:
	/* Nothing completed; still release the MQ entry if consumed by HW */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(q: phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
	return false;
}
| 14480 | |
| 14481 | /** |
| 14482 | * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry |
| 14483 | * @phba: Pointer to HBA context object. |
| 14484 | * @cq: Pointer to associated CQ |
| 14485 | * @cqe: Pointer to mailbox completion queue entry. |
| 14486 | * |
| 14487 | * This routine process a mailbox completion queue entry, it invokes the |
| 14488 | * proper mailbox complete handling or asynchronous event handling routine |
| 14489 | * according to the MCQE's async bit. |
| 14490 | * |
| 14491 | * Return: true if work posted to worker thread, otherwise false. |
| 14492 | **/ |
| 14493 | static bool |
| 14494 | lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
| 14495 | struct lpfc_cqe *cqe) |
| 14496 | { |
| 14497 | struct lpfc_mcqe mcqe; |
| 14498 | bool workposted; |
| 14499 | |
| 14500 | cq->CQ_mbox++; |
| 14501 | |
| 14502 | /* Copy the mailbox MCQE and convert endian order as needed */ |
| 14503 | lpfc_sli4_pcimem_bcopy(srcp: cqe, destp: &mcqe, cnt: sizeof(struct lpfc_mcqe)); |
| 14504 | |
| 14505 | /* Invoke the proper event handling routine */ |
| 14506 | if (!bf_get(lpfc_trailer_async, &mcqe)) |
| 14507 | workposted = lpfc_sli4_sp_handle_mbox_event(phba, mcqe: &mcqe); |
| 14508 | else |
| 14509 | workposted = lpfc_sli4_sp_handle_async_event(phba, mcqe: &mcqe); |
| 14510 | return workposted; |
| 14511 | } |
| 14512 | |
| 14513 | /** |
| 14514 | * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event |
| 14515 | * @phba: Pointer to HBA context object. |
| 14516 | * @cq: Pointer to associated CQ |
| 14517 | * @wcqe: Pointer to work-queue completion queue entry. |
| 14518 | * |
| 14519 | * This routine handles an ELS work-queue completion event. |
| 14520 | * |
| 14521 | * Return: true if work posted to worker thread, otherwise false. |
| 14522 | **/ |
| 14523 | static bool |
| 14524 | lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
| 14525 | struct lpfc_wcqe_complete *wcqe) |
| 14526 | { |
| 14527 | struct lpfc_iocbq *irspiocbq; |
| 14528 | unsigned long iflags; |
| 14529 | struct lpfc_sli_ring *pring = cq->pring; |
| 14530 | int txq_cnt = 0; |
| 14531 | int txcmplq_cnt = 0; |
| 14532 | |
| 14533 | /* Check for response status */ |
| 14534 | if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { |
| 14535 | /* Log the error status */ |
| 14536 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 14537 | "0357 ELS CQE error: status=x%x: " |
| 14538 | "CQE: %08x %08x %08x %08x\n" , |
| 14539 | bf_get(lpfc_wcqe_c_status, wcqe), |
| 14540 | wcqe->word0, wcqe->total_data_placed, |
| 14541 | wcqe->parameter, wcqe->word3); |
| 14542 | } |
| 14543 | |
| 14544 | /* Get an irspiocbq for later ELS response processing use */ |
| 14545 | irspiocbq = lpfc_sli_get_iocbq(phba); |
| 14546 | if (!irspiocbq) { |
| 14547 | if (!list_empty(head: &pring->txq)) |
| 14548 | txq_cnt++; |
| 14549 | if (!list_empty(head: &pring->txcmplq)) |
| 14550 | txcmplq_cnt++; |
| 14551 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 14552 | "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " |
| 14553 | "els_txcmplq_cnt=%d\n" , |
| 14554 | txq_cnt, phba->iocb_cnt, |
| 14555 | txcmplq_cnt); |
| 14556 | return false; |
| 14557 | } |
| 14558 | |
| 14559 | /* Save off the slow-path queue event for work thread to process */ |
| 14560 | memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); |
| 14561 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 14562 | list_add_tail(new: &irspiocbq->cq_event.list, |
| 14563 | head: &phba->sli4_hba.sp_queue_event); |
| 14564 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 14565 | set_bit(nr: HBA_SP_QUEUE_EVT, addr: &phba->hba_flag); |
| 14566 | |
| 14567 | return true; |
| 14568 | } |
| 14569 | |
| 14570 | /** |
| 14571 | * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event |
| 14572 | * @phba: Pointer to HBA context object. |
| 14573 | * @wcqe: Pointer to work-queue completion queue entry. |
| 14574 | * |
| 14575 | * This routine handles slow-path WQ entry consumed event by invoking the |
| 14576 | * proper WQ release routine to the slow-path WQ. |
| 14577 | **/ |
| 14578 | static void |
| 14579 | lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, |
| 14580 | struct lpfc_wcqe_release *wcqe) |
| 14581 | { |
| 14582 | /* sanity check on queue memory */ |
| 14583 | if (unlikely(!phba->sli4_hba.els_wq)) |
| 14584 | return; |
| 14585 | /* Check for the slow-path ELS work queue */ |
| 14586 | if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) |
| 14587 | lpfc_sli4_wq_release(q: phba->sli4_hba.els_wq, |
| 14588 | bf_get(lpfc_wcqe_r_wqe_index, wcqe)); |
| 14589 | else |
| 14590 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 14591 | "2579 Slow-path wqe consume event carries " |
| 14592 | "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n" , |
| 14593 | bf_get(lpfc_wcqe_r_wqe_index, wcqe), |
| 14594 | phba->sli4_hba.els_wq->queue_id); |
| 14595 | } |
| 14596 | |
| 14597 | /** |
| 14598 | * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event |
| 14599 | * @phba: Pointer to HBA context object. |
| 14600 | * @cq: Pointer to a WQ completion queue. |
| 14601 | * @wcqe: Pointer to work-queue completion queue entry. |
| 14602 | * |
| 14603 | * This routine handles an XRI abort event. |
| 14604 | * |
| 14605 | * Return: true if work posted to worker thread, otherwise false. |
| 14606 | **/ |
| 14607 | static bool |
| 14608 | lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, |
| 14609 | struct lpfc_queue *cq, |
| 14610 | struct sli4_wcqe_xri_aborted *wcqe) |
| 14611 | { |
| 14612 | bool workposted = false; |
| 14613 | struct lpfc_cq_event *cq_event; |
| 14614 | unsigned long iflags; |
| 14615 | |
| 14616 | switch (cq->subtype) { |
| 14617 | case LPFC_IO: |
| 14618 | lpfc_sli4_io_xri_aborted(phba, axri: wcqe, idx: cq->hdwq); |
| 14619 | if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
| 14620 | /* Notify aborted XRI for NVME work queue */ |
| 14621 | if (phba->nvmet_support) |
| 14622 | lpfc_sli4_nvmet_xri_aborted(phba, axri: wcqe); |
| 14623 | } |
| 14624 | workposted = false; |
| 14625 | break; |
| 14626 | case LPFC_NVME_LS: /* NVME LS uses ELS resources */ |
| 14627 | case LPFC_ELS: |
| 14628 | cq_event = lpfc_cq_event_setup(phba, entry: wcqe, size: sizeof(*wcqe)); |
| 14629 | if (!cq_event) { |
| 14630 | workposted = false; |
| 14631 | break; |
| 14632 | } |
| 14633 | cq_event->hdwq = cq->hdwq; |
| 14634 | spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, |
| 14635 | iflags); |
| 14636 | list_add_tail(new: &cq_event->list, |
| 14637 | head: &phba->sli4_hba.sp_els_xri_aborted_work_queue); |
| 14638 | /* Set the els xri abort event flag */ |
| 14639 | set_bit(nr: ELS_XRI_ABORT_EVENT, addr: &phba->hba_flag); |
| 14640 | spin_unlock_irqrestore(lock: &phba->sli4_hba.els_xri_abrt_list_lock, |
| 14641 | flags: iflags); |
| 14642 | workposted = true; |
| 14643 | break; |
| 14644 | default: |
| 14645 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 14646 | "0603 Invalid CQ subtype %d: " |
| 14647 | "%08x %08x %08x %08x\n" , |
| 14648 | cq->subtype, wcqe->word0, wcqe->parameter, |
| 14649 | wcqe->word2, wcqe->word3); |
| 14650 | workposted = false; |
| 14651 | break; |
| 14652 | } |
| 14653 | return workposted; |
| 14654 | } |
| 14655 | |
#define FC_RCTL_MDS_DIAGS 0xF4

/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry: on a
 * successful receive the frame buffer is pulled off the header RQ and either
 * handled inline (MDS loopback / unsolicited data) or queued for the worker
 * thread; buffer-shortage and DMA-error statuses are handled per status code.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct fc_frame_header *fc_hdr;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct lpfc_nvmet_tgtport *tgtp;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	/* The RQ id lives in a different field in the V1 CQE layout */
	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	/* Only handle completions targeting our header RQ */
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2537 Receive Frame Truncated!!\n" );
		fallthrough;
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hq: hrq, dq: drq);
		dma_buf = lpfc_sli_hbqbuf_get(rb_list: &phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		/* Keep a copy of the RCQE with the frame for later handling */
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
			spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
			/* Handle MDS Loopback frames */
			if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
				lpfc_sli4_handle_mds_loopback(vport: phba->pport,
							      dmabuf: dma_buf);
			else
				lpfc_in_buf_free(phba, &dma_buf->dbuf);
			break;
		}

		/* save off the frame for the work thread to process */
		list_add_tail(new: &dma_buf->cq_event.list,
			      head: &phba->sli4_hba.sp_queue_event);
		spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
		/* Frame received */
		set_bit(nr: HBA_SP_QUEUE_EVT, addr: &phba->hba_flag);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6402 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n" ,
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		fallthrough;

	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		set_bit(nr: HBA_POST_RECEIVE_BUFFER, addr: &phba->hba_flag);
		workposted = true;
		break;
	case FC_STATUS_RQ_DMA_FAILURE:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2564 RQE DMA Error x%x, x%08x x%08x x%08x "
				"x%08x\n" ,
				status, rcqe->word0, rcqe->word1,
				rcqe->word2, rcqe->word3);

		/* If IV set, no further recovery */
		if (bf_get(lpfc_rcqe_iv, rcqe))
			break;

		/* recycle consumed resource */
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hq: hrq, dq: drq);
		dma_buf = lpfc_sli_hbqbuf_get(rb_list: &phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
			break;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
		lpfc_in_buf_free(phba, &dma_buf->dbuf);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2565 Unexpected RQE Status x%x, w0-3 x%08x "
				"x%08x x%08x x%08x\n" ,
				status, rcqe->word0, rcqe->word1,
				rcqe->word2, rcqe->word3);
		break;
	}
out:
	return workposted;
}
| 14787 | |
| 14788 | /** |
| 14789 | * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry |
| 14790 | * @phba: Pointer to HBA context object. |
| 14791 | * @cq: Pointer to the completion queue. |
| 14792 | * @cqe: Pointer to a completion queue entry. |
| 14793 | * |
| 14794 | * This routine process a slow-path work-queue or receive queue completion queue |
| 14795 | * entry. |
| 14796 | * |
| 14797 | * Return: true if work posted to worker thread, otherwise false. |
| 14798 | **/ |
| 14799 | static bool |
| 14800 | lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
| 14801 | struct lpfc_cqe *cqe) |
| 14802 | { |
| 14803 | struct lpfc_cqe cqevt; |
| 14804 | bool workposted = false; |
| 14805 | |
| 14806 | /* Copy the work queue CQE and convert endian order if needed */ |
| 14807 | lpfc_sli4_pcimem_bcopy(srcp: cqe, destp: &cqevt, cnt: sizeof(struct lpfc_cqe)); |
| 14808 | |
| 14809 | /* Check and process for different type of WCQE and dispatch */ |
| 14810 | switch (bf_get(lpfc_cqe_code, &cqevt)) { |
| 14811 | case CQE_CODE_COMPL_WQE: |
| 14812 | /* Process the WQ/RQ complete event */ |
| 14813 | phba->last_completion_time = jiffies; |
| 14814 | workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, |
| 14815 | wcqe: (struct lpfc_wcqe_complete *)&cqevt); |
| 14816 | break; |
| 14817 | case CQE_CODE_RELEASE_WQE: |
| 14818 | /* Process the WQ release event */ |
| 14819 | lpfc_sli4_sp_handle_rel_wcqe(phba, |
| 14820 | wcqe: (struct lpfc_wcqe_release *)&cqevt); |
| 14821 | break; |
| 14822 | case CQE_CODE_XRI_ABORTED: |
| 14823 | /* Process the WQ XRI abort event */ |
| 14824 | phba->last_completion_time = jiffies; |
| 14825 | workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, |
| 14826 | wcqe: (struct sli4_wcqe_xri_aborted *)&cqevt); |
| 14827 | break; |
| 14828 | case CQE_CODE_RECEIVE: |
| 14829 | case CQE_CODE_RECEIVE_V1: |
| 14830 | /* Process the RQ event */ |
| 14831 | phba->last_completion_time = jiffies; |
| 14832 | workposted = lpfc_sli4_sp_handle_rcqe(phba, |
| 14833 | rcqe: (struct lpfc_rcqe *)&cqevt); |
| 14834 | break; |
| 14835 | default: |
| 14836 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 14837 | "0388 Not a valid WCQE code: x%x\n" , |
| 14838 | bf_get(lpfc_cqe_code, &cqevt)); |
| 14839 | break; |
| 14840 | } |
| 14841 | return workposted; |
| 14842 | } |
| 14843 | |
| 14844 | /** |
| 14845 | * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry |
| 14846 | * @phba: Pointer to HBA context object. |
| 14847 | * @eqe: Pointer to fast-path event queue entry. |
| 14848 | * @speq: Pointer to slow-path event queue. |
| 14849 | * |
| 14850 | * This routine process a event queue entry from the slow-path event queue. |
| 14851 | * It will check the MajorCode and MinorCode to determine this is for a |
| 14852 | * completion event on a completion queue, if not, an error shall be logged |
| 14853 | * and just return. Otherwise, it will get to the corresponding completion |
| 14854 | * queue and process all the entries on that completion queue, rearm the |
| 14855 | * completion queue, and then return. |
| 14856 | * |
| 14857 | **/ |
| 14858 | static void |
| 14859 | lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, |
| 14860 | struct lpfc_queue *speq) |
| 14861 | { |
| 14862 | struct lpfc_queue *cq = NULL, *childq; |
| 14863 | uint16_t cqid; |
| 14864 | int ret = 0; |
| 14865 | |
| 14866 | /* Get the reference to the corresponding CQ */ |
| 14867 | cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); |
| 14868 | |
| 14869 | list_for_each_entry(childq, &speq->child_list, list) { |
| 14870 | if (childq->queue_id == cqid) { |
| 14871 | cq = childq; |
| 14872 | break; |
| 14873 | } |
| 14874 | } |
| 14875 | if (unlikely(!cq)) { |
| 14876 | if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) |
| 14877 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 14878 | "0365 Slow-path CQ identifier " |
| 14879 | "(%d) does not exist\n" , cqid); |
| 14880 | return; |
| 14881 | } |
| 14882 | |
| 14883 | /* Save EQ associated with this CQ */ |
| 14884 | cq->assoc_qp = speq; |
| 14885 | |
| 14886 | if (is_kdump_kernel()) |
| 14887 | ret = queue_work(wq: phba->wq, work: &cq->spwork); |
| 14888 | else |
| 14889 | ret = queue_work_on(cpu: cq->chann, wq: phba->wq, work: &cq->spwork); |
| 14890 | |
| 14891 | if (!ret) |
| 14892 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 14893 | "0390 Cannot schedule queue work " |
| 14894 | "for CQ eqcqid=%d, cqid=%d on CPU %d\n" , |
| 14895 | cqid, cq->queue_id, raw_smp_processor_id()); |
| 14896 | } |
| 14897 | |
| 14898 | /** |
| 14899 | * __lpfc_sli4_process_cq - Process elements of a CQ |
| 14900 | * @phba: Pointer to HBA context object. |
| 14901 | * @cq: Pointer to CQ to be processed |
| 14902 | * @handler: Routine to process each cqe |
| 14903 | * @delay: Pointer to usdelay to set in case of rescheduling of the handler |
| 14904 | * |
| 14905 | * This routine processes completion queue entries in a CQ. While a valid |
| 14906 | * queue element is found, the handler is called. During processing checks |
| 14907 | * are made for periodic doorbell writes to let the hardware know of |
| 14908 | * element consumption. |
| 14909 | * |
| 14910 | * If the max limit on cqes to process is hit, or there are no more valid |
| 14911 | * entries, the loop stops. If we processed a sufficient number of elements, |
| 14912 | * meaning there is sufficient load, rather than rearming and generating |
| 14913 | * another interrupt, a cq rescheduling delay will be set. A delay of 0 |
| 14914 | * indicates no rescheduling. |
| 14915 | * |
| 14916 | * Returns True if work scheduled, False otherwise. |
| 14917 | **/ |
| 14918 | static bool |
| 14919 | __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, |
| 14920 | bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, |
| 14921 | struct lpfc_cqe *), unsigned long *delay) |
| 14922 | { |
| 14923 | struct lpfc_cqe *cqe; |
| 14924 | bool workposted = false; |
| 14925 | int count = 0, consumed = 0; |
| 14926 | bool arm = true; |
| 14927 | |
| 14928 | /* default - no reschedule */ |
| 14929 | *delay = 0; |
| 14930 | |
| 14931 | if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) |
| 14932 | goto rearm_and_exit; |
| 14933 | |
| 14934 | /* Process all the entries to the CQ */ |
| 14935 | cq->q_flag = 0; |
| 14936 | cqe = lpfc_sli4_cq_get(q: cq); |
| 14937 | while (cqe) { |
| 14938 | workposted |= handler(phba, cq, cqe); |
| 14939 | __lpfc_sli4_consume_cqe(phba, cq, cqe); |
| 14940 | |
| 14941 | consumed++; |
| 14942 | if (!(++count % cq->max_proc_limit)) |
| 14943 | break; |
| 14944 | |
| 14945 | if (!(count % cq->notify_interval)) { |
| 14946 | phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, |
| 14947 | LPFC_QUEUE_NOARM); |
| 14948 | consumed = 0; |
| 14949 | cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK; |
| 14950 | } |
| 14951 | |
| 14952 | if (count == LPFC_NVMET_CQ_NOTIFY) |
| 14953 | cq->q_flag |= HBA_NVMET_CQ_NOTIFY; |
| 14954 | |
| 14955 | cqe = lpfc_sli4_cq_get(q: cq); |
| 14956 | } |
| 14957 | if (count >= phba->cfg_cq_poll_threshold) { |
| 14958 | *delay = 1; |
| 14959 | arm = false; |
| 14960 | } |
| 14961 | |
| 14962 | /* Track the max number of CQEs processed in 1 EQ */ |
| 14963 | if (count > cq->CQ_max_cqe) |
| 14964 | cq->CQ_max_cqe = count; |
| 14965 | |
| 14966 | cq->assoc_qp->EQ_cqe_cnt += count; |
| 14967 | |
| 14968 | /* Catch the no cq entry condition */ |
| 14969 | if (unlikely(count == 0)) |
| 14970 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 14971 | "0369 No entry from completion queue " |
| 14972 | "qid=%d\n" , cq->queue_id); |
| 14973 | |
| 14974 | xchg(&cq->queue_claimed, 0); |
| 14975 | |
| 14976 | rearm_and_exit: |
| 14977 | phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, |
| 14978 | arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); |
| 14979 | |
| 14980 | return workposted; |
| 14981 | } |
| 14982 | |
| 14983 | /** |
| 14984 | * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry |
| 14985 | * @cq: pointer to CQ to process |
| 14986 | * |
| 14987 | * This routine calls the cq processing routine with a handler specific |
| 14988 | * to the type of queue bound to it. |
| 14989 | * |
| 14990 | * The CQ routine returns two values: the first is the calling status, |
| 14991 | * which indicates whether work was queued to the background discovery |
| 14992 | * thread. If true, the routine should wakeup the discovery thread; |
| 14993 | * the second is the delay parameter. If non-zero, rather than rearming |
| 14994 | * the CQ and yet another interrupt, the CQ handler should be queued so |
| 14995 | * that it is processed in a subsequent polling action. The value of |
| 14996 | * the delay indicates when to reschedule it. |
| 14997 | **/ |
| 14998 | static void |
| 14999 | __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) |
| 15000 | { |
| 15001 | struct lpfc_hba *phba = cq->phba; |
| 15002 | unsigned long delay; |
| 15003 | bool workposted = false; |
| 15004 | int ret = 0; |
| 15005 | |
| 15006 | /* Process and rearm the CQ */ |
| 15007 | switch (cq->type) { |
| 15008 | case LPFC_MCQ: |
| 15009 | workposted |= __lpfc_sli4_process_cq(phba, cq, |
| 15010 | handler: lpfc_sli4_sp_handle_mcqe, |
| 15011 | delay: &delay); |
| 15012 | break; |
| 15013 | case LPFC_WCQ: |
| 15014 | if (cq->subtype == LPFC_IO) |
| 15015 | workposted |= __lpfc_sli4_process_cq(phba, cq, |
| 15016 | handler: lpfc_sli4_fp_handle_cqe, |
| 15017 | delay: &delay); |
| 15018 | else |
| 15019 | workposted |= __lpfc_sli4_process_cq(phba, cq, |
| 15020 | handler: lpfc_sli4_sp_handle_cqe, |
| 15021 | delay: &delay); |
| 15022 | break; |
| 15023 | default: |
| 15024 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 15025 | "0370 Invalid completion queue type (%d)\n" , |
| 15026 | cq->type); |
| 15027 | return; |
| 15028 | } |
| 15029 | |
| 15030 | if (delay) { |
| 15031 | if (is_kdump_kernel()) |
| 15032 | ret = queue_delayed_work(wq: phba->wq, dwork: &cq->sched_spwork, |
| 15033 | delay); |
| 15034 | else |
| 15035 | ret = queue_delayed_work_on(cpu: cq->chann, wq: phba->wq, |
| 15036 | work: &cq->sched_spwork, delay); |
| 15037 | if (!ret) |
| 15038 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 15039 | "0394 Cannot schedule queue work " |
| 15040 | "for cqid=%d on CPU %d\n" , |
| 15041 | cq->queue_id, cq->chann); |
| 15042 | } |
| 15043 | |
| 15044 | /* wake up worker thread if there are works to be done */ |
| 15045 | if (workposted) |
| 15046 | lpfc_worker_wake_up(phba); |
| 15047 | } |
| 15048 | |
| 15049 | /** |
| 15050 | * lpfc_sli4_sp_process_cq - slow-path work handler when started by |
| 15051 | * interrupt |
| 15052 | * @work: pointer to work element |
| 15053 | * |
| 15054 | * translates from the work handler and calls the slow-path handler. |
| 15055 | **/ |
| 15056 | static void |
| 15057 | lpfc_sli4_sp_process_cq(struct work_struct *work) |
| 15058 | { |
| 15059 | struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); |
| 15060 | |
| 15061 | __lpfc_sli4_sp_process_cq(cq); |
| 15062 | } |
| 15063 | |
| 15064 | /** |
| 15065 | * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer |
| 15066 | * @work: pointer to work element |
| 15067 | * |
| 15068 | * translates from the work handler and calls the slow-path handler. |
| 15069 | **/ |
| 15070 | static void |
| 15071 | lpfc_sli4_dly_sp_process_cq(struct work_struct *work) |
| 15072 | { |
| 15073 | struct lpfc_queue *cq = container_of(to_delayed_work(work), |
| 15074 | struct lpfc_queue, sched_spwork); |
| 15075 | |
| 15076 | __lpfc_sli4_sp_process_cq(cq); |
| 15077 | } |
| 15078 | |
| 15079 | /** |
| 15080 | * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry |
| 15081 | * @phba: Pointer to HBA context object. |
| 15082 | * @cq: Pointer to associated CQ |
| 15083 | * @wcqe: Pointer to work-queue completion queue entry. |
| 15084 | * |
| 15085 | * This routine process a fast-path work queue completion entry from fast-path |
| 15086 | * event queue for FCP command response completion. |
| 15087 | **/ |
| 15088 | static void |
| 15089 | lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
| 15090 | struct lpfc_wcqe_complete *wcqe) |
| 15091 | { |
| 15092 | struct lpfc_sli_ring *pring = cq->pring; |
| 15093 | struct lpfc_iocbq *cmdiocbq; |
| 15094 | unsigned long iflags; |
| 15095 | |
| 15096 | /* Check for response status */ |
| 15097 | if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { |
| 15098 | /* If resource errors reported from HBA, reduce queue |
| 15099 | * depth of the SCSI device. |
| 15100 | */ |
| 15101 | if (((bf_get(lpfc_wcqe_c_status, wcqe) == |
| 15102 | IOSTAT_LOCAL_REJECT)) && |
| 15103 | ((wcqe->parameter & IOERR_PARAM_MASK) == |
| 15104 | IOERR_NO_RESOURCES)) |
| 15105 | phba->lpfc_rampdown_queue_depth(phba); |
| 15106 | |
| 15107 | /* Log the cmpl status */ |
| 15108 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 15109 | "0373 FCP CQE cmpl: status=x%x: " |
| 15110 | "CQE: %08x %08x %08x %08x\n" , |
| 15111 | bf_get(lpfc_wcqe_c_status, wcqe), |
| 15112 | wcqe->word0, wcqe->total_data_placed, |
| 15113 | wcqe->parameter, wcqe->word3); |
| 15114 | } |
| 15115 | |
| 15116 | /* Look up the FCP command IOCB and create pseudo response IOCB */ |
| 15117 | spin_lock_irqsave(&pring->ring_lock, iflags); |
| 15118 | pring->stats.iocb_event++; |
| 15119 | cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, |
| 15120 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); |
| 15121 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 15122 | if (unlikely(!cmdiocbq)) { |
| 15123 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 15124 | "0374 FCP complete with no corresponding " |
| 15125 | "cmdiocb: iotag (%d)\n" , |
| 15126 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); |
| 15127 | return; |
| 15128 | } |
| 15129 | #ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
| 15130 | cmdiocbq->isr_timestamp = cq->isr_timestamp; |
| 15131 | #endif |
| 15132 | if (bf_get(lpfc_wcqe_c_xb, wcqe)) { |
| 15133 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 15134 | cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; |
| 15135 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 15136 | } |
| 15137 | |
| 15138 | if (cmdiocbq->cmd_cmpl) { |
| 15139 | /* For FCP the flag is cleared in cmd_cmpl */ |
| 15140 | if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) && |
| 15141 | cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) { |
| 15142 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 15143 | cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; |
| 15144 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 15145 | } |
| 15146 | |
| 15147 | /* Pass the cmd_iocb and the wcqe to the upper layer */ |
| 15148 | memcpy(&cmdiocbq->wcqe_cmpl, wcqe, |
| 15149 | sizeof(struct lpfc_wcqe_complete)); |
| 15150 | cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq); |
| 15151 | } else { |
| 15152 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 15153 | "0375 FCP cmdiocb not callback function " |
| 15154 | "iotag: (%d)\n" , |
| 15155 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); |
| 15156 | } |
| 15157 | } |
| 15158 | |
| 15159 | /** |
| 15160 | * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event |
| 15161 | * @phba: Pointer to HBA context object. |
| 15162 | * @cq: Pointer to completion queue. |
| 15163 | * @wcqe: Pointer to work-queue completion queue entry. |
| 15164 | * |
| 15165 | * This routine handles an fast-path WQ entry consumed event by invoking the |
| 15166 | * proper WQ release routine to the slow-path WQ. |
| 15167 | **/ |
| 15168 | static void |
| 15169 | lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
| 15170 | struct lpfc_wcqe_release *wcqe) |
| 15171 | { |
| 15172 | struct lpfc_queue *childwq; |
| 15173 | bool wqid_matched = false; |
| 15174 | uint16_t hba_wqid; |
| 15175 | |
| 15176 | /* Check for fast-path FCP work queue release */ |
| 15177 | hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); |
| 15178 | list_for_each_entry(childwq, &cq->child_list, list) { |
| 15179 | if (childwq->queue_id == hba_wqid) { |
| 15180 | lpfc_sli4_wq_release(q: childwq, |
| 15181 | bf_get(lpfc_wcqe_r_wqe_index, wcqe)); |
| 15182 | if (childwq->q_flag & HBA_NVMET_WQFULL) |
| 15183 | lpfc_nvmet_wqfull_process(phba, wq: childwq); |
| 15184 | wqid_matched = true; |
| 15185 | break; |
| 15186 | } |
| 15187 | } |
| 15188 | /* Report warning log message if no match found */ |
| 15189 | if (wqid_matched != true) |
| 15190 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 15191 | "2580 Fast-path wqe consume event carries " |
| 15192 | "miss-matched qid: wcqe-qid=x%x\n" , hba_wqid); |
| 15193 | } |
| 15194 | |
| 15195 | /** |
| 15196 | * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry |
| 15197 | * @phba: Pointer to HBA context object. |
| 15198 | * @cq: Pointer to completion queue. |
| 15199 | * @rcqe: Pointer to receive-queue completion queue entry. |
| 15200 | * |
| 15201 | * This routine process a receive-queue completion queue entry. |
| 15202 | * |
| 15203 | * Return: true if work posted to worker thread, otherwise false. |
| 15204 | **/ |
| 15205 | static bool |
| 15206 | lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
| 15207 | struct lpfc_rcqe *rcqe) |
| 15208 | { |
| 15209 | bool workposted = false; |
| 15210 | struct lpfc_queue *hrq; |
| 15211 | struct lpfc_queue *drq; |
| 15212 | struct rqb_dmabuf *dma_buf; |
| 15213 | struct fc_frame_header *fc_hdr; |
| 15214 | struct lpfc_nvmet_tgtport *tgtp; |
| 15215 | uint32_t status, rq_id; |
| 15216 | unsigned long iflags; |
| 15217 | uint32_t fctl, idx; |
| 15218 | |
| 15219 | if ((phba->nvmet_support == 0) || |
| 15220 | (phba->sli4_hba.nvmet_cqset == NULL)) |
| 15221 | return workposted; |
| 15222 | |
| 15223 | idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; |
| 15224 | hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; |
| 15225 | drq = phba->sli4_hba.nvmet_mrq_data[idx]; |
| 15226 | |
| 15227 | /* sanity check on queue memory */ |
| 15228 | if (unlikely(!hrq) || unlikely(!drq)) |
| 15229 | return workposted; |
| 15230 | |
| 15231 | if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) |
| 15232 | rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); |
| 15233 | else |
| 15234 | rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); |
| 15235 | |
| 15236 | if ((phba->nvmet_support == 0) || |
| 15237 | (rq_id != hrq->queue_id)) |
| 15238 | return workposted; |
| 15239 | |
| 15240 | status = bf_get(lpfc_rcqe_status, rcqe); |
| 15241 | switch (status) { |
| 15242 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
| 15243 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 15244 | "6126 Receive Frame Truncated!!\n" ); |
| 15245 | fallthrough; |
| 15246 | case FC_STATUS_RQ_SUCCESS: |
| 15247 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 15248 | lpfc_sli4_rq_release(hq: hrq, dq: drq); |
| 15249 | dma_buf = lpfc_sli_rqbuf_get(phba, hrq); |
| 15250 | if (!dma_buf) { |
| 15251 | hrq->RQ_no_buf_found++; |
| 15252 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 15253 | goto out; |
| 15254 | } |
| 15255 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 15256 | hrq->RQ_rcv_buf++; |
| 15257 | hrq->RQ_buf_posted--; |
| 15258 | fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; |
| 15259 | |
| 15260 | /* Just some basic sanity checks on FCP Command frame */ |
| 15261 | fctl = (fc_hdr->fh_f_ctl[0] << 16 | |
| 15262 | fc_hdr->fh_f_ctl[1] << 8 | |
| 15263 | fc_hdr->fh_f_ctl[2]); |
| 15264 | if (((fctl & |
| 15265 | (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != |
| 15266 | (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || |
| 15267 | (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ |
| 15268 | goto drop; |
| 15269 | |
| 15270 | if (fc_hdr->fh_type == FC_TYPE_FCP) { |
| 15271 | dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); |
| 15272 | lpfc_nvmet_unsol_fcp_event( |
| 15273 | phba, idx, nvmebuf: dma_buf, isr_ts: cq->isr_timestamp, |
| 15274 | cqflag: cq->q_flag & HBA_NVMET_CQ_NOTIFY); |
| 15275 | return false; |
| 15276 | } |
| 15277 | drop: |
| 15278 | lpfc_rq_buf_free(phba, mp: &dma_buf->hbuf); |
| 15279 | break; |
| 15280 | case FC_STATUS_INSUFF_BUF_FRM_DISC: |
| 15281 | if (phba->nvmet_support) { |
| 15282 | tgtp = phba->targetport->private; |
| 15283 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 15284 | "6401 RQE Error x%x, posted %d err_cnt " |
| 15285 | "%d: %x %x %x\n" , |
| 15286 | status, hrq->RQ_buf_posted, |
| 15287 | hrq->RQ_no_posted_buf, |
| 15288 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
| 15289 | atomic_read(&tgtp->rcv_fcp_cmd_out), |
| 15290 | atomic_read(&tgtp->xmt_fcp_release)); |
| 15291 | } |
| 15292 | fallthrough; |
| 15293 | |
| 15294 | case FC_STATUS_INSUFF_BUF_NEED_BUF: |
| 15295 | hrq->RQ_no_posted_buf++; |
| 15296 | /* Post more buffers if possible */ |
| 15297 | break; |
| 15298 | case FC_STATUS_RQ_DMA_FAILURE: |
| 15299 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 15300 | "2575 RQE DMA Error x%x, x%08x x%08x x%08x " |
| 15301 | "x%08x\n" , |
| 15302 | status, rcqe->word0, rcqe->word1, |
| 15303 | rcqe->word2, rcqe->word3); |
| 15304 | |
| 15305 | /* If IV set, no further recovery */ |
| 15306 | if (bf_get(lpfc_rcqe_iv, rcqe)) |
| 15307 | break; |
| 15308 | |
| 15309 | /* recycle consumed resource */ |
| 15310 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 15311 | lpfc_sli4_rq_release(hq: hrq, dq: drq); |
| 15312 | dma_buf = lpfc_sli_rqbuf_get(phba, hrq); |
| 15313 | if (!dma_buf) { |
| 15314 | hrq->RQ_no_buf_found++; |
| 15315 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 15316 | break; |
| 15317 | } |
| 15318 | hrq->RQ_rcv_buf++; |
| 15319 | hrq->RQ_buf_posted--; |
| 15320 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags); |
| 15321 | lpfc_rq_buf_free(phba, mp: &dma_buf->hbuf); |
| 15322 | break; |
| 15323 | default: |
| 15324 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 15325 | "2576 Unexpected RQE Status x%x, w0-3 x%08x " |
| 15326 | "x%08x x%08x x%08x\n" , |
| 15327 | status, rcqe->word0, rcqe->word1, |
| 15328 | rcqe->word2, rcqe->word3); |
| 15329 | break; |
| 15330 | } |
| 15331 | out: |
| 15332 | return workposted; |
| 15333 | } |
| 15334 | |
| 15335 | /** |
| 15336 | * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry |
| 15337 | * @phba: adapter with cq |
| 15338 | * @cq: Pointer to the completion queue. |
| 15339 | * @cqe: Pointer to fast-path completion queue entry. |
| 15340 | * |
| 15341 | * This routine process a fast-path work queue completion entry from fast-path |
| 15342 | * event queue for FCP command response completion. |
| 15343 | * |
| 15344 | * Return: true if work posted to worker thread, otherwise false. |
| 15345 | **/ |
| 15346 | static bool |
| 15347 | lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
| 15348 | struct lpfc_cqe *cqe) |
| 15349 | { |
| 15350 | struct lpfc_wcqe_release wcqe; |
| 15351 | bool workposted = false; |
| 15352 | |
| 15353 | /* Copy the work queue CQE and convert endian order if needed */ |
| 15354 | lpfc_sli4_pcimem_bcopy(srcp: cqe, destp: &wcqe, cnt: sizeof(struct lpfc_cqe)); |
| 15355 | |
| 15356 | /* Check and process for different type of WCQE and dispatch */ |
| 15357 | switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { |
| 15358 | case CQE_CODE_COMPL_WQE: |
| 15359 | case CQE_CODE_NVME_ERSP: |
| 15360 | cq->CQ_wq++; |
| 15361 | /* Process the WQ complete event */ |
| 15362 | phba->last_completion_time = jiffies; |
| 15363 | if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS) |
| 15364 | lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, |
| 15365 | wcqe: (struct lpfc_wcqe_complete *)&wcqe); |
| 15366 | break; |
| 15367 | case CQE_CODE_RELEASE_WQE: |
| 15368 | cq->CQ_release_wqe++; |
| 15369 | /* Process the WQ release event */ |
| 15370 | lpfc_sli4_fp_handle_rel_wcqe(phba, cq, |
| 15371 | wcqe: (struct lpfc_wcqe_release *)&wcqe); |
| 15372 | break; |
| 15373 | case CQE_CODE_XRI_ABORTED: |
| 15374 | cq->CQ_xri_aborted++; |
| 15375 | /* Process the WQ XRI abort event */ |
| 15376 | phba->last_completion_time = jiffies; |
| 15377 | workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, |
| 15378 | wcqe: (struct sli4_wcqe_xri_aborted *)&wcqe); |
| 15379 | break; |
| 15380 | case CQE_CODE_RECEIVE_V1: |
| 15381 | case CQE_CODE_RECEIVE: |
| 15382 | phba->last_completion_time = jiffies; |
| 15383 | if (cq->subtype == LPFC_NVMET) { |
| 15384 | workposted = lpfc_sli4_nvmet_handle_rcqe( |
| 15385 | phba, cq, rcqe: (struct lpfc_rcqe *)&wcqe); |
| 15386 | } |
| 15387 | break; |
| 15388 | default: |
| 15389 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 15390 | "0144 Not a valid CQE code: x%x\n" , |
| 15391 | bf_get(lpfc_wcqe_c_code, &wcqe)); |
| 15392 | break; |
| 15393 | } |
| 15394 | return workposted; |
| 15395 | } |
| 15396 | |
| 15397 | /** |
| 15398 | * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry |
| 15399 | * @cq: Pointer to CQ to be processed |
| 15400 | * |
| 15401 | * This routine calls the cq processing routine with the handler for |
| 15402 | * fast path CQEs. |
| 15403 | * |
| 15404 | * The CQ routine returns two values: the first is the calling status, |
| 15405 | * which indicates whether work was queued to the background discovery |
| 15406 | * thread. If true, the routine should wakeup the discovery thread; |
| 15407 | * the second is the delay parameter. If non-zero, rather than rearming |
| 15408 | * the CQ and yet another interrupt, the CQ handler should be queued so |
| 15409 | * that it is processed in a subsequent polling action. The value of |
| 15410 | * the delay indicates when to reschedule it. |
| 15411 | **/ |
| 15412 | static void |
| 15413 | __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq) |
| 15414 | { |
| 15415 | struct lpfc_hba *phba = cq->phba; |
| 15416 | unsigned long delay; |
| 15417 | bool workposted = false; |
| 15418 | int ret; |
| 15419 | |
| 15420 | /* process and rearm the CQ */ |
| 15421 | workposted |= __lpfc_sli4_process_cq(phba, cq, handler: lpfc_sli4_fp_handle_cqe, |
| 15422 | delay: &delay); |
| 15423 | |
| 15424 | if (delay) { |
| 15425 | if (is_kdump_kernel()) |
| 15426 | ret = queue_delayed_work(wq: phba->wq, dwork: &cq->sched_irqwork, |
| 15427 | delay); |
| 15428 | else |
| 15429 | ret = queue_delayed_work_on(cpu: cq->chann, wq: phba->wq, |
| 15430 | work: &cq->sched_irqwork, delay); |
| 15431 | if (!ret) |
| 15432 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 15433 | "0367 Cannot schedule queue work " |
| 15434 | "for cqid=%d on CPU %d\n" , |
| 15435 | cq->queue_id, cq->chann); |
| 15436 | } |
| 15437 | |
| 15438 | /* wake up worker thread if there are works to be done */ |
| 15439 | if (workposted) |
| 15440 | lpfc_worker_wake_up(phba); |
| 15441 | } |
| 15442 | |
| 15443 | /** |
| 15444 | * lpfc_sli4_hba_process_cq - fast-path work handler when started by |
| 15445 | * interrupt |
| 15446 | * @work: pointer to work element |
| 15447 | * |
| 15448 | * translates from the work handler and calls the fast-path handler. |
| 15449 | **/ |
| 15450 | static void |
| 15451 | lpfc_sli4_hba_process_cq(struct work_struct *work) |
| 15452 | { |
| 15453 | struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); |
| 15454 | |
| 15455 | __lpfc_sli4_hba_process_cq(cq); |
| 15456 | } |
| 15457 | |
| 15458 | /** |
| 15459 | * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry |
| 15460 | * @phba: Pointer to HBA context object. |
| 15461 | * @eq: Pointer to the queue structure. |
| 15462 | * @eqe: Pointer to fast-path event queue entry. |
| 15463 | * @poll_mode: poll_mode to execute processing the cq. |
| 15464 | * |
| 15465 | * This routine process a event queue entry from the fast-path event queue. |
| 15466 | * It will check the MajorCode and MinorCode to determine this is for a |
| 15467 | * completion event on a completion queue, if not, an error shall be logged |
| 15468 | * and just return. Otherwise, it will get to the corresponding completion |
| 15469 | * queue and process all the entries on the completion queue, rearm the |
| 15470 | * completion queue, and then return. |
| 15471 | **/ |
| 15472 | static void |
| 15473 | lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, |
| 15474 | struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode) |
| 15475 | { |
| 15476 | struct lpfc_queue *cq = NULL; |
| 15477 | uint32_t qidx = eq->hdwq; |
| 15478 | uint16_t cqid, id; |
| 15479 | int ret; |
| 15480 | |
| 15481 | if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { |
| 15482 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 15483 | "0366 Not a valid completion " |
| 15484 | "event: majorcode=x%x, minorcode=x%x\n" , |
| 15485 | bf_get_le32(lpfc_eqe_major_code, eqe), |
| 15486 | bf_get_le32(lpfc_eqe_minor_code, eqe)); |
| 15487 | return; |
| 15488 | } |
| 15489 | |
| 15490 | /* Get the reference to the corresponding CQ */ |
| 15491 | cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); |
| 15492 | |
| 15493 | /* Use the fast lookup method first */ |
| 15494 | if (cqid <= phba->sli4_hba.cq_max) { |
| 15495 | cq = phba->sli4_hba.cq_lookup[cqid]; |
| 15496 | if (cq) |
| 15497 | goto work_cq; |
| 15498 | } |
| 15499 | |
| 15500 | /* Next check for NVMET completion */ |
| 15501 | if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { |
| 15502 | id = phba->sli4_hba.nvmet_cqset[0]->queue_id; |
| 15503 | if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { |
| 15504 | /* Process NVMET unsol rcv */ |
| 15505 | cq = phba->sli4_hba.nvmet_cqset[cqid - id]; |
| 15506 | goto process_cq; |
| 15507 | } |
| 15508 | } |
| 15509 | |
| 15510 | if (phba->sli4_hba.nvmels_cq && |
| 15511 | (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { |
| 15512 | /* Process NVME unsol rcv */ |
| 15513 | cq = phba->sli4_hba.nvmels_cq; |
| 15514 | } |
| 15515 | |
| 15516 | /* Otherwise this is a Slow path event */ |
| 15517 | if (cq == NULL) { |
| 15518 | lpfc_sli4_sp_handle_eqe(phba, eqe, |
| 15519 | speq: phba->sli4_hba.hdwq[qidx].hba_eq); |
| 15520 | return; |
| 15521 | } |
| 15522 | |
| 15523 | process_cq: |
| 15524 | if (unlikely(cqid != cq->queue_id)) { |
| 15525 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 15526 | "0368 Miss-matched fast-path completion " |
| 15527 | "queue identifier: eqcqid=%d, fcpcqid=%d\n" , |
| 15528 | cqid, cq->queue_id); |
| 15529 | return; |
| 15530 | } |
| 15531 | |
| 15532 | work_cq: |
| 15533 | #if defined(CONFIG_SCSI_LPFC_DEBUG_FS) |
| 15534 | if (phba->ktime_on) |
| 15535 | cq->isr_timestamp = ktime_get_ns(); |
| 15536 | else |
| 15537 | cq->isr_timestamp = 0; |
| 15538 | #endif |
| 15539 | |
| 15540 | switch (poll_mode) { |
| 15541 | case LPFC_THREADED_IRQ: |
| 15542 | __lpfc_sli4_hba_process_cq(cq); |
| 15543 | break; |
| 15544 | case LPFC_QUEUE_WORK: |
| 15545 | default: |
| 15546 | if (is_kdump_kernel()) |
| 15547 | ret = queue_work(wq: phba->wq, work: &cq->irqwork); |
| 15548 | else |
| 15549 | ret = queue_work_on(cpu: cq->chann, wq: phba->wq, work: &cq->irqwork); |
| 15550 | if (!ret) |
| 15551 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 15552 | "0383 Cannot schedule queue work " |
| 15553 | "for CQ eqcqid=%d, cqid=%d on CPU %d\n" , |
| 15554 | cqid, cq->queue_id, |
| 15555 | raw_smp_processor_id()); |
| 15556 | break; |
| 15557 | } |
| 15558 | } |
| 15559 | |
| 15560 | /** |
| 15561 | * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer |
| 15562 | * @work: pointer to work element |
| 15563 | * |
| 15564 | * translates from the work handler and calls the fast-path handler. |
| 15565 | **/ |
| 15566 | static void |
| 15567 | lpfc_sli4_dly_hba_process_cq(struct work_struct *work) |
| 15568 | { |
| 15569 | struct lpfc_queue *cq = container_of(to_delayed_work(work), |
| 15570 | struct lpfc_queue, sched_irqwork); |
| 15571 | |
| 15572 | __lpfc_sli4_hba_process_cq(cq); |
| 15573 | } |
| 15574 | |
| 15575 | /** |
| 15576 | * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device |
| 15577 | * @irq: Interrupt number. |
| 15578 | * @dev_id: The device context pointer. |
| 15579 | * |
| 15580 | * This function is directly called from the PCI layer as an interrupt |
| 15581 | * service routine when device with SLI-4 interface spec is enabled with |
| 15582 | * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB |
| 15583 | * ring event in the HBA. However, when the device is enabled with either |
| 15584 | * MSI or Pin-IRQ interrupt mode, this function is called as part of the |
| 15585 | * device-level interrupt handler. When the PCI slot is in error recovery |
| 15586 | * or the HBA is undergoing initialization, the interrupt handler will not |
| 15587 | * process the interrupt. The SCSI FCP fast-path ring event are handled in |
| 15588 | * the intrrupt context. This function is called without any lock held. |
| 15589 | * It gets the hbalock to access and update SLI data structures. Note that, |
| 15590 | * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is |
| 15591 | * equal to that of FCP CQ index. |
| 15592 | * |
| 15593 | * The link attention and ELS ring attention events are handled |
| 15594 | * by the worker thread. The interrupt handler signals the worker thread |
| 15595 | * and returns for these events. This function is called without any lock |
| 15596 | * held. It gets the hbalock to access and update SLI data structures. |
| 15597 | * |
| 15598 | * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD |
| 15599 | * when interrupt is scheduled to be handled from a threaded irq context, or |
| 15600 | * else returns IRQ_NONE. |
| 15601 | **/ |
| 15602 | irqreturn_t |
| 15603 | lpfc_sli4_hba_intr_handler(int irq, void *dev_id) |
| 15604 | { |
| 15605 | struct lpfc_hba *phba; |
| 15606 | struct lpfc_hba_eq_hdl *hba_eq_hdl; |
| 15607 | struct lpfc_queue *fpeq; |
| 15608 | unsigned long iflag; |
| 15609 | int hba_eqidx; |
| 15610 | int ecount = 0; |
| 15611 | struct lpfc_eq_intr_info *eqi; |
| 15612 | |
| 15613 | /* Get the driver's phba structure from the dev_id */ |
| 15614 | hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; |
| 15615 | phba = hba_eq_hdl->phba; |
| 15616 | hba_eqidx = hba_eq_hdl->idx; |
| 15617 | |
| 15618 | if (unlikely(!phba)) |
| 15619 | return IRQ_NONE; |
| 15620 | if (unlikely(!phba->sli4_hba.hdwq)) |
| 15621 | return IRQ_NONE; |
| 15622 | |
| 15623 | /* Get to the EQ struct associated with this vector */ |
| 15624 | fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; |
| 15625 | if (unlikely(!fpeq)) |
| 15626 | return IRQ_NONE; |
| 15627 | |
| 15628 | /* Check device state for handling interrupt */ |
| 15629 | if (unlikely(lpfc_intr_state_check(phba))) { |
| 15630 | /* Check again for link_state with lock held */ |
| 15631 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 15632 | if (phba->link_state < LPFC_LINK_DOWN) |
| 15633 | /* Flush, clear interrupt, and rearm the EQ */ |
| 15634 | lpfc_sli4_eqcq_flush(phba, eq: fpeq); |
| 15635 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 15636 | return IRQ_NONE; |
| 15637 | } |
| 15638 | |
| 15639 | switch (fpeq->poll_mode) { |
| 15640 | case LPFC_THREADED_IRQ: |
| 15641 | /* CGN mgmt is mutually exclusive from irq processing */ |
| 15642 | if (phba->cmf_active_mode == LPFC_CFG_OFF) |
| 15643 | return IRQ_WAKE_THREAD; |
| 15644 | fallthrough; |
| 15645 | case LPFC_QUEUE_WORK: |
| 15646 | default: |
| 15647 | eqi = this_cpu_ptr(phba->sli4_hba.eq_info); |
| 15648 | eqi->icnt++; |
| 15649 | |
| 15650 | fpeq->last_cpu = raw_smp_processor_id(); |
| 15651 | |
| 15652 | if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && |
| 15653 | fpeq->q_flag & HBA_EQ_DELAY_CHK && |
| 15654 | phba->cfg_auto_imax && |
| 15655 | fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && |
| 15656 | phba->sli.sli_flag & LPFC_SLI_USE_EQDR) |
| 15657 | lpfc_sli4_mod_hba_eq_delay(phba, eq: fpeq, |
| 15658 | LPFC_MAX_AUTO_EQ_DELAY); |
| 15659 | |
| 15660 | /* process and rearm the EQ */ |
| 15661 | ecount = lpfc_sli4_process_eq(phba, eq: fpeq, LPFC_QUEUE_REARM, |
| 15662 | poll_mode: LPFC_QUEUE_WORK); |
| 15663 | |
| 15664 | if (unlikely(ecount == 0)) { |
| 15665 | fpeq->EQ_no_entry++; |
| 15666 | if (phba->intr_type == MSIX) |
| 15667 | /* MSI-X treated interrupt served as no EQ share INT */ |
| 15668 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 15669 | "0358 MSI-X interrupt with no EQE\n" ); |
| 15670 | else |
| 15671 | /* Non MSI-X treated on interrupt as EQ share INT */ |
| 15672 | return IRQ_NONE; |
| 15673 | } |
| 15674 | } |
| 15675 | |
| 15676 | return IRQ_HANDLED; |
| 15677 | } /* lpfc_sli4_hba_intr_handler */ |
| 15678 | |
| 15679 | /** |
| 15680 | * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device |
| 15681 | * @irq: Interrupt number. |
| 15682 | * @dev_id: The device context pointer. |
| 15683 | * |
| 15684 | * This function is the device-level interrupt handler to device with SLI-4 |
| 15685 | * interface spec, called from the PCI layer when either MSI or Pin-IRQ |
| 15686 | * interrupt mode is enabled and there is an event in the HBA which requires |
| 15687 | * driver attention. This function invokes the slow-path interrupt attention |
| 15688 | * handling function and fast-path interrupt attention handling function in |
| 15689 | * turn to process the relevant HBA attention events. This function is called |
| 15690 | * without any lock held. It gets the hbalock to access and update SLI data |
| 15691 | * structures. |
| 15692 | * |
| 15693 | * This function returns IRQ_HANDLED when interrupt is handled, else it |
| 15694 | * returns IRQ_NONE. |
| 15695 | **/ |
| 15696 | irqreturn_t |
| 15697 | lpfc_sli4_intr_handler(int irq, void *dev_id) |
| 15698 | { |
| 15699 | struct lpfc_hba *phba; |
| 15700 | irqreturn_t hba_irq_rc; |
| 15701 | bool hba_handled = false; |
| 15702 | int qidx; |
| 15703 | |
| 15704 | /* Get the driver's phba structure from the dev_id */ |
| 15705 | phba = (struct lpfc_hba *)dev_id; |
| 15706 | |
| 15707 | if (unlikely(!phba)) |
| 15708 | return IRQ_NONE; |
| 15709 | |
| 15710 | /* |
| 15711 | * Invoke fast-path host attention interrupt handling as appropriate. |
| 15712 | */ |
| 15713 | for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { |
| 15714 | hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, |
| 15715 | dev_id: &phba->sli4_hba.hba_eq_hdl[qidx]); |
| 15716 | if (hba_irq_rc == IRQ_HANDLED) |
| 15717 | hba_handled |= true; |
| 15718 | } |
| 15719 | |
| 15720 | return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; |
| 15721 | } /* lpfc_sli4_intr_handler */ |
| 15722 | |
| 15723 | void lpfc_sli4_poll_hbtimer(struct timer_list *t) |
| 15724 | { |
| 15725 | struct lpfc_hba *phba = timer_container_of(phba, t, cpuhp_poll_timer); |
| 15726 | struct lpfc_queue *eq; |
| 15727 | |
| 15728 | rcu_read_lock(); |
| 15729 | |
| 15730 | list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list) |
| 15731 | lpfc_sli4_poll_eq(eq); |
| 15732 | if (!list_empty(head: &phba->poll_list)) |
| 15733 | mod_timer(timer: &phba->cpuhp_poll_timer, |
| 15734 | expires: jiffies + msecs_to_jiffies(LPFC_POLL_HB)); |
| 15735 | |
| 15736 | rcu_read_unlock(); |
| 15737 | } |
| 15738 | |
| 15739 | static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq) |
| 15740 | { |
| 15741 | struct lpfc_hba *phba = eq->phba; |
| 15742 | |
| 15743 | /* kickstart slowpath processing if needed */ |
| 15744 | if (list_empty(head: &phba->poll_list)) |
| 15745 | mod_timer(timer: &phba->cpuhp_poll_timer, |
| 15746 | expires: jiffies + msecs_to_jiffies(LPFC_POLL_HB)); |
| 15747 | |
| 15748 | list_add_rcu(new: &eq->_poll_list, head: &phba->poll_list); |
| 15749 | synchronize_rcu(); |
| 15750 | } |
| 15751 | |
| 15752 | static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq) |
| 15753 | { |
| 15754 | struct lpfc_hba *phba = eq->phba; |
| 15755 | |
| 15756 | /* Disable slowpath processing for this eq. Kick start the eq |
| 15757 | * by RE-ARMING the eq's ASAP |
| 15758 | */ |
| 15759 | list_del_rcu(entry: &eq->_poll_list); |
| 15760 | synchronize_rcu(); |
| 15761 | |
| 15762 | if (list_empty(head: &phba->poll_list)) |
| 15763 | timer_delete_sync(timer: &phba->cpuhp_poll_timer); |
| 15764 | } |
| 15765 | |
| 15766 | void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba) |
| 15767 | { |
| 15768 | struct lpfc_queue *eq, *next; |
| 15769 | |
| 15770 | list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) |
| 15771 | list_del(entry: &eq->_poll_list); |
| 15772 | |
| 15773 | INIT_LIST_HEAD(list: &phba->poll_list); |
| 15774 | synchronize_rcu(); |
| 15775 | } |
| 15776 | |
| 15777 | static inline void |
| 15778 | __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode) |
| 15779 | { |
| 15780 | if (mode == eq->mode) |
| 15781 | return; |
| 15782 | /* |
| 15783 | * currently this function is only called during a hotplug |
| 15784 | * event and the cpu on which this function is executing |
| 15785 | * is going offline. By now the hotplug has instructed |
| 15786 | * the scheduler to remove this cpu from cpu active mask. |
| 15787 | * So we don't need to work about being put aside by the |
| 15788 | * scheduler for a high priority process. Yes, the inte- |
| 15789 | * rrupts could come but they are known to retire ASAP. |
| 15790 | */ |
| 15791 | |
| 15792 | /* Disable polling in the fastpath */ |
| 15793 | WRITE_ONCE(eq->mode, mode); |
| 15794 | /* flush out the store buffer */ |
| 15795 | smp_wmb(); |
| 15796 | |
| 15797 | /* |
| 15798 | * Add this eq to the polling list and start polling. For |
| 15799 | * a grace period both interrupt handler and poller will |
| 15800 | * try to process the eq _but_ that's fine. We have a |
| 15801 | * synchronization mechanism in place (queue_claimed) to |
| 15802 | * deal with it. This is just a draining phase for int- |
| 15803 | * errupt handler (not eq's) as we have guranteed through |
| 15804 | * barrier that all the CPUs have seen the new CQ_POLLED |
| 15805 | * state. which will effectively disable the REARMING of |
| 15806 | * the EQ. The whole idea is eq's die off eventually as |
| 15807 | * we are not rearming EQ's anymore. |
| 15808 | */ |
| 15809 | mode ? lpfc_sli4_add_to_poll_list(eq) : |
| 15810 | lpfc_sli4_remove_from_poll_list(eq); |
| 15811 | } |
| 15812 | |
| 15813 | void lpfc_sli4_start_polling(struct lpfc_queue *eq) |
| 15814 | { |
| 15815 | __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL); |
| 15816 | } |
| 15817 | |
| 15818 | void lpfc_sli4_stop_polling(struct lpfc_queue *eq) |
| 15819 | { |
| 15820 | struct lpfc_hba *phba = eq->phba; |
| 15821 | |
| 15822 | __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT); |
| 15823 | |
| 15824 | /* Kick start for the pending io's in h/w. |
| 15825 | * Once we switch back to interrupt processing on a eq |
| 15826 | * the io path completion will only arm eq's when it |
| 15827 | * receives a completion. But since eq's are in disa- |
| 15828 | * rmed state it doesn't receive a completion. This |
| 15829 | * creates a deadlock scenaro. |
| 15830 | */ |
| 15831 | phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM); |
| 15832 | } |
| 15833 | |
| 15834 | /** |
| 15835 | * lpfc_sli4_queue_free - free a queue structure and associated memory |
| 15836 | * @queue: The queue structure to free. |
| 15837 | * |
| 15838 | * This function frees a queue structure and the DMAable memory used for |
| 15839 | * the host resident queue. This function must be called after destroying the |
| 15840 | * queue on the HBA. |
| 15841 | **/ |
| 15842 | void |
| 15843 | lpfc_sli4_queue_free(struct lpfc_queue *queue) |
| 15844 | { |
| 15845 | struct lpfc_dmabuf *dmabuf; |
| 15846 | |
| 15847 | if (!queue) |
| 15848 | return; |
| 15849 | |
| 15850 | if (!list_empty(head: &queue->wq_list)) |
| 15851 | list_del(entry: &queue->wq_list); |
| 15852 | |
| 15853 | while (!list_empty(head: &queue->page_list)) { |
| 15854 | list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, |
| 15855 | list); |
| 15856 | dma_free_coherent(dev: &queue->phba->pcidev->dev, size: queue->page_size, |
| 15857 | cpu_addr: dmabuf->virt, dma_handle: dmabuf->phys); |
| 15858 | kfree(objp: dmabuf); |
| 15859 | } |
| 15860 | if (queue->rqbp) { |
| 15861 | lpfc_free_rq_buffer(phba: queue->phba, hq: queue); |
| 15862 | kfree(objp: queue->rqbp); |
| 15863 | } |
| 15864 | |
| 15865 | if (!list_empty(head: &queue->cpu_list)) |
| 15866 | list_del(entry: &queue->cpu_list); |
| 15867 | |
| 15868 | kfree(objp: queue); |
| 15869 | return; |
| 15870 | } |
| 15871 | |
| 15872 | /** |
| 15873 | * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure |
| 15874 | * @phba: The HBA that this queue is being created on. |
| 15875 | * @page_size: The size of a queue page |
| 15876 | * @entry_size: The size of each queue entry for this queue. |
| 15877 | * @entry_count: The number of entries that this queue will handle. |
| 15878 | * @cpu: The cpu that will primarily utilize this queue. |
| 15879 | * |
| 15880 | * This function allocates a queue structure and the DMAable memory used for |
| 15881 | * the host resident queue. This function must be called before creating the |
| 15882 | * queue on the HBA. |
| 15883 | **/ |
| 15884 | struct lpfc_queue * |
| 15885 | lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, |
| 15886 | uint32_t entry_size, uint32_t entry_count, int cpu) |
| 15887 | { |
| 15888 | struct lpfc_queue *queue; |
| 15889 | struct lpfc_dmabuf *dmabuf; |
| 15890 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
| 15891 | uint16_t x, pgcnt; |
| 15892 | |
| 15893 | if (!phba->sli4_hba.pc_sli4_params.supported) |
| 15894 | hw_page_size = page_size; |
| 15895 | |
| 15896 | pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size; |
| 15897 | |
| 15898 | /* If needed, Adjust page count to match the max the adapter supports */ |
| 15899 | if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt) |
| 15900 | pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt; |
| 15901 | |
| 15902 | queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt), |
| 15903 | GFP_KERNEL, cpu_to_node(cpu)); |
| 15904 | if (!queue) |
| 15905 | return NULL; |
| 15906 | |
| 15907 | INIT_LIST_HEAD(list: &queue->list); |
| 15908 | INIT_LIST_HEAD(list: &queue->_poll_list); |
| 15909 | INIT_LIST_HEAD(list: &queue->wq_list); |
| 15910 | INIT_LIST_HEAD(list: &queue->wqfull_list); |
| 15911 | INIT_LIST_HEAD(list: &queue->page_list); |
| 15912 | INIT_LIST_HEAD(list: &queue->child_list); |
| 15913 | INIT_LIST_HEAD(list: &queue->cpu_list); |
| 15914 | |
| 15915 | /* Set queue parameters now. If the system cannot provide memory |
| 15916 | * resources, the free routine needs to know what was allocated. |
| 15917 | */ |
| 15918 | queue->page_count = pgcnt; |
| 15919 | queue->q_pgs = (void **)&queue[1]; |
| 15920 | queue->entry_cnt_per_pg = hw_page_size / entry_size; |
| 15921 | queue->entry_size = entry_size; |
| 15922 | queue->entry_count = entry_count; |
| 15923 | queue->page_size = hw_page_size; |
| 15924 | queue->phba = phba; |
| 15925 | |
| 15926 | for (x = 0; x < queue->page_count; x++) { |
| 15927 | dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL, |
| 15928 | dev_to_node(&phba->pcidev->dev)); |
| 15929 | if (!dmabuf) |
| 15930 | goto out_fail; |
| 15931 | dmabuf->virt = dma_alloc_coherent(dev: &phba->pcidev->dev, |
| 15932 | size: hw_page_size, dma_handle: &dmabuf->phys, |
| 15933 | GFP_KERNEL); |
| 15934 | if (!dmabuf->virt) { |
| 15935 | kfree(objp: dmabuf); |
| 15936 | goto out_fail; |
| 15937 | } |
| 15938 | dmabuf->buffer_tag = x; |
| 15939 | list_add_tail(new: &dmabuf->list, head: &queue->page_list); |
| 15940 | /* use lpfc_sli4_qe to index a paritcular entry in this page */ |
| 15941 | queue->q_pgs[x] = dmabuf->virt; |
| 15942 | } |
| 15943 | INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq); |
| 15944 | INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq); |
| 15945 | INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq); |
| 15946 | INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq); |
| 15947 | |
| 15948 | /* notify_interval will be set during q creation */ |
| 15949 | |
| 15950 | return queue; |
| 15951 | out_fail: |
| 15952 | lpfc_sli4_queue_free(queue); |
| 15953 | return NULL; |
| 15954 | } |
| 15955 | |
| 15956 | /** |
| 15957 | * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory |
| 15958 | * @phba: HBA structure that indicates port to create a queue on. |
| 15959 | * @pci_barset: PCI BAR set flag. |
| 15960 | * |
| 15961 | * This function shall perform iomap of the specified PCI BAR address to host |
| 15962 | * memory address if not already done so and return it. The returned host |
| 15963 | * memory address can be NULL. |
| 15964 | */ |
| 15965 | static void __iomem * |
| 15966 | lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) |
| 15967 | { |
| 15968 | if (!phba->pcidev) |
| 15969 | return NULL; |
| 15970 | |
| 15971 | switch (pci_barset) { |
| 15972 | case WQ_PCI_BAR_0_AND_1: |
| 15973 | return phba->pci_bar0_memmap_p; |
| 15974 | case WQ_PCI_BAR_2_AND_3: |
| 15975 | return phba->pci_bar2_memmap_p; |
| 15976 | case WQ_PCI_BAR_4_AND_5: |
| 15977 | return phba->pci_bar4_memmap_p; |
| 15978 | default: |
| 15979 | break; |
| 15980 | } |
| 15981 | return NULL; |
| 15982 | } |
| 15983 | |
| 15984 | /** |
| 15985 | * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs |
| 15986 | * @phba: HBA structure that EQs are on. |
| 15987 | * @startq: The starting EQ index to modify |
| 15988 | * @numq: The number of EQs (consecutive indexes) to modify |
| 15989 | * @usdelay: amount of delay |
| 15990 | * |
| 15991 | * This function revises the EQ delay on 1 or more EQs. The EQ delay |
| 15992 | * is set either by writing to a register (if supported by the SLI Port) |
| 15993 | * or by mailbox command. The mailbox command allows several EQs to be |
| 15994 | * updated at once. |
| 15995 | * |
| 15996 | * The @phba struct is used to send a mailbox command to HBA. The @startq |
| 15997 | * is used to get the starting EQ index to change. The @numq value is |
| 15998 | * used to specify how many consecutive EQ indexes, starting at EQ index, |
| 15999 | * are to be changed. This function is asynchronous and will wait for any |
| 16000 | * mailbox commands to finish before returning. |
| 16001 | * |
| 16002 | * On success this function will return a zero. If unable to allocate |
| 16003 | * enough memory this function will return -ENOMEM. If a mailbox command |
| 16004 | * fails this function will return -ENXIO. Note: on ENXIO, some EQs may |
| 16005 | * have had their delay multipler changed. |
| 16006 | **/ |
| 16007 | void |
| 16008 | lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, |
| 16009 | uint32_t numq, uint32_t usdelay) |
| 16010 | { |
| 16011 | struct lpfc_mbx_modify_eq_delay *eq_delay; |
| 16012 | LPFC_MBOXQ_t *mbox; |
| 16013 | struct lpfc_queue *eq; |
| 16014 | int cnt = 0, rc, length; |
| 16015 | uint32_t shdr_status, shdr_add_status; |
| 16016 | uint32_t dmult; |
| 16017 | int qidx; |
| 16018 | union lpfc_sli4_cfg_shdr *shdr; |
| 16019 | |
| 16020 | if (startq >= phba->cfg_irq_chann) |
| 16021 | return; |
| 16022 | |
| 16023 | if (usdelay > 0xFFFF) { |
| 16024 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME, |
| 16025 | "6429 usdelay %d too large. Scaled down to " |
| 16026 | "0xFFFF.\n" , usdelay); |
| 16027 | usdelay = 0xFFFF; |
| 16028 | } |
| 16029 | |
| 16030 | /* set values by EQ_DELAY register if supported */ |
| 16031 | if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { |
| 16032 | for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { |
| 16033 | eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; |
| 16034 | if (!eq) |
| 16035 | continue; |
| 16036 | |
| 16037 | lpfc_sli4_mod_hba_eq_delay(phba, eq, delay: usdelay); |
| 16038 | |
| 16039 | if (++cnt >= numq) |
| 16040 | break; |
| 16041 | } |
| 16042 | return; |
| 16043 | } |
| 16044 | |
| 16045 | /* Otherwise, set values by mailbox cmd */ |
| 16046 | |
| 16047 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 16048 | if (!mbox) { |
| 16049 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 16050 | "6428 Failed allocating mailbox cmd buffer." |
| 16051 | " EQ delay was not set.\n" ); |
| 16052 | return; |
| 16053 | } |
| 16054 | length = (sizeof(struct lpfc_mbx_modify_eq_delay) - |
| 16055 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 16056 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 16057 | LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, |
| 16058 | length, LPFC_SLI4_MBX_EMBED); |
| 16059 | eq_delay = &mbox->u.mqe.un.eq_delay; |
| 16060 | |
| 16061 | /* Calculate delay multiper from maximum interrupt per second */ |
| 16062 | dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC; |
| 16063 | if (dmult) |
| 16064 | dmult--; |
| 16065 | if (dmult > LPFC_DMULT_MAX) |
| 16066 | dmult = LPFC_DMULT_MAX; |
| 16067 | |
| 16068 | for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { |
| 16069 | eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; |
| 16070 | if (!eq) |
| 16071 | continue; |
| 16072 | eq->q_mode = usdelay; |
| 16073 | eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; |
| 16074 | eq_delay->u.request.eq[cnt].phase = 0; |
| 16075 | eq_delay->u.request.eq[cnt].delay_multi = dmult; |
| 16076 | |
| 16077 | if (++cnt >= numq) |
| 16078 | break; |
| 16079 | } |
| 16080 | eq_delay->u.request.num_eq = cnt; |
| 16081 | |
| 16082 | mbox->vport = phba->pport; |
| 16083 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 16084 | mbox->ctx_ndlp = NULL; |
| 16085 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 16086 | shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; |
| 16087 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 16088 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 16089 | if (shdr_status || shdr_add_status || rc) { |
| 16090 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 16091 | "2512 MODIFY_EQ_DELAY mailbox failed with " |
| 16092 | "status x%x add_status x%x, mbx status x%x\n" , |
| 16093 | shdr_status, shdr_add_status, rc); |
| 16094 | } |
| 16095 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 16096 | return; |
| 16097 | } |
| 16098 | |
| 16099 | /** |
| 16100 | * lpfc_eq_create - Create an Event Queue on the HBA |
| 16101 | * @phba: HBA structure that indicates port to create a queue on. |
| 16102 | * @eq: The queue structure to use to create the event queue. |
| 16103 | * @imax: The maximum interrupt per second limit. |
| 16104 | * |
| 16105 | * This function creates an event queue, as detailed in @eq, on a port, |
| 16106 | * described by @phba by sending an EQ_CREATE mailbox command to the HBA. |
| 16107 | * |
| 16108 | * The @phba struct is used to send mailbox command to HBA. The @eq struct |
| 16109 | * is used to get the entry count and entry size that are necessary to |
| 16110 | * determine the number of pages to allocate and use for this queue. This |
| 16111 | * function will send the EQ_CREATE mailbox command to the HBA to setup the |
| 16112 | * event queue. This function is asynchronous and will wait for the mailbox |
| 16113 | * command to finish before continuing. |
| 16114 | * |
| 16115 | * On success this function will return a zero. If unable to allocate enough |
| 16116 | * memory this function will return -ENOMEM. If the queue create mailbox command |
| 16117 | * fails this function will return -ENXIO. |
| 16118 | **/ |
| 16119 | int |
| 16120 | lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) |
| 16121 | { |
| 16122 | struct lpfc_mbx_eq_create *eq_create; |
| 16123 | LPFC_MBOXQ_t *mbox; |
| 16124 | int rc, length, status = 0; |
| 16125 | struct lpfc_dmabuf *dmabuf; |
| 16126 | uint32_t shdr_status, shdr_add_status; |
| 16127 | union lpfc_sli4_cfg_shdr *shdr; |
| 16128 | uint16_t dmult; |
| 16129 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
| 16130 | |
| 16131 | /* sanity check on queue memory */ |
| 16132 | if (!eq) |
| 16133 | return -ENODEV; |
| 16134 | if (!phba->sli4_hba.pc_sli4_params.supported) |
| 16135 | hw_page_size = SLI4_PAGE_SIZE; |
| 16136 | |
| 16137 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 16138 | if (!mbox) |
| 16139 | return -ENOMEM; |
| 16140 | length = (sizeof(struct lpfc_mbx_eq_create) - |
| 16141 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 16142 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 16143 | LPFC_MBOX_OPCODE_EQ_CREATE, |
| 16144 | length, LPFC_SLI4_MBX_EMBED); |
| 16145 | eq_create = &mbox->u.mqe.un.eq_create; |
| 16146 | shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; |
| 16147 | bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, |
| 16148 | eq->page_count); |
| 16149 | bf_set(lpfc_eq_context_size, &eq_create->u.request.context, |
| 16150 | LPFC_EQE_SIZE); |
| 16151 | bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); |
| 16152 | |
| 16153 | /* Use version 2 of CREATE_EQ if eqav is set */ |
| 16154 | if (phba->sli4_hba.pc_sli4_params.eqav) { |
| 16155 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
| 16156 | LPFC_Q_CREATE_VERSION_2); |
| 16157 | bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, |
| 16158 | phba->sli4_hba.pc_sli4_params.eqav); |
| 16159 | } |
| 16160 | |
| 16161 | /* don't setup delay multiplier using EQ_CREATE */ |
| 16162 | dmult = 0; |
| 16163 | bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, |
| 16164 | dmult); |
| 16165 | switch (eq->entry_count) { |
| 16166 | default: |
| 16167 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 16168 | "0360 Unsupported EQ count. (%d)\n" , |
| 16169 | eq->entry_count); |
| 16170 | if (eq->entry_count < 256) { |
| 16171 | status = -EINVAL; |
| 16172 | goto out; |
| 16173 | } |
| 16174 | fallthrough; /* otherwise default to smallest count */ |
| 16175 | case 256: |
| 16176 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, |
| 16177 | LPFC_EQ_CNT_256); |
| 16178 | break; |
| 16179 | case 512: |
| 16180 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, |
| 16181 | LPFC_EQ_CNT_512); |
| 16182 | break; |
| 16183 | case 1024: |
| 16184 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, |
| 16185 | LPFC_EQ_CNT_1024); |
| 16186 | break; |
| 16187 | case 2048: |
| 16188 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, |
| 16189 | LPFC_EQ_CNT_2048); |
| 16190 | break; |
| 16191 | case 4096: |
| 16192 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, |
| 16193 | LPFC_EQ_CNT_4096); |
| 16194 | break; |
| 16195 | } |
| 16196 | list_for_each_entry(dmabuf, &eq->page_list, list) { |
| 16197 | memset(dmabuf->virt, 0, hw_page_size); |
| 16198 | eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = |
| 16199 | putPaddrLow(dmabuf->phys); |
| 16200 | eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = |
| 16201 | putPaddrHigh(dmabuf->phys); |
| 16202 | } |
| 16203 | mbox->vport = phba->pport; |
| 16204 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 16205 | mbox->ctx_buf = NULL; |
| 16206 | mbox->ctx_ndlp = NULL; |
| 16207 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 16208 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 16209 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 16210 | if (shdr_status || shdr_add_status || rc) { |
| 16211 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 16212 | "2500 EQ_CREATE mailbox failed with " |
| 16213 | "status x%x add_status x%x, mbx status x%x\n" , |
| 16214 | shdr_status, shdr_add_status, rc); |
| 16215 | status = -ENXIO; |
| 16216 | } |
| 16217 | eq->type = LPFC_EQ; |
| 16218 | eq->subtype = LPFC_NONE; |
| 16219 | eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); |
| 16220 | if (eq->queue_id == 0xFFFF) |
| 16221 | status = -ENXIO; |
| 16222 | eq->host_index = 0; |
| 16223 | eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; |
| 16224 | eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; |
| 16225 | out: |
| 16226 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 16227 | return status; |
| 16228 | } |
| 16229 | |
| 16230 | /** |
| 16231 | * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler |
| 16232 | * @irq: Interrupt number. |
| 16233 | * @dev_id: The device context pointer. |
| 16234 | * |
| 16235 | * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within |
| 16236 | * threaded irq context. |
| 16237 | * |
| 16238 | * Returns |
| 16239 | * IRQ_HANDLED - interrupt is handled |
| 16240 | * IRQ_NONE - otherwise |
| 16241 | **/ |
| 16242 | irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id) |
| 16243 | { |
| 16244 | struct lpfc_hba *phba; |
| 16245 | struct lpfc_hba_eq_hdl *hba_eq_hdl; |
| 16246 | struct lpfc_queue *fpeq; |
| 16247 | int ecount = 0; |
| 16248 | int hba_eqidx; |
| 16249 | struct lpfc_eq_intr_info *eqi; |
| 16250 | |
| 16251 | /* Get the driver's phba structure from the dev_id */ |
| 16252 | hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; |
| 16253 | phba = hba_eq_hdl->phba; |
| 16254 | hba_eqidx = hba_eq_hdl->idx; |
| 16255 | |
| 16256 | if (unlikely(!phba)) |
| 16257 | return IRQ_NONE; |
| 16258 | if (unlikely(!phba->sli4_hba.hdwq)) |
| 16259 | return IRQ_NONE; |
| 16260 | |
| 16261 | /* Get to the EQ struct associated with this vector */ |
| 16262 | fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; |
| 16263 | if (unlikely(!fpeq)) |
| 16264 | return IRQ_NONE; |
| 16265 | |
| 16266 | eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id()); |
| 16267 | eqi->icnt++; |
| 16268 | |
| 16269 | fpeq->last_cpu = raw_smp_processor_id(); |
| 16270 | |
| 16271 | if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && |
| 16272 | fpeq->q_flag & HBA_EQ_DELAY_CHK && |
| 16273 | phba->cfg_auto_imax && |
| 16274 | fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && |
| 16275 | phba->sli.sli_flag & LPFC_SLI_USE_EQDR) |
| 16276 | lpfc_sli4_mod_hba_eq_delay(phba, eq: fpeq, LPFC_MAX_AUTO_EQ_DELAY); |
| 16277 | |
| 16278 | /* process and rearm the EQ */ |
| 16279 | ecount = lpfc_sli4_process_eq(phba, eq: fpeq, LPFC_QUEUE_REARM, |
| 16280 | poll_mode: LPFC_THREADED_IRQ); |
| 16281 | |
| 16282 | if (unlikely(ecount == 0)) { |
| 16283 | fpeq->EQ_no_entry++; |
| 16284 | if (phba->intr_type == MSIX) |
| 16285 | /* MSI-X treated interrupt served as no EQ share INT */ |
| 16286 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 16287 | "3358 MSI-X interrupt with no EQE\n" ); |
| 16288 | else |
| 16289 | /* Non MSI-X treated on interrupt as EQ share INT */ |
| 16290 | return IRQ_NONE; |
| 16291 | } |
| 16292 | return IRQ_HANDLED; |
| 16293 | } |
| 16294 | |
| 16295 | /** |
| 16296 | * lpfc_cq_create - Create a Completion Queue on the HBA |
| 16297 | * @phba: HBA structure that indicates port to create a queue on. |
| 16298 | * @cq: The queue structure to use to create the completion queue. |
| 16299 | * @eq: The event queue to bind this completion queue to. |
| 16300 | * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc). |
| 16301 | * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). |
| 16302 | * |
| 16303 | * This function creates a completion queue, as detailed in @wq, on a port, |
| 16304 | * described by @phba by sending a CQ_CREATE mailbox command to the HBA. |
| 16305 | * |
| 16306 | * The @phba struct is used to send mailbox command to HBA. The @cq struct |
| 16307 | * is used to get the entry count and entry size that are necessary to |
| 16308 | * determine the number of pages to allocate and use for this queue. The @eq |
| 16309 | * is used to indicate which event queue to bind this completion queue to. This |
| 16310 | * function will send the CQ_CREATE mailbox command to the HBA to setup the |
| 16311 | * completion queue. This function is asynchronous and will wait for the mailbox |
| 16312 | * command to finish before continuing. |
| 16313 | * |
| 16314 | * On success this function will return a zero. If unable to allocate enough |
| 16315 | * memory this function will return -ENOMEM. If the queue create mailbox command |
| 16316 | * fails this function will return -ENXIO. |
| 16317 | **/ |
| 16318 | int |
| 16319 | lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, |
| 16320 | struct lpfc_queue *eq, uint32_t type, uint32_t subtype) |
| 16321 | { |
| 16322 | struct lpfc_mbx_cq_create *cq_create; |
| 16323 | struct lpfc_dmabuf *dmabuf; |
| 16324 | LPFC_MBOXQ_t *mbox; |
| 16325 | int rc, length, status = 0; |
| 16326 | uint32_t shdr_status, shdr_add_status; |
| 16327 | union lpfc_sli4_cfg_shdr *shdr; |
| 16328 | |
| 16329 | /* sanity check on queue memory */ |
| 16330 | if (!cq || !eq) |
| 16331 | return -ENODEV; |
| 16332 | |
| 16333 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 16334 | if (!mbox) |
| 16335 | return -ENOMEM; |
| 16336 | length = (sizeof(struct lpfc_mbx_cq_create) - |
| 16337 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 16338 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 16339 | LPFC_MBOX_OPCODE_CQ_CREATE, |
| 16340 | length, LPFC_SLI4_MBX_EMBED); |
| 16341 | cq_create = &mbox->u.mqe.un.cq_create; |
| 16342 | shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; |
| 16343 | bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, |
| 16344 | cq->page_count); |
| 16345 | bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); |
| 16346 | bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); |
| 16347 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
| 16348 | phba->sli4_hba.pc_sli4_params.cqv); |
| 16349 | if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { |
| 16350 | bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, |
| 16351 | (cq->page_size / SLI4_PAGE_SIZE)); |
| 16352 | bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, |
| 16353 | eq->queue_id); |
| 16354 | bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, |
| 16355 | phba->sli4_hba.pc_sli4_params.cqav); |
| 16356 | } else { |
| 16357 | bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, |
| 16358 | eq->queue_id); |
| 16359 | } |
| 16360 | switch (cq->entry_count) { |
| 16361 | case 2048: |
| 16362 | case 4096: |
| 16363 | if (phba->sli4_hba.pc_sli4_params.cqv == |
| 16364 | LPFC_Q_CREATE_VERSION_2) { |
| 16365 | cq_create->u.request.context.lpfc_cq_context_count = |
| 16366 | cq->entry_count; |
| 16367 | bf_set(lpfc_cq_context_count, |
| 16368 | &cq_create->u.request.context, |
| 16369 | LPFC_CQ_CNT_WORD7); |
| 16370 | break; |
| 16371 | } |
| 16372 | fallthrough; |
| 16373 | default: |
| 16374 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 16375 | "0361 Unsupported CQ count: " |
| 16376 | "entry cnt %d sz %d pg cnt %d\n" , |
| 16377 | cq->entry_count, cq->entry_size, |
| 16378 | cq->page_count); |
| 16379 | if (cq->entry_count < 256) { |
| 16380 | status = -EINVAL; |
| 16381 | goto out; |
| 16382 | } |
| 16383 | fallthrough; /* otherwise default to smallest count */ |
| 16384 | case 256: |
| 16385 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, |
| 16386 | LPFC_CQ_CNT_256); |
| 16387 | break; |
| 16388 | case 512: |
| 16389 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, |
| 16390 | LPFC_CQ_CNT_512); |
| 16391 | break; |
| 16392 | case 1024: |
| 16393 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, |
| 16394 | LPFC_CQ_CNT_1024); |
| 16395 | break; |
| 16396 | } |
| 16397 | list_for_each_entry(dmabuf, &cq->page_list, list) { |
| 16398 | memset(dmabuf->virt, 0, cq->page_size); |
| 16399 | cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = |
| 16400 | putPaddrLow(dmabuf->phys); |
| 16401 | cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = |
| 16402 | putPaddrHigh(dmabuf->phys); |
| 16403 | } |
| 16404 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 16405 | |
| 16406 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 16407 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 16408 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 16409 | if (shdr_status || shdr_add_status || rc) { |
| 16410 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 16411 | "2501 CQ_CREATE mailbox failed with " |
| 16412 | "status x%x add_status x%x, mbx status x%x\n" , |
| 16413 | shdr_status, shdr_add_status, rc); |
| 16414 | status = -ENXIO; |
| 16415 | goto out; |
| 16416 | } |
| 16417 | cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); |
| 16418 | if (cq->queue_id == 0xFFFF) { |
| 16419 | status = -ENXIO; |
| 16420 | goto out; |
| 16421 | } |
| 16422 | /* link the cq onto the parent eq child list */ |
| 16423 | list_add_tail(new: &cq->list, head: &eq->child_list); |
| 16424 | /* Set up completion queue's type and subtype */ |
| 16425 | cq->type = type; |
| 16426 | cq->subtype = subtype; |
| 16427 | cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); |
| 16428 | cq->assoc_qid = eq->queue_id; |
| 16429 | cq->assoc_qp = eq; |
| 16430 | cq->host_index = 0; |
| 16431 | cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; |
| 16432 | cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count); |
| 16433 | |
| 16434 | if (cq->queue_id > phba->sli4_hba.cq_max) |
| 16435 | phba->sli4_hba.cq_max = cq->queue_id; |
| 16436 | out: |
| 16437 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 16438 | return status; |
| 16439 | } |
| 16440 | |
| 16441 | /** |
| 16442 | * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ |
| 16443 | * @phba: HBA structure that indicates port to create a queue on. |
| 16444 | * @cqp: The queue structure array to use to create the completion queues. |
| 16445 | * @hdwq: The hardware queue array with the EQ to bind completion queues to. |
| 16446 | * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc). |
| 16447 | * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). |
| 16448 | * |
| 16449 | * This function creates a set of completion queue, s to support MRQ |
| 16450 | * as detailed in @cqp, on a port, |
| 16451 | * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA. |
| 16452 | * |
| 16453 | * The @phba struct is used to send mailbox command to HBA. The @cq struct |
| 16454 | * is used to get the entry count and entry size that are necessary to |
| 16455 | * determine the number of pages to allocate and use for this queue. The @eq |
| 16456 | * is used to indicate which event queue to bind this completion queue to. This |
| 16457 | * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the |
| 16458 | * completion queue. This function is asynchronous and will wait for the mailbox |
| 16459 | * command to finish before continuing. |
| 16460 | * |
| 16461 | * On success this function will return a zero. If unable to allocate enough |
| 16462 | * memory this function will return -ENOMEM. If the queue create mailbox command |
| 16463 | * fails this function will return -ENXIO. |
| 16464 | **/ |
| 16465 | int |
| 16466 | lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, |
| 16467 | struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, |
| 16468 | uint32_t subtype) |
| 16469 | { |
| 16470 | struct lpfc_queue *cq; |
| 16471 | struct lpfc_queue *eq; |
| 16472 | struct lpfc_mbx_cq_create_set *cq_set; |
| 16473 | struct lpfc_dmabuf *dmabuf; |
| 16474 | LPFC_MBOXQ_t *mbox; |
| 16475 | int rc, length, alloclen, status = 0; |
| 16476 | int cnt, idx, numcq, page_idx = 0; |
| 16477 | uint32_t shdr_status, shdr_add_status; |
| 16478 | union lpfc_sli4_cfg_shdr *shdr; |
| 16479 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
| 16480 | |
| 16481 | /* sanity check on queue memory */ |
| 16482 | numcq = phba->cfg_nvmet_mrq; |
| 16483 | if (!cqp || !hdwq || !numcq) |
| 16484 | return -ENODEV; |
| 16485 | |
| 16486 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 16487 | if (!mbox) |
| 16488 | return -ENOMEM; |
| 16489 | |
| 16490 | length = sizeof(struct lpfc_mbx_cq_create_set); |
| 16491 | length += ((numcq * cqp[0]->page_count) * |
| 16492 | sizeof(struct dma_address)); |
| 16493 | alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 16494 | LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, |
| 16495 | LPFC_SLI4_MBX_NEMBED); |
| 16496 | if (alloclen < length) { |
| 16497 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 16498 | "3098 Allocated DMA memory size (%d) is " |
| 16499 | "less than the requested DMA memory size " |
| 16500 | "(%d)\n" , alloclen, length); |
| 16501 | status = -ENOMEM; |
| 16502 | goto out; |
| 16503 | } |
| 16504 | cq_set = mbox->sge_array->addr[0]; |
| 16505 | shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; |
| 16506 | bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); |
| 16507 | |
| 16508 | for (idx = 0; idx < numcq; idx++) { |
| 16509 | cq = cqp[idx]; |
| 16510 | eq = hdwq[idx].hba_eq; |
| 16511 | if (!cq || !eq) { |
| 16512 | status = -ENOMEM; |
| 16513 | goto out; |
| 16514 | } |
| 16515 | if (!phba->sli4_hba.pc_sli4_params.supported) |
| 16516 | hw_page_size = cq->page_size; |
| 16517 | |
| 16518 | switch (idx) { |
| 16519 | case 0: |
| 16520 | bf_set(lpfc_mbx_cq_create_set_page_size, |
| 16521 | &cq_set->u.request, |
| 16522 | (hw_page_size / SLI4_PAGE_SIZE)); |
| 16523 | bf_set(lpfc_mbx_cq_create_set_num_pages, |
| 16524 | &cq_set->u.request, cq->page_count); |
| 16525 | bf_set(lpfc_mbx_cq_create_set_evt, |
| 16526 | &cq_set->u.request, 1); |
| 16527 | bf_set(lpfc_mbx_cq_create_set_valid, |
| 16528 | &cq_set->u.request, 1); |
| 16529 | bf_set(lpfc_mbx_cq_create_set_cqe_size, |
| 16530 | &cq_set->u.request, 0); |
| 16531 | bf_set(lpfc_mbx_cq_create_set_num_cq, |
| 16532 | &cq_set->u.request, numcq); |
| 16533 | bf_set(lpfc_mbx_cq_create_set_autovalid, |
| 16534 | &cq_set->u.request, |
| 16535 | phba->sli4_hba.pc_sli4_params.cqav); |
| 16536 | switch (cq->entry_count) { |
| 16537 | case 2048: |
| 16538 | case 4096: |
| 16539 | if (phba->sli4_hba.pc_sli4_params.cqv == |
| 16540 | LPFC_Q_CREATE_VERSION_2) { |
| 16541 | bf_set(lpfc_mbx_cq_create_set_cqe_cnt_lo, |
| 16542 | &cq_set->u.request, |
| 16543 | cq->entry_count); |
| 16544 | bf_set(lpfc_mbx_cq_create_set_cqecnt, |
| 16545 | &cq_set->u.request, |
| 16546 | LPFC_CQ_CNT_WORD7); |
| 16547 | break; |
| 16548 | } |
| 16549 | fallthrough; |
| 16550 | default: |
| 16551 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 16552 | "3118 Bad CQ count. (%d)\n" , |
| 16553 | cq->entry_count); |
| 16554 | if (cq->entry_count < 256) { |
| 16555 | status = -EINVAL; |
| 16556 | goto out; |
| 16557 | } |
| 16558 | fallthrough; /* otherwise default to smallest */ |
| 16559 | case 256: |
| 16560 | bf_set(lpfc_mbx_cq_create_set_cqecnt, |
| 16561 | &cq_set->u.request, LPFC_CQ_CNT_256); |
| 16562 | break; |
| 16563 | case 512: |
| 16564 | bf_set(lpfc_mbx_cq_create_set_cqecnt, |
| 16565 | &cq_set->u.request, LPFC_CQ_CNT_512); |
| 16566 | break; |
| 16567 | case 1024: |
| 16568 | bf_set(lpfc_mbx_cq_create_set_cqecnt, |
| 16569 | &cq_set->u.request, LPFC_CQ_CNT_1024); |
| 16570 | break; |
| 16571 | } |
| 16572 | bf_set(lpfc_mbx_cq_create_set_eq_id0, |
| 16573 | &cq_set->u.request, eq->queue_id); |
| 16574 | break; |
| 16575 | case 1: |
| 16576 | bf_set(lpfc_mbx_cq_create_set_eq_id1, |
| 16577 | &cq_set->u.request, eq->queue_id); |
| 16578 | break; |
| 16579 | case 2: |
| 16580 | bf_set(lpfc_mbx_cq_create_set_eq_id2, |
| 16581 | &cq_set->u.request, eq->queue_id); |
| 16582 | break; |
| 16583 | case 3: |
| 16584 | bf_set(lpfc_mbx_cq_create_set_eq_id3, |
| 16585 | &cq_set->u.request, eq->queue_id); |
| 16586 | break; |
| 16587 | case 4: |
| 16588 | bf_set(lpfc_mbx_cq_create_set_eq_id4, |
| 16589 | &cq_set->u.request, eq->queue_id); |
| 16590 | break; |
| 16591 | case 5: |
| 16592 | bf_set(lpfc_mbx_cq_create_set_eq_id5, |
| 16593 | &cq_set->u.request, eq->queue_id); |
| 16594 | break; |
| 16595 | case 6: |
| 16596 | bf_set(lpfc_mbx_cq_create_set_eq_id6, |
| 16597 | &cq_set->u.request, eq->queue_id); |
| 16598 | break; |
| 16599 | case 7: |
| 16600 | bf_set(lpfc_mbx_cq_create_set_eq_id7, |
| 16601 | &cq_set->u.request, eq->queue_id); |
| 16602 | break; |
| 16603 | case 8: |
| 16604 | bf_set(lpfc_mbx_cq_create_set_eq_id8, |
| 16605 | &cq_set->u.request, eq->queue_id); |
| 16606 | break; |
| 16607 | case 9: |
| 16608 | bf_set(lpfc_mbx_cq_create_set_eq_id9, |
| 16609 | &cq_set->u.request, eq->queue_id); |
| 16610 | break; |
| 16611 | case 10: |
| 16612 | bf_set(lpfc_mbx_cq_create_set_eq_id10, |
| 16613 | &cq_set->u.request, eq->queue_id); |
| 16614 | break; |
| 16615 | case 11: |
| 16616 | bf_set(lpfc_mbx_cq_create_set_eq_id11, |
| 16617 | &cq_set->u.request, eq->queue_id); |
| 16618 | break; |
| 16619 | case 12: |
| 16620 | bf_set(lpfc_mbx_cq_create_set_eq_id12, |
| 16621 | &cq_set->u.request, eq->queue_id); |
| 16622 | break; |
| 16623 | case 13: |
| 16624 | bf_set(lpfc_mbx_cq_create_set_eq_id13, |
| 16625 | &cq_set->u.request, eq->queue_id); |
| 16626 | break; |
| 16627 | case 14: |
| 16628 | bf_set(lpfc_mbx_cq_create_set_eq_id14, |
| 16629 | &cq_set->u.request, eq->queue_id); |
| 16630 | break; |
| 16631 | case 15: |
| 16632 | bf_set(lpfc_mbx_cq_create_set_eq_id15, |
| 16633 | &cq_set->u.request, eq->queue_id); |
| 16634 | break; |
| 16635 | } |
| 16636 | |
| 16637 | /* link the cq onto the parent eq child list */ |
| 16638 | list_add_tail(new: &cq->list, head: &eq->child_list); |
| 16639 | /* Set up completion queue's type and subtype */ |
| 16640 | cq->type = type; |
| 16641 | cq->subtype = subtype; |
| 16642 | cq->assoc_qid = eq->queue_id; |
| 16643 | cq->assoc_qp = eq; |
| 16644 | cq->host_index = 0; |
| 16645 | cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; |
| 16646 | cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, |
| 16647 | cq->entry_count); |
| 16648 | cq->chann = idx; |
| 16649 | |
| 16650 | rc = 0; |
| 16651 | list_for_each_entry(dmabuf, &cq->page_list, list) { |
| 16652 | memset(dmabuf->virt, 0, hw_page_size); |
| 16653 | cnt = page_idx + dmabuf->buffer_tag; |
| 16654 | cq_set->u.request.page[cnt].addr_lo = |
| 16655 | putPaddrLow(dmabuf->phys); |
| 16656 | cq_set->u.request.page[cnt].addr_hi = |
| 16657 | putPaddrHigh(dmabuf->phys); |
| 16658 | rc++; |
| 16659 | } |
| 16660 | page_idx += rc; |
| 16661 | } |
| 16662 | |
| 16663 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 16664 | |
| 16665 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 16666 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 16667 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 16668 | if (shdr_status || shdr_add_status || rc) { |
| 16669 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 16670 | "3119 CQ_CREATE_SET mailbox failed with " |
| 16671 | "status x%x add_status x%x, mbx status x%x\n" , |
| 16672 | shdr_status, shdr_add_status, rc); |
| 16673 | status = -ENXIO; |
| 16674 | goto out; |
| 16675 | } |
| 16676 | rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); |
| 16677 | if (rc == 0xFFFF) { |
| 16678 | status = -ENXIO; |
| 16679 | goto out; |
| 16680 | } |
| 16681 | |
| 16682 | for (idx = 0; idx < numcq; idx++) { |
| 16683 | cq = cqp[idx]; |
| 16684 | cq->queue_id = rc + idx; |
| 16685 | if (cq->queue_id > phba->sli4_hba.cq_max) |
| 16686 | phba->sli4_hba.cq_max = cq->queue_id; |
| 16687 | } |
| 16688 | |
| 16689 | out: |
| 16690 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 16691 | return status; |
| 16692 | } |
| 16693 | |
| 16694 | /** |
| 16695 | * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration |
| 16696 | * @phba: HBA structure that indicates port to create a queue on. |
| 16697 | * @mq: The queue structure to use to create the mailbox queue. |
| 16698 | * @mbox: An allocated pointer to type LPFC_MBOXQ_t |
| 16699 | * @cq: The completion queue to associate with this cq. |
| 16700 | * |
| 16701 | * This function provides failback (fb) functionality when the |
| 16702 | * mq_create_ext fails on older FW generations. It's purpose is identical |
| 16703 | * to mq_create_ext otherwise. |
| 16704 | * |
| 16705 | * This routine cannot fail as all attributes were previously accessed and |
| 16706 | * initialized in mq_create_ext. |
| 16707 | **/ |
| 16708 | static void |
| 16709 | lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, |
| 16710 | LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) |
| 16711 | { |
| 16712 | struct lpfc_mbx_mq_create *mq_create; |
| 16713 | struct lpfc_dmabuf *dmabuf; |
| 16714 | int length; |
| 16715 | |
| 16716 | length = (sizeof(struct lpfc_mbx_mq_create) - |
| 16717 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 16718 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 16719 | LPFC_MBOX_OPCODE_MQ_CREATE, |
| 16720 | length, LPFC_SLI4_MBX_EMBED); |
| 16721 | mq_create = &mbox->u.mqe.un.mq_create; |
| 16722 | bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, |
| 16723 | mq->page_count); |
| 16724 | bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, |
| 16725 | cq->queue_id); |
| 16726 | bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); |
| 16727 | switch (mq->entry_count) { |
| 16728 | case 16: |
| 16729 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
| 16730 | LPFC_MQ_RING_SIZE_16); |
| 16731 | break; |
| 16732 | case 32: |
| 16733 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
| 16734 | LPFC_MQ_RING_SIZE_32); |
| 16735 | break; |
| 16736 | case 64: |
| 16737 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
| 16738 | LPFC_MQ_RING_SIZE_64); |
| 16739 | break; |
| 16740 | case 128: |
| 16741 | bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, |
| 16742 | LPFC_MQ_RING_SIZE_128); |
| 16743 | break; |
| 16744 | } |
| 16745 | list_for_each_entry(dmabuf, &mq->page_list, list) { |
| 16746 | mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = |
| 16747 | putPaddrLow(dmabuf->phys); |
| 16748 | mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = |
| 16749 | putPaddrHigh(dmabuf->phys); |
| 16750 | } |
| 16751 | } |
| 16752 | |
| 16753 | /** |
| 16754 | * lpfc_mq_create - Create a mailbox Queue on the HBA |
| 16755 | * @phba: HBA structure that indicates port to create a queue on. |
| 16756 | * @mq: The queue structure to use to create the mailbox queue. |
| 16757 | * @cq: The completion queue to associate with this cq. |
| 16758 | * @subtype: The queue's subtype. |
| 16759 | * |
| 16760 | * This function creates a mailbox queue, as detailed in @mq, on a port, |
| 16761 | * described by @phba by sending a MQ_CREATE mailbox command to the HBA. |
| 16762 | * |
| 16763 | * The @phba struct is used to send mailbox command to HBA. The @cq struct |
| 16764 | * is used to get the entry count and entry size that are necessary to |
| 16765 | * determine the number of pages to allocate and use for this queue. This |
| 16766 | * function will send the MQ_CREATE mailbox command to the HBA to setup the |
| 16767 | * mailbox queue. This function is asynchronous and will wait for the mailbox |
| 16768 | * command to finish before continuing. |
| 16769 | * |
| 16770 | * On success this function will return a zero. If unable to allocate enough |
| 16771 | * memory this function will return -ENOMEM. If the queue create mailbox command |
| 16772 | * fails this function will return -ENXIO. |
| 16773 | **/ |
| 16774 | int32_t |
| 16775 | lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, |
| 16776 | struct lpfc_queue *cq, uint32_t subtype) |
| 16777 | { |
| 16778 | struct lpfc_mbx_mq_create *mq_create; |
| 16779 | struct lpfc_mbx_mq_create_ext *mq_create_ext; |
| 16780 | struct lpfc_dmabuf *dmabuf; |
| 16781 | LPFC_MBOXQ_t *mbox; |
| 16782 | int rc, length, status = 0; |
| 16783 | uint32_t shdr_status, shdr_add_status; |
| 16784 | union lpfc_sli4_cfg_shdr *shdr; |
| 16785 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
| 16786 | |
| 16787 | /* sanity check on queue memory */ |
| 16788 | if (!mq || !cq) |
| 16789 | return -ENODEV; |
| 16790 | if (!phba->sli4_hba.pc_sli4_params.supported) |
| 16791 | hw_page_size = SLI4_PAGE_SIZE; |
| 16792 | |
| 16793 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 16794 | if (!mbox) |
| 16795 | return -ENOMEM; |
| 16796 | length = (sizeof(struct lpfc_mbx_mq_create_ext) - |
| 16797 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 16798 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 16799 | LPFC_MBOX_OPCODE_MQ_CREATE_EXT, |
| 16800 | length, LPFC_SLI4_MBX_EMBED); |
| 16801 | |
| 16802 | mq_create_ext = &mbox->u.mqe.un.mq_create_ext; |
| 16803 | shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; |
| 16804 | bf_set(lpfc_mbx_mq_create_ext_num_pages, |
| 16805 | &mq_create_ext->u.request, mq->page_count); |
| 16806 | bf_set(lpfc_mbx_mq_create_ext_async_evt_link, |
| 16807 | &mq_create_ext->u.request, 1); |
| 16808 | bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, |
| 16809 | &mq_create_ext->u.request, 1); |
| 16810 | bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, |
| 16811 | &mq_create_ext->u.request, 1); |
| 16812 | bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, |
| 16813 | &mq_create_ext->u.request, 1); |
| 16814 | bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, |
| 16815 | &mq_create_ext->u.request, 1); |
| 16816 | bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); |
| 16817 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
| 16818 | phba->sli4_hba.pc_sli4_params.mqv); |
| 16819 | if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) |
| 16820 | bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, |
| 16821 | cq->queue_id); |
| 16822 | else |
| 16823 | bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, |
| 16824 | cq->queue_id); |
| 16825 | switch (mq->entry_count) { |
| 16826 | default: |
| 16827 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 16828 | "0362 Unsupported MQ count. (%d)\n" , |
| 16829 | mq->entry_count); |
| 16830 | if (mq->entry_count < 16) { |
| 16831 | status = -EINVAL; |
| 16832 | goto out; |
| 16833 | } |
| 16834 | fallthrough; /* otherwise default to smallest count */ |
| 16835 | case 16: |
| 16836 | bf_set(lpfc_mq_context_ring_size, |
| 16837 | &mq_create_ext->u.request.context, |
| 16838 | LPFC_MQ_RING_SIZE_16); |
| 16839 | break; |
| 16840 | case 32: |
| 16841 | bf_set(lpfc_mq_context_ring_size, |
| 16842 | &mq_create_ext->u.request.context, |
| 16843 | LPFC_MQ_RING_SIZE_32); |
| 16844 | break; |
| 16845 | case 64: |
| 16846 | bf_set(lpfc_mq_context_ring_size, |
| 16847 | &mq_create_ext->u.request.context, |
| 16848 | LPFC_MQ_RING_SIZE_64); |
| 16849 | break; |
| 16850 | case 128: |
| 16851 | bf_set(lpfc_mq_context_ring_size, |
| 16852 | &mq_create_ext->u.request.context, |
| 16853 | LPFC_MQ_RING_SIZE_128); |
| 16854 | break; |
| 16855 | } |
| 16856 | list_for_each_entry(dmabuf, &mq->page_list, list) { |
| 16857 | memset(dmabuf->virt, 0, hw_page_size); |
| 16858 | mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = |
| 16859 | putPaddrLow(dmabuf->phys); |
| 16860 | mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = |
| 16861 | putPaddrHigh(dmabuf->phys); |
| 16862 | } |
| 16863 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 16864 | mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, |
| 16865 | &mq_create_ext->u.response); |
| 16866 | if (rc != MBX_SUCCESS) { |
| 16867 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 16868 | "2795 MQ_CREATE_EXT failed with " |
| 16869 | "status x%x. Failback to MQ_CREATE.\n" , |
| 16870 | rc); |
| 16871 | lpfc_mq_create_fb_init(phba, mq, mbox, cq); |
| 16872 | mq_create = &mbox->u.mqe.un.mq_create; |
| 16873 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 16874 | shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; |
| 16875 | mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, |
| 16876 | &mq_create->u.response); |
| 16877 | } |
| 16878 | |
| 16879 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 16880 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 16881 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 16882 | if (shdr_status || shdr_add_status || rc) { |
| 16883 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 16884 | "2502 MQ_CREATE mailbox failed with " |
| 16885 | "status x%x add_status x%x, mbx status x%x\n" , |
| 16886 | shdr_status, shdr_add_status, rc); |
| 16887 | status = -ENXIO; |
| 16888 | goto out; |
| 16889 | } |
| 16890 | if (mq->queue_id == 0xFFFF) { |
| 16891 | status = -ENXIO; |
| 16892 | goto out; |
| 16893 | } |
| 16894 | mq->type = LPFC_MQ; |
| 16895 | mq->assoc_qid = cq->queue_id; |
| 16896 | mq->subtype = subtype; |
| 16897 | mq->host_index = 0; |
| 16898 | mq->hba_index = 0; |
| 16899 | |
| 16900 | /* link the mq onto the parent cq child list */ |
| 16901 | list_add_tail(new: &mq->list, head: &cq->child_list); |
| 16902 | out: |
| 16903 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 16904 | return status; |
| 16905 | } |
| 16906 | |
| 16907 | /** |
| 16908 | * lpfc_wq_create - Create a Work Queue on the HBA |
| 16909 | * @phba: HBA structure that indicates port to create a queue on. |
| 16910 | * @wq: The queue structure to use to create the work queue. |
| 16911 | * @cq: The completion queue to bind this work queue to. |
| 16912 | * @subtype: The subtype of the work queue indicating its functionality. |
| 16913 | * |
| 16914 | * This function creates a work queue, as detailed in @wq, on a port, described |
| 16915 | * by @phba by sending a WQ_CREATE mailbox command to the HBA. |
| 16916 | * |
| 16917 | * The @phba struct is used to send mailbox command to HBA. The @wq struct |
| 16918 | * is used to get the entry count and entry size that are necessary to |
| 16919 | * determine the number of pages to allocate and use for this queue. The @cq |
| 16920 | * is used to indicate which completion queue to bind this work queue to. This |
| 16921 | * function will send the WQ_CREATE mailbox command to the HBA to setup the |
| 16922 | * work queue. This function is asynchronous and will wait for the mailbox |
| 16923 | * command to finish before continuing. |
| 16924 | * |
| 16925 | * On success this function will return a zero. If unable to allocate enough |
| 16926 | * memory this function will return -ENOMEM. If the queue create mailbox command |
| 16927 | * fails this function will return -ENXIO. |
| 16928 | **/ |
| 16929 | int |
| 16930 | lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, |
| 16931 | struct lpfc_queue *cq, uint32_t subtype) |
| 16932 | { |
| 16933 | struct lpfc_mbx_wq_create *wq_create; |
| 16934 | struct lpfc_dmabuf *dmabuf; |
| 16935 | LPFC_MBOXQ_t *mbox; |
| 16936 | int rc, length, status = 0; |
| 16937 | uint32_t shdr_status, shdr_add_status; |
| 16938 | union lpfc_sli4_cfg_shdr *shdr; |
| 16939 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
| 16940 | struct dma_address *page; |
| 16941 | void __iomem *bar_memmap_p; |
| 16942 | uint32_t db_offset; |
| 16943 | uint16_t pci_barset; |
| 16944 | uint8_t dpp_barset; |
| 16945 | uint32_t dpp_offset; |
| 16946 | uint8_t wq_create_version; |
| 16947 | #ifdef CONFIG_X86 |
| 16948 | unsigned long pg_addr; |
| 16949 | #endif |
| 16950 | |
| 16951 | /* sanity check on queue memory */ |
| 16952 | if (!wq || !cq) |
| 16953 | return -ENODEV; |
| 16954 | if (!phba->sli4_hba.pc_sli4_params.supported) |
| 16955 | hw_page_size = wq->page_size; |
| 16956 | |
| 16957 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 16958 | if (!mbox) |
| 16959 | return -ENOMEM; |
| 16960 | length = (sizeof(struct lpfc_mbx_wq_create) - |
| 16961 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 16962 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 16963 | LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, |
| 16964 | length, LPFC_SLI4_MBX_EMBED); |
| 16965 | wq_create = &mbox->u.mqe.un.wq_create; |
| 16966 | shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; |
| 16967 | bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, |
| 16968 | wq->page_count); |
| 16969 | bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, |
| 16970 | cq->queue_id); |
| 16971 | |
| 16972 | /* wqv is the earliest version supported, NOT the latest */ |
| 16973 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
| 16974 | phba->sli4_hba.pc_sli4_params.wqv); |
| 16975 | |
| 16976 | if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) || |
| 16977 | (wq->page_size > SLI4_PAGE_SIZE)) |
| 16978 | wq_create_version = LPFC_Q_CREATE_VERSION_1; |
| 16979 | else |
| 16980 | wq_create_version = LPFC_Q_CREATE_VERSION_0; |
| 16981 | |
| 16982 | switch (wq_create_version) { |
| 16983 | case LPFC_Q_CREATE_VERSION_1: |
| 16984 | bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, |
| 16985 | wq->entry_count); |
| 16986 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
| 16987 | LPFC_Q_CREATE_VERSION_1); |
| 16988 | |
| 16989 | switch (wq->entry_size) { |
| 16990 | default: |
| 16991 | case 64: |
| 16992 | bf_set(lpfc_mbx_wq_create_wqe_size, |
| 16993 | &wq_create->u.request_1, |
| 16994 | LPFC_WQ_WQE_SIZE_64); |
| 16995 | break; |
| 16996 | case 128: |
| 16997 | bf_set(lpfc_mbx_wq_create_wqe_size, |
| 16998 | &wq_create->u.request_1, |
| 16999 | LPFC_WQ_WQE_SIZE_128); |
| 17000 | break; |
| 17001 | } |
| 17002 | /* Request DPP by default */ |
| 17003 | bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1); |
| 17004 | bf_set(lpfc_mbx_wq_create_page_size, |
| 17005 | &wq_create->u.request_1, |
| 17006 | (wq->page_size / SLI4_PAGE_SIZE)); |
| 17007 | page = wq_create->u.request_1.page; |
| 17008 | break; |
| 17009 | default: |
| 17010 | page = wq_create->u.request.page; |
| 17011 | break; |
| 17012 | } |
| 17013 | |
| 17014 | list_for_each_entry(dmabuf, &wq->page_list, list) { |
| 17015 | memset(dmabuf->virt, 0, hw_page_size); |
| 17016 | page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); |
| 17017 | page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); |
| 17018 | } |
| 17019 | |
| 17020 | if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) |
| 17021 | bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); |
| 17022 | |
| 17023 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 17024 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 17025 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 17026 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 17027 | if (shdr_status || shdr_add_status || rc) { |
| 17028 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17029 | "2503 WQ_CREATE mailbox failed with " |
| 17030 | "status x%x add_status x%x, mbx status x%x\n" , |
| 17031 | shdr_status, shdr_add_status, rc); |
| 17032 | status = -ENXIO; |
| 17033 | goto out; |
| 17034 | } |
| 17035 | |
| 17036 | if (wq_create_version == LPFC_Q_CREATE_VERSION_0) |
| 17037 | wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, |
| 17038 | &wq_create->u.response); |
| 17039 | else |
| 17040 | wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, |
| 17041 | &wq_create->u.response_1); |
| 17042 | |
| 17043 | if (wq->queue_id == 0xFFFF) { |
| 17044 | status = -ENXIO; |
| 17045 | goto out; |
| 17046 | } |
| 17047 | |
| 17048 | wq->db_format = LPFC_DB_LIST_FORMAT; |
| 17049 | if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { |
| 17050 | if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { |
| 17051 | wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, |
| 17052 | &wq_create->u.response); |
| 17053 | if ((wq->db_format != LPFC_DB_LIST_FORMAT) && |
| 17054 | (wq->db_format != LPFC_DB_RING_FORMAT)) { |
| 17055 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17056 | "3265 WQ[%d] doorbell format " |
| 17057 | "not supported: x%x\n" , |
| 17058 | wq->queue_id, wq->db_format); |
| 17059 | status = -EINVAL; |
| 17060 | goto out; |
| 17061 | } |
| 17062 | pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, |
| 17063 | &wq_create->u.response); |
| 17064 | bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, |
| 17065 | pci_barset); |
| 17066 | if (!bar_memmap_p) { |
| 17067 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17068 | "3263 WQ[%d] failed to memmap " |
| 17069 | "pci barset:x%x\n" , |
| 17070 | wq->queue_id, pci_barset); |
| 17071 | status = -ENOMEM; |
| 17072 | goto out; |
| 17073 | } |
| 17074 | db_offset = wq_create->u.response.doorbell_offset; |
| 17075 | if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && |
| 17076 | (db_offset != LPFC_ULP1_WQ_DOORBELL)) { |
| 17077 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17078 | "3252 WQ[%d] doorbell offset " |
| 17079 | "not supported: x%x\n" , |
| 17080 | wq->queue_id, db_offset); |
| 17081 | status = -EINVAL; |
| 17082 | goto out; |
| 17083 | } |
| 17084 | wq->db_regaddr = bar_memmap_p + db_offset; |
| 17085 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 17086 | "3264 WQ[%d]: barset:x%x, offset:x%x, " |
| 17087 | "format:x%x\n" , wq->queue_id, |
| 17088 | pci_barset, db_offset, wq->db_format); |
| 17089 | } else |
| 17090 | wq->db_regaddr = phba->sli4_hba.WQDBregaddr; |
| 17091 | } else { |
| 17092 | /* Check if DPP was honored by the firmware */ |
| 17093 | wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, |
| 17094 | &wq_create->u.response_1); |
| 17095 | if (wq->dpp_enable) { |
| 17096 | pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, |
| 17097 | &wq_create->u.response_1); |
| 17098 | bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, |
| 17099 | pci_barset); |
| 17100 | if (!bar_memmap_p) { |
| 17101 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17102 | "3267 WQ[%d] failed to memmap " |
| 17103 | "pci barset:x%x\n" , |
| 17104 | wq->queue_id, pci_barset); |
| 17105 | status = -ENOMEM; |
| 17106 | goto out; |
| 17107 | } |
| 17108 | db_offset = wq_create->u.response_1.doorbell_offset; |
| 17109 | wq->db_regaddr = bar_memmap_p + db_offset; |
| 17110 | wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, |
| 17111 | &wq_create->u.response_1); |
| 17112 | dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, |
| 17113 | &wq_create->u.response_1); |
| 17114 | bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, |
| 17115 | pci_barset: dpp_barset); |
| 17116 | if (!bar_memmap_p) { |
| 17117 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17118 | "3268 WQ[%d] failed to memmap " |
| 17119 | "pci barset:x%x\n" , |
| 17120 | wq->queue_id, dpp_barset); |
| 17121 | status = -ENOMEM; |
| 17122 | goto out; |
| 17123 | } |
| 17124 | dpp_offset = wq_create->u.response_1.dpp_offset; |
| 17125 | wq->dpp_regaddr = bar_memmap_p + dpp_offset; |
| 17126 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 17127 | "3271 WQ[%d]: barset:x%x, offset:x%x, " |
| 17128 | "dpp_id:x%x dpp_barset:x%x " |
| 17129 | "dpp_offset:x%x\n" , |
| 17130 | wq->queue_id, pci_barset, db_offset, |
| 17131 | wq->dpp_id, dpp_barset, dpp_offset); |
| 17132 | |
| 17133 | #ifdef CONFIG_X86 |
| 17134 | /* Enable combined writes for DPP aperture */ |
| 17135 | pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; |
| 17136 | rc = set_memory_wc(addr: pg_addr, numpages: 1); |
| 17137 | if (rc) { |
| 17138 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 17139 | "3272 Cannot setup Combined " |
| 17140 | "Write on WQ[%d] - disable DPP\n" , |
| 17141 | wq->queue_id); |
| 17142 | phba->cfg_enable_dpp = 0; |
| 17143 | } |
| 17144 | #else |
| 17145 | phba->cfg_enable_dpp = 0; |
| 17146 | #endif |
| 17147 | } else |
| 17148 | wq->db_regaddr = phba->sli4_hba.WQDBregaddr; |
| 17149 | } |
| 17150 | wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); |
| 17151 | if (wq->pring == NULL) { |
| 17152 | status = -ENOMEM; |
| 17153 | goto out; |
| 17154 | } |
| 17155 | wq->type = LPFC_WQ; |
| 17156 | wq->assoc_qid = cq->queue_id; |
| 17157 | wq->subtype = subtype; |
| 17158 | wq->host_index = 0; |
| 17159 | wq->hba_index = 0; |
| 17160 | wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; |
| 17161 | |
| 17162 | /* link the wq onto the parent cq child list */ |
| 17163 | list_add_tail(new: &wq->list, head: &cq->child_list); |
| 17164 | out: |
| 17165 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 17166 | return status; |
| 17167 | } |
| 17168 | |
| 17169 | /** |
| 17170 | * lpfc_rq_create - Create a Receive Queue on the HBA |
| 17171 | * @phba: HBA structure that indicates port to create a queue on. |
| 17172 | * @hrq: The queue structure to use to create the header receive queue. |
| 17173 | * @drq: The queue structure to use to create the data receive queue. |
| 17174 | * @cq: The completion queue to bind this work queue to. |
| 17175 | * @subtype: The subtype of the work queue indicating its functionality. |
| 17176 | * |
| 17177 | * This function creates a receive buffer queue pair , as detailed in @hrq and |
| 17178 | * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command |
| 17179 | * to the HBA. |
| 17180 | * |
| 17181 | * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq |
| 17182 | * struct is used to get the entry count that is necessary to determine the |
| 17183 | * number of pages to use for this queue. The @cq is used to indicate which |
| 17184 | * completion queue to bind received buffers that are posted to these queues to. |
| 17185 | * This function will send the RQ_CREATE mailbox command to the HBA to setup the |
| 17186 | * receive queue pair. This function is asynchronous and will wait for the |
| 17187 | * mailbox command to finish before continuing. |
| 17188 | * |
| 17189 | * On success this function will return a zero. If unable to allocate enough |
| 17190 | * memory this function will return -ENOMEM. If the queue create mailbox command |
| 17191 | * fails this function will return -ENXIO. |
| 17192 | **/ |
| 17193 | int |
| 17194 | lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, |
| 17195 | struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) |
| 17196 | { |
| 17197 | struct lpfc_mbx_rq_create *rq_create; |
| 17198 | struct lpfc_dmabuf *dmabuf; |
| 17199 | LPFC_MBOXQ_t *mbox; |
| 17200 | int rc, length, status = 0; |
| 17201 | uint32_t shdr_status, shdr_add_status; |
| 17202 | union lpfc_sli4_cfg_shdr *shdr; |
| 17203 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
| 17204 | void __iomem *bar_memmap_p; |
| 17205 | uint32_t db_offset; |
| 17206 | uint16_t pci_barset; |
| 17207 | |
| 17208 | /* sanity check on queue memory */ |
| 17209 | if (!hrq || !drq || !cq) |
| 17210 | return -ENODEV; |
| 17211 | if (!phba->sli4_hba.pc_sli4_params.supported) |
| 17212 | hw_page_size = SLI4_PAGE_SIZE; |
| 17213 | |
| 17214 | if (hrq->entry_count != drq->entry_count) |
| 17215 | return -EINVAL; |
| 17216 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 17217 | if (!mbox) |
| 17218 | return -ENOMEM; |
| 17219 | length = (sizeof(struct lpfc_mbx_rq_create) - |
| 17220 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 17221 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 17222 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, |
| 17223 | length, LPFC_SLI4_MBX_EMBED); |
| 17224 | rq_create = &mbox->u.mqe.un.rq_create; |
| 17225 | shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; |
| 17226 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
| 17227 | phba->sli4_hba.pc_sli4_params.rqv); |
| 17228 | if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { |
| 17229 | bf_set(lpfc_rq_context_rqe_count_1, |
| 17230 | &rq_create->u.request.context, |
| 17231 | hrq->entry_count); |
| 17232 | rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; |
| 17233 | bf_set(lpfc_rq_context_rqe_size, |
| 17234 | &rq_create->u.request.context, |
| 17235 | LPFC_RQE_SIZE_8); |
| 17236 | bf_set(lpfc_rq_context_page_size, |
| 17237 | &rq_create->u.request.context, |
| 17238 | LPFC_RQ_PAGE_SIZE_4096); |
| 17239 | } else { |
| 17240 | switch (hrq->entry_count) { |
| 17241 | default: |
| 17242 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17243 | "2535 Unsupported RQ count. (%d)\n" , |
| 17244 | hrq->entry_count); |
| 17245 | if (hrq->entry_count < 512) { |
| 17246 | status = -EINVAL; |
| 17247 | goto out; |
| 17248 | } |
| 17249 | fallthrough; /* otherwise default to smallest count */ |
| 17250 | case 512: |
| 17251 | bf_set(lpfc_rq_context_rqe_count, |
| 17252 | &rq_create->u.request.context, |
| 17253 | LPFC_RQ_RING_SIZE_512); |
| 17254 | break; |
| 17255 | case 1024: |
| 17256 | bf_set(lpfc_rq_context_rqe_count, |
| 17257 | &rq_create->u.request.context, |
| 17258 | LPFC_RQ_RING_SIZE_1024); |
| 17259 | break; |
| 17260 | case 2048: |
| 17261 | bf_set(lpfc_rq_context_rqe_count, |
| 17262 | &rq_create->u.request.context, |
| 17263 | LPFC_RQ_RING_SIZE_2048); |
| 17264 | break; |
| 17265 | case 4096: |
| 17266 | bf_set(lpfc_rq_context_rqe_count, |
| 17267 | &rq_create->u.request.context, |
| 17268 | LPFC_RQ_RING_SIZE_4096); |
| 17269 | break; |
| 17270 | } |
| 17271 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, |
| 17272 | LPFC_HDR_BUF_SIZE); |
| 17273 | } |
| 17274 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, |
| 17275 | cq->queue_id); |
| 17276 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, |
| 17277 | hrq->page_count); |
| 17278 | list_for_each_entry(dmabuf, &hrq->page_list, list) { |
| 17279 | memset(dmabuf->virt, 0, hw_page_size); |
| 17280 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = |
| 17281 | putPaddrLow(dmabuf->phys); |
| 17282 | rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = |
| 17283 | putPaddrHigh(dmabuf->phys); |
| 17284 | } |
| 17285 | if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) |
| 17286 | bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); |
| 17287 | |
| 17288 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 17289 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 17290 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 17291 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 17292 | if (shdr_status || shdr_add_status || rc) { |
| 17293 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17294 | "2504 RQ_CREATE mailbox failed with " |
| 17295 | "status x%x add_status x%x, mbx status x%x\n" , |
| 17296 | shdr_status, shdr_add_status, rc); |
| 17297 | status = -ENXIO; |
| 17298 | goto out; |
| 17299 | } |
| 17300 | hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); |
| 17301 | if (hrq->queue_id == 0xFFFF) { |
| 17302 | status = -ENXIO; |
| 17303 | goto out; |
| 17304 | } |
| 17305 | |
| 17306 | if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { |
| 17307 | hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, |
| 17308 | &rq_create->u.response); |
| 17309 | if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && |
| 17310 | (hrq->db_format != LPFC_DB_RING_FORMAT)) { |
| 17311 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17312 | "3262 RQ [%d] doorbell format not " |
| 17313 | "supported: x%x\n" , hrq->queue_id, |
| 17314 | hrq->db_format); |
| 17315 | status = -EINVAL; |
| 17316 | goto out; |
| 17317 | } |
| 17318 | |
| 17319 | pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, |
| 17320 | &rq_create->u.response); |
| 17321 | bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); |
| 17322 | if (!bar_memmap_p) { |
| 17323 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17324 | "3269 RQ[%d] failed to memmap pci " |
| 17325 | "barset:x%x\n" , hrq->queue_id, |
| 17326 | pci_barset); |
| 17327 | status = -ENOMEM; |
| 17328 | goto out; |
| 17329 | } |
| 17330 | |
| 17331 | db_offset = rq_create->u.response.doorbell_offset; |
| 17332 | if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && |
| 17333 | (db_offset != LPFC_ULP1_RQ_DOORBELL)) { |
| 17334 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17335 | "3270 RQ[%d] doorbell offset not " |
| 17336 | "supported: x%x\n" , hrq->queue_id, |
| 17337 | db_offset); |
| 17338 | status = -EINVAL; |
| 17339 | goto out; |
| 17340 | } |
| 17341 | hrq->db_regaddr = bar_memmap_p + db_offset; |
| 17342 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 17343 | "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " |
| 17344 | "format:x%x\n" , hrq->queue_id, pci_barset, |
| 17345 | db_offset, hrq->db_format); |
| 17346 | } else { |
| 17347 | hrq->db_format = LPFC_DB_RING_FORMAT; |
| 17348 | hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; |
| 17349 | } |
| 17350 | hrq->type = LPFC_HRQ; |
| 17351 | hrq->assoc_qid = cq->queue_id; |
| 17352 | hrq->subtype = subtype; |
| 17353 | hrq->host_index = 0; |
| 17354 | hrq->hba_index = 0; |
| 17355 | hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; |
| 17356 | |
| 17357 | /* now create the data queue */ |
| 17358 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 17359 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, |
| 17360 | length, LPFC_SLI4_MBX_EMBED); |
| 17361 | bf_set(lpfc_mbox_hdr_version, &shdr->request, |
| 17362 | phba->sli4_hba.pc_sli4_params.rqv); |
| 17363 | if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { |
| 17364 | bf_set(lpfc_rq_context_rqe_count_1, |
| 17365 | &rq_create->u.request.context, hrq->entry_count); |
| 17366 | if (subtype == LPFC_NVMET) |
| 17367 | rq_create->u.request.context.buffer_size = |
| 17368 | LPFC_NVMET_DATA_BUF_SIZE; |
| 17369 | else |
| 17370 | rq_create->u.request.context.buffer_size = |
| 17371 | LPFC_DATA_BUF_SIZE; |
| 17372 | bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, |
| 17373 | LPFC_RQE_SIZE_8); |
| 17374 | bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, |
| 17375 | (PAGE_SIZE/SLI4_PAGE_SIZE)); |
| 17376 | } else { |
| 17377 | switch (drq->entry_count) { |
| 17378 | default: |
| 17379 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17380 | "2536 Unsupported RQ count. (%d)\n" , |
| 17381 | drq->entry_count); |
| 17382 | if (drq->entry_count < 512) { |
| 17383 | status = -EINVAL; |
| 17384 | goto out; |
| 17385 | } |
| 17386 | fallthrough; /* otherwise default to smallest count */ |
| 17387 | case 512: |
| 17388 | bf_set(lpfc_rq_context_rqe_count, |
| 17389 | &rq_create->u.request.context, |
| 17390 | LPFC_RQ_RING_SIZE_512); |
| 17391 | break; |
| 17392 | case 1024: |
| 17393 | bf_set(lpfc_rq_context_rqe_count, |
| 17394 | &rq_create->u.request.context, |
| 17395 | LPFC_RQ_RING_SIZE_1024); |
| 17396 | break; |
| 17397 | case 2048: |
| 17398 | bf_set(lpfc_rq_context_rqe_count, |
| 17399 | &rq_create->u.request.context, |
| 17400 | LPFC_RQ_RING_SIZE_2048); |
| 17401 | break; |
| 17402 | case 4096: |
| 17403 | bf_set(lpfc_rq_context_rqe_count, |
| 17404 | &rq_create->u.request.context, |
| 17405 | LPFC_RQ_RING_SIZE_4096); |
| 17406 | break; |
| 17407 | } |
| 17408 | if (subtype == LPFC_NVMET) |
| 17409 | bf_set(lpfc_rq_context_buf_size, |
| 17410 | &rq_create->u.request.context, |
| 17411 | LPFC_NVMET_DATA_BUF_SIZE); |
| 17412 | else |
| 17413 | bf_set(lpfc_rq_context_buf_size, |
| 17414 | &rq_create->u.request.context, |
| 17415 | LPFC_DATA_BUF_SIZE); |
| 17416 | } |
| 17417 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, |
| 17418 | cq->queue_id); |
| 17419 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, |
| 17420 | drq->page_count); |
| 17421 | list_for_each_entry(dmabuf, &drq->page_list, list) { |
| 17422 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = |
| 17423 | putPaddrLow(dmabuf->phys); |
| 17424 | rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = |
| 17425 | putPaddrHigh(dmabuf->phys); |
| 17426 | } |
| 17427 | if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) |
| 17428 | bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); |
| 17429 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 17430 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 17431 | shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; |
| 17432 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 17433 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 17434 | if (shdr_status || shdr_add_status || rc) { |
| 17435 | status = -ENXIO; |
| 17436 | goto out; |
| 17437 | } |
| 17438 | drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); |
| 17439 | if (drq->queue_id == 0xFFFF) { |
| 17440 | status = -ENXIO; |
| 17441 | goto out; |
| 17442 | } |
| 17443 | drq->type = LPFC_DRQ; |
| 17444 | drq->assoc_qid = cq->queue_id; |
| 17445 | drq->subtype = subtype; |
| 17446 | drq->host_index = 0; |
| 17447 | drq->hba_index = 0; |
| 17448 | drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; |
| 17449 | |
| 17450 | /* link the header and data RQs onto the parent cq child list */ |
| 17451 | list_add_tail(new: &hrq->list, head: &cq->child_list); |
| 17452 | list_add_tail(new: &drq->list, head: &cq->child_list); |
| 17453 | |
| 17454 | out: |
| 17455 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 17456 | return status; |
| 17457 | } |
| 17458 | |
| 17459 | /** |
| 17460 | * lpfc_mrq_create - Create MRQ Receive Queues on the HBA |
| 17461 | * @phba: HBA structure that indicates port to create a queue on. |
| 17462 | * @hrqp: The queue structure array to use to create the header receive queues. |
| 17463 | * @drqp: The queue structure array to use to create the data receive queues. |
| 17464 | * @cqp: The completion queue array to bind these receive queues to. |
| 17465 | * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). |
| 17466 | * |
| 17467 | * This function creates a receive buffer queue pair , as detailed in @hrq and |
| 17468 | * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command |
| 17469 | * to the HBA. |
| 17470 | * |
| 17471 | * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq |
| 17472 | * struct is used to get the entry count that is necessary to determine the |
| 17473 | * number of pages to use for this queue. The @cq is used to indicate which |
| 17474 | * completion queue to bind received buffers that are posted to these queues to. |
| 17475 | * This function will send the RQ_CREATE mailbox command to the HBA to setup the |
| 17476 | * receive queue pair. This function is asynchronous and will wait for the |
| 17477 | * mailbox command to finish before continuing. |
| 17478 | * |
| 17479 | * On success this function will return a zero. If unable to allocate enough |
| 17480 | * memory this function will return -ENOMEM. If the queue create mailbox command |
| 17481 | * fails this function will return -ENXIO. |
| 17482 | **/ |
| 17483 | int |
| 17484 | lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, |
| 17485 | struct lpfc_queue **drqp, struct lpfc_queue **cqp, |
| 17486 | uint32_t subtype) |
| 17487 | { |
| 17488 | struct lpfc_queue *hrq, *drq, *cq; |
| 17489 | struct lpfc_mbx_rq_create_v2 *rq_create; |
| 17490 | struct lpfc_dmabuf *dmabuf; |
| 17491 | LPFC_MBOXQ_t *mbox; |
| 17492 | int rc, length, alloclen, status = 0; |
| 17493 | int cnt, idx, numrq, page_idx = 0; |
| 17494 | uint32_t shdr_status, shdr_add_status; |
| 17495 | union lpfc_sli4_cfg_shdr *shdr; |
| 17496 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
| 17497 | |
| 17498 | numrq = phba->cfg_nvmet_mrq; |
| 17499 | /* sanity check on array memory */ |
| 17500 | if (!hrqp || !drqp || !cqp || !numrq) |
| 17501 | return -ENODEV; |
| 17502 | if (!phba->sli4_hba.pc_sli4_params.supported) |
| 17503 | hw_page_size = SLI4_PAGE_SIZE; |
| 17504 | |
| 17505 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 17506 | if (!mbox) |
| 17507 | return -ENOMEM; |
| 17508 | |
| 17509 | length = sizeof(struct lpfc_mbx_rq_create_v2); |
| 17510 | length += ((2 * numrq * hrqp[0]->page_count) * |
| 17511 | sizeof(struct dma_address)); |
| 17512 | |
| 17513 | alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 17514 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, |
| 17515 | LPFC_SLI4_MBX_NEMBED); |
| 17516 | if (alloclen < length) { |
| 17517 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17518 | "3099 Allocated DMA memory size (%d) is " |
| 17519 | "less than the requested DMA memory size " |
| 17520 | "(%d)\n" , alloclen, length); |
| 17521 | status = -ENOMEM; |
| 17522 | goto out; |
| 17523 | } |
| 17524 | |
| 17525 | |
| 17526 | |
| 17527 | rq_create = mbox->sge_array->addr[0]; |
| 17528 | shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; |
| 17529 | |
| 17530 | bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); |
| 17531 | cnt = 0; |
| 17532 | |
| 17533 | for (idx = 0; idx < numrq; idx++) { |
| 17534 | hrq = hrqp[idx]; |
| 17535 | drq = drqp[idx]; |
| 17536 | cq = cqp[idx]; |
| 17537 | |
| 17538 | /* sanity check on queue memory */ |
| 17539 | if (!hrq || !drq || !cq) { |
| 17540 | status = -ENODEV; |
| 17541 | goto out; |
| 17542 | } |
| 17543 | |
| 17544 | if (hrq->entry_count != drq->entry_count) { |
| 17545 | status = -EINVAL; |
| 17546 | goto out; |
| 17547 | } |
| 17548 | |
| 17549 | if (idx == 0) { |
| 17550 | bf_set(lpfc_mbx_rq_create_num_pages, |
| 17551 | &rq_create->u.request, |
| 17552 | hrq->page_count); |
| 17553 | bf_set(lpfc_mbx_rq_create_rq_cnt, |
| 17554 | &rq_create->u.request, (numrq * 2)); |
| 17555 | bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, |
| 17556 | 1); |
| 17557 | bf_set(lpfc_rq_context_base_cq, |
| 17558 | &rq_create->u.request.context, |
| 17559 | cq->queue_id); |
| 17560 | bf_set(lpfc_rq_context_data_size, |
| 17561 | &rq_create->u.request.context, |
| 17562 | LPFC_NVMET_DATA_BUF_SIZE); |
| 17563 | bf_set(lpfc_rq_context_hdr_size, |
| 17564 | &rq_create->u.request.context, |
| 17565 | LPFC_HDR_BUF_SIZE); |
| 17566 | bf_set(lpfc_rq_context_rqe_count_1, |
| 17567 | &rq_create->u.request.context, |
| 17568 | hrq->entry_count); |
| 17569 | bf_set(lpfc_rq_context_rqe_size, |
| 17570 | &rq_create->u.request.context, |
| 17571 | LPFC_RQE_SIZE_8); |
| 17572 | bf_set(lpfc_rq_context_page_size, |
| 17573 | &rq_create->u.request.context, |
| 17574 | (PAGE_SIZE/SLI4_PAGE_SIZE)); |
| 17575 | } |
| 17576 | rc = 0; |
| 17577 | list_for_each_entry(dmabuf, &hrq->page_list, list) { |
| 17578 | memset(dmabuf->virt, 0, hw_page_size); |
| 17579 | cnt = page_idx + dmabuf->buffer_tag; |
| 17580 | rq_create->u.request.page[cnt].addr_lo = |
| 17581 | putPaddrLow(dmabuf->phys); |
| 17582 | rq_create->u.request.page[cnt].addr_hi = |
| 17583 | putPaddrHigh(dmabuf->phys); |
| 17584 | rc++; |
| 17585 | } |
| 17586 | page_idx += rc; |
| 17587 | |
| 17588 | rc = 0; |
| 17589 | list_for_each_entry(dmabuf, &drq->page_list, list) { |
| 17590 | memset(dmabuf->virt, 0, hw_page_size); |
| 17591 | cnt = page_idx + dmabuf->buffer_tag; |
| 17592 | rq_create->u.request.page[cnt].addr_lo = |
| 17593 | putPaddrLow(dmabuf->phys); |
| 17594 | rq_create->u.request.page[cnt].addr_hi = |
| 17595 | putPaddrHigh(dmabuf->phys); |
| 17596 | rc++; |
| 17597 | } |
| 17598 | page_idx += rc; |
| 17599 | |
| 17600 | hrq->db_format = LPFC_DB_RING_FORMAT; |
| 17601 | hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; |
| 17602 | hrq->type = LPFC_HRQ; |
| 17603 | hrq->assoc_qid = cq->queue_id; |
| 17604 | hrq->subtype = subtype; |
| 17605 | hrq->host_index = 0; |
| 17606 | hrq->hba_index = 0; |
| 17607 | hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; |
| 17608 | |
| 17609 | drq->db_format = LPFC_DB_RING_FORMAT; |
| 17610 | drq->db_regaddr = phba->sli4_hba.RQDBregaddr; |
| 17611 | drq->type = LPFC_DRQ; |
| 17612 | drq->assoc_qid = cq->queue_id; |
| 17613 | drq->subtype = subtype; |
| 17614 | drq->host_index = 0; |
| 17615 | drq->hba_index = 0; |
| 17616 | drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; |
| 17617 | |
| 17618 | list_add_tail(new: &hrq->list, head: &cq->child_list); |
| 17619 | list_add_tail(new: &drq->list, head: &cq->child_list); |
| 17620 | } |
| 17621 | |
| 17622 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 17623 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 17624 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 17625 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 17626 | if (shdr_status || shdr_add_status || rc) { |
| 17627 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17628 | "3120 RQ_CREATE mailbox failed with " |
| 17629 | "status x%x add_status x%x, mbx status x%x\n" , |
| 17630 | shdr_status, shdr_add_status, rc); |
| 17631 | status = -ENXIO; |
| 17632 | goto out; |
| 17633 | } |
| 17634 | rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); |
| 17635 | if (rc == 0xFFFF) { |
| 17636 | status = -ENXIO; |
| 17637 | goto out; |
| 17638 | } |
| 17639 | |
| 17640 | /* Initialize all RQs with associated queue id */ |
| 17641 | for (idx = 0; idx < numrq; idx++) { |
| 17642 | hrq = hrqp[idx]; |
| 17643 | hrq->queue_id = rc + (2 * idx); |
| 17644 | drq = drqp[idx]; |
| 17645 | drq->queue_id = rc + (2 * idx) + 1; |
| 17646 | } |
| 17647 | |
| 17648 | out: |
| 17649 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 17650 | return status; |
| 17651 | } |
| 17652 | |
| 17653 | /** |
| 17654 | * lpfc_eq_destroy - Destroy an event Queue on the HBA |
| 17655 | * @phba: HBA structure that indicates port to destroy a queue on. |
| 17656 | * @eq: The queue structure associated with the queue to destroy. |
| 17657 | * |
| 17658 | * This function destroys a queue, as detailed in @eq by sending an mailbox |
| 17659 | * command, specific to the type of queue, to the HBA. |
| 17660 | * |
| 17661 | * The @eq struct is used to get the queue ID of the queue to destroy. |
| 17662 | * |
| 17663 | * On success this function will return a zero. If the queue destroy mailbox |
| 17664 | * command fails this function will return -ENXIO. |
| 17665 | **/ |
| 17666 | int |
| 17667 | lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) |
| 17668 | { |
| 17669 | LPFC_MBOXQ_t *mbox; |
| 17670 | int rc, length, status = 0; |
| 17671 | uint32_t shdr_status, shdr_add_status; |
| 17672 | union lpfc_sli4_cfg_shdr *shdr; |
| 17673 | |
| 17674 | /* sanity check on queue memory */ |
| 17675 | if (!eq) |
| 17676 | return -ENODEV; |
| 17677 | |
| 17678 | if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) |
| 17679 | goto list_remove; |
| 17680 | |
| 17681 | mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); |
| 17682 | if (!mbox) |
| 17683 | return -ENOMEM; |
| 17684 | length = (sizeof(struct lpfc_mbx_eq_destroy) - |
| 17685 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 17686 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 17687 | LPFC_MBOX_OPCODE_EQ_DESTROY, |
| 17688 | length, LPFC_SLI4_MBX_EMBED); |
| 17689 | bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, |
| 17690 | eq->queue_id); |
| 17691 | mbox->vport = eq->phba->pport; |
| 17692 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 17693 | |
| 17694 | rc = lpfc_sli_issue_mbox(phba: eq->phba, pmbox: mbox, MBX_POLL); |
| 17695 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 17696 | shdr = (union lpfc_sli4_cfg_shdr *) |
| 17697 | &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; |
| 17698 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 17699 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 17700 | if (shdr_status || shdr_add_status || rc) { |
| 17701 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17702 | "2505 EQ_DESTROY mailbox failed with " |
| 17703 | "status x%x add_status x%x, mbx status x%x\n" , |
| 17704 | shdr_status, shdr_add_status, rc); |
| 17705 | status = -ENXIO; |
| 17706 | } |
| 17707 | mempool_free(element: mbox, pool: eq->phba->mbox_mem_pool); |
| 17708 | |
| 17709 | list_remove: |
| 17710 | /* Remove eq from any list */ |
| 17711 | list_del_init(entry: &eq->list); |
| 17712 | |
| 17713 | return status; |
| 17714 | } |
| 17715 | |
| 17716 | /** |
| 17717 | * lpfc_cq_destroy - Destroy a Completion Queue on the HBA |
| 17718 | * @phba: HBA structure that indicates port to destroy a queue on. |
| 17719 | * @cq: The queue structure associated with the queue to destroy. |
| 17720 | * |
| 17721 | * This function destroys a queue, as detailed in @cq by sending an mailbox |
| 17722 | * command, specific to the type of queue, to the HBA. |
| 17723 | * |
| 17724 | * The @cq struct is used to get the queue ID of the queue to destroy. |
| 17725 | * |
| 17726 | * On success this function will return a zero. If the queue destroy mailbox |
| 17727 | * command fails this function will return -ENXIO. |
| 17728 | **/ |
| 17729 | int |
| 17730 | lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) |
| 17731 | { |
| 17732 | LPFC_MBOXQ_t *mbox; |
| 17733 | int rc, length, status = 0; |
| 17734 | uint32_t shdr_status, shdr_add_status; |
| 17735 | union lpfc_sli4_cfg_shdr *shdr; |
| 17736 | |
| 17737 | /* sanity check on queue memory */ |
| 17738 | if (!cq) |
| 17739 | return -ENODEV; |
| 17740 | |
| 17741 | if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) |
| 17742 | goto list_remove; |
| 17743 | |
| 17744 | mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); |
| 17745 | if (!mbox) |
| 17746 | return -ENOMEM; |
| 17747 | length = (sizeof(struct lpfc_mbx_cq_destroy) - |
| 17748 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 17749 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 17750 | LPFC_MBOX_OPCODE_CQ_DESTROY, |
| 17751 | length, LPFC_SLI4_MBX_EMBED); |
| 17752 | bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, |
| 17753 | cq->queue_id); |
| 17754 | mbox->vport = cq->phba->pport; |
| 17755 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 17756 | rc = lpfc_sli_issue_mbox(phba: cq->phba, pmbox: mbox, MBX_POLL); |
| 17757 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 17758 | shdr = (union lpfc_sli4_cfg_shdr *) |
| 17759 | &mbox->u.mqe.un.wq_create.header.cfg_shdr; |
| 17760 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 17761 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 17762 | if (shdr_status || shdr_add_status || rc) { |
| 17763 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17764 | "2506 CQ_DESTROY mailbox failed with " |
| 17765 | "status x%x add_status x%x, mbx status x%x\n" , |
| 17766 | shdr_status, shdr_add_status, rc); |
| 17767 | status = -ENXIO; |
| 17768 | } |
| 17769 | mempool_free(element: mbox, pool: cq->phba->mbox_mem_pool); |
| 17770 | |
| 17771 | list_remove: |
| 17772 | /* Remove cq from any list */ |
| 17773 | list_del_init(entry: &cq->list); |
| 17774 | return status; |
| 17775 | } |
| 17776 | |
| 17777 | /** |
| 17778 | * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA |
| 17779 | * @phba: HBA structure that indicates port to destroy a queue on. |
| 17780 | * @mq: The queue structure associated with the queue to destroy. |
| 17781 | * |
| 17782 | * This function destroys a queue, as detailed in @mq by sending an mailbox |
| 17783 | * command, specific to the type of queue, to the HBA. |
| 17784 | * |
| 17785 | * The @mq struct is used to get the queue ID of the queue to destroy. |
| 17786 | * |
| 17787 | * On success this function will return a zero. If the queue destroy mailbox |
| 17788 | * command fails this function will return -ENXIO. |
| 17789 | **/ |
| 17790 | int |
| 17791 | lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) |
| 17792 | { |
| 17793 | LPFC_MBOXQ_t *mbox; |
| 17794 | int rc, length, status = 0; |
| 17795 | uint32_t shdr_status, shdr_add_status; |
| 17796 | union lpfc_sli4_cfg_shdr *shdr; |
| 17797 | |
| 17798 | /* sanity check on queue memory */ |
| 17799 | if (!mq) |
| 17800 | return -ENODEV; |
| 17801 | |
| 17802 | if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) |
| 17803 | goto list_remove; |
| 17804 | |
| 17805 | mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); |
| 17806 | if (!mbox) |
| 17807 | return -ENOMEM; |
| 17808 | length = (sizeof(struct lpfc_mbx_mq_destroy) - |
| 17809 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 17810 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 17811 | LPFC_MBOX_OPCODE_MQ_DESTROY, |
| 17812 | length, LPFC_SLI4_MBX_EMBED); |
| 17813 | bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, |
| 17814 | mq->queue_id); |
| 17815 | mbox->vport = mq->phba->pport; |
| 17816 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 17817 | rc = lpfc_sli_issue_mbox(phba: mq->phba, pmbox: mbox, MBX_POLL); |
| 17818 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 17819 | shdr = (union lpfc_sli4_cfg_shdr *) |
| 17820 | &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; |
| 17821 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 17822 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 17823 | if (shdr_status || shdr_add_status || rc) { |
| 17824 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17825 | "2507 MQ_DESTROY mailbox failed with " |
| 17826 | "status x%x add_status x%x, mbx status x%x\n" , |
| 17827 | shdr_status, shdr_add_status, rc); |
| 17828 | status = -ENXIO; |
| 17829 | } |
| 17830 | mempool_free(element: mbox, pool: mq->phba->mbox_mem_pool); |
| 17831 | |
| 17832 | list_remove: |
| 17833 | /* Remove mq from any list */ |
| 17834 | list_del_init(entry: &mq->list); |
| 17835 | return status; |
| 17836 | } |
| 17837 | |
| 17838 | /** |
| 17839 | * lpfc_wq_destroy - Destroy a Work Queue on the HBA |
| 17840 | * @phba: HBA structure that indicates port to destroy a queue on. |
| 17841 | * @wq: The queue structure associated with the queue to destroy. |
| 17842 | * |
| 17843 | * This function destroys a queue, as detailed in @wq by sending an mailbox |
| 17844 | * command, specific to the type of queue, to the HBA. |
| 17845 | * |
| 17846 | * The @wq struct is used to get the queue ID of the queue to destroy. |
| 17847 | * |
| 17848 | * On success this function will return a zero. If the queue destroy mailbox |
| 17849 | * command fails this function will return -ENXIO. |
| 17850 | **/ |
| 17851 | int |
| 17852 | lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) |
| 17853 | { |
| 17854 | LPFC_MBOXQ_t *mbox; |
| 17855 | int rc, length, status = 0; |
| 17856 | uint32_t shdr_status, shdr_add_status; |
| 17857 | union lpfc_sli4_cfg_shdr *shdr; |
| 17858 | |
| 17859 | /* sanity check on queue memory */ |
| 17860 | if (!wq) |
| 17861 | return -ENODEV; |
| 17862 | |
| 17863 | if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) |
| 17864 | goto list_remove; |
| 17865 | |
| 17866 | mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); |
| 17867 | if (!mbox) |
| 17868 | return -ENOMEM; |
| 17869 | length = (sizeof(struct lpfc_mbx_wq_destroy) - |
| 17870 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 17871 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 17872 | LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, |
| 17873 | length, LPFC_SLI4_MBX_EMBED); |
| 17874 | bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, |
| 17875 | wq->queue_id); |
| 17876 | mbox->vport = wq->phba->pport; |
| 17877 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 17878 | rc = lpfc_sli_issue_mbox(phba: wq->phba, pmbox: mbox, MBX_POLL); |
| 17879 | shdr = (union lpfc_sli4_cfg_shdr *) |
| 17880 | &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; |
| 17881 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 17882 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 17883 | if (shdr_status || shdr_add_status || rc) { |
| 17884 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17885 | "2508 WQ_DESTROY mailbox failed with " |
| 17886 | "status x%x add_status x%x, mbx status x%x\n" , |
| 17887 | shdr_status, shdr_add_status, rc); |
| 17888 | status = -ENXIO; |
| 17889 | } |
| 17890 | mempool_free(element: mbox, pool: wq->phba->mbox_mem_pool); |
| 17891 | |
| 17892 | list_remove: |
| 17893 | /* Remove wq from any list */ |
| 17894 | list_del_init(entry: &wq->list); |
| 17895 | kfree(objp: wq->pring); |
| 17896 | wq->pring = NULL; |
| 17897 | return status; |
| 17898 | } |
| 17899 | |
| 17900 | /** |
| 17901 | * lpfc_rq_destroy - Destroy a Receive Queue on the HBA |
| 17902 | * @phba: HBA structure that indicates port to destroy a queue on. |
| 17903 | * @hrq: The queue structure associated with the queue to destroy. |
| 17904 | * @drq: The queue structure associated with the queue to destroy. |
| 17905 | * |
| 17906 | * This function destroys a queue, as detailed in @rq by sending an mailbox |
| 17907 | * command, specific to the type of queue, to the HBA. |
| 17908 | * |
| 17909 | * The @rq struct is used to get the queue ID of the queue to destroy. |
| 17910 | * |
| 17911 | * On success this function will return a zero. If the queue destroy mailbox |
| 17912 | * command fails this function will return -ENXIO. |
| 17913 | **/ |
| 17914 | int |
| 17915 | lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, |
| 17916 | struct lpfc_queue *drq) |
| 17917 | { |
| 17918 | LPFC_MBOXQ_t *mbox; |
| 17919 | int rc, length, status = 0; |
| 17920 | uint32_t shdr_status, shdr_add_status; |
| 17921 | union lpfc_sli4_cfg_shdr *shdr; |
| 17922 | |
| 17923 | /* sanity check on queue memory */ |
| 17924 | if (!hrq || !drq) |
| 17925 | return -ENODEV; |
| 17926 | |
| 17927 | if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) |
| 17928 | goto list_remove; |
| 17929 | |
| 17930 | mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); |
| 17931 | if (!mbox) |
| 17932 | return -ENOMEM; |
| 17933 | length = (sizeof(struct lpfc_mbx_rq_destroy) - |
| 17934 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 17935 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 17936 | LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, |
| 17937 | length, LPFC_SLI4_MBX_EMBED); |
| 17938 | bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, |
| 17939 | hrq->queue_id); |
| 17940 | mbox->vport = hrq->phba->pport; |
| 17941 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 17942 | rc = lpfc_sli_issue_mbox(phba: hrq->phba, pmbox: mbox, MBX_POLL); |
| 17943 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 17944 | shdr = (union lpfc_sli4_cfg_shdr *) |
| 17945 | &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; |
| 17946 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 17947 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 17948 | if (shdr_status || shdr_add_status || rc) { |
| 17949 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17950 | "2509 RQ_DESTROY mailbox failed with " |
| 17951 | "status x%x add_status x%x, mbx status x%x\n" , |
| 17952 | shdr_status, shdr_add_status, rc); |
| 17953 | mempool_free(element: mbox, pool: hrq->phba->mbox_mem_pool); |
| 17954 | return -ENXIO; |
| 17955 | } |
| 17956 | bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, |
| 17957 | drq->queue_id); |
| 17958 | rc = lpfc_sli_issue_mbox(phba: drq->phba, pmbox: mbox, MBX_POLL); |
| 17959 | shdr = (union lpfc_sli4_cfg_shdr *) |
| 17960 | &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; |
| 17961 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 17962 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 17963 | if (shdr_status || shdr_add_status || rc) { |
| 17964 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 17965 | "2510 RQ_DESTROY mailbox failed with " |
| 17966 | "status x%x add_status x%x, mbx status x%x\n" , |
| 17967 | shdr_status, shdr_add_status, rc); |
| 17968 | status = -ENXIO; |
| 17969 | } |
| 17970 | mempool_free(element: mbox, pool: hrq->phba->mbox_mem_pool); |
| 17971 | |
| 17972 | list_remove: |
| 17973 | list_del_init(entry: &hrq->list); |
| 17974 | list_del_init(entry: &drq->list); |
| 17975 | return status; |
| 17976 | } |
| 17977 | |
| 17978 | /** |
| 17979 | * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA |
| 17980 | * @phba: The virtual port for which this call being executed. |
| 17981 | * @pdma_phys_addr0: Physical address of the 1st SGL page. |
| 17982 | * @pdma_phys_addr1: Physical address of the 2nd SGL page. |
| 17983 | * @xritag: the xritag that ties this io to the SGL pages. |
| 17984 | * |
| 17985 | * This routine will post the sgl pages for the IO that has the xritag |
| 17986 | * that is in the iocbq structure. The xritag is assigned during iocbq |
| 17987 | * creation and persists for as long as the driver is loaded. |
| 17988 | * if the caller has fewer than 256 scatter gather segments to map then |
| 17989 | * pdma_phys_addr1 should be 0. |
| 17990 | * If the caller needs to map more than 256 scatter gather segment then |
| 17991 | * pdma_phys_addr1 should be a valid physical address. |
| 17992 | * physical address for SGLs must be 64 byte aligned. |
| 17993 | * If you are going to map 2 SGL's then the first one must have 256 entries |
| 17994 | * the second sgl can have between 1 and 256 entries. |
| 17995 | * |
| 17996 | * Return codes: |
| 17997 | * 0 - Success |
| 17998 | * -ENXIO, -ENOMEM - Failure |
| 17999 | **/ |
| 18000 | int |
| 18001 | lpfc_sli4_post_sgl(struct lpfc_hba *phba, |
| 18002 | dma_addr_t pdma_phys_addr0, |
| 18003 | dma_addr_t pdma_phys_addr1, |
| 18004 | uint16_t xritag) |
| 18005 | { |
| 18006 | struct lpfc_mbx_post_sgl_pages *post_sgl_pages; |
| 18007 | LPFC_MBOXQ_t *mbox; |
| 18008 | int rc; |
| 18009 | uint32_t shdr_status, shdr_add_status; |
| 18010 | uint32_t mbox_tmo; |
| 18011 | union lpfc_sli4_cfg_shdr *shdr; |
| 18012 | |
| 18013 | if (xritag == NO_XRI) { |
| 18014 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 18015 | "0364 Invalid param:\n" ); |
| 18016 | return -EINVAL; |
| 18017 | } |
| 18018 | |
| 18019 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 18020 | if (!mbox) |
| 18021 | return -ENOMEM; |
| 18022 | |
| 18023 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 18024 | LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, |
| 18025 | sizeof(struct lpfc_mbx_post_sgl_pages) - |
| 18026 | sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); |
| 18027 | |
| 18028 | post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) |
| 18029 | &mbox->u.mqe.un.post_sgl_pages; |
| 18030 | bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); |
| 18031 | bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); |
| 18032 | |
| 18033 | post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = |
| 18034 | cpu_to_le32(putPaddrLow(pdma_phys_addr0)); |
| 18035 | post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = |
| 18036 | cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); |
| 18037 | |
| 18038 | post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = |
| 18039 | cpu_to_le32(putPaddrLow(pdma_phys_addr1)); |
| 18040 | post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = |
| 18041 | cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); |
| 18042 | if (!phba->sli4_hba.intr_enable) |
| 18043 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 18044 | else { |
| 18045 | mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); |
| 18046 | rc = lpfc_sli_issue_mbox_wait(phba, pmboxq: mbox, timeout: mbox_tmo); |
| 18047 | } |
| 18048 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 18049 | shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; |
| 18050 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 18051 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 18052 | if (!phba->sli4_hba.intr_enable) |
| 18053 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 18054 | else if (rc != MBX_TIMEOUT) |
| 18055 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 18056 | if (shdr_status || shdr_add_status || rc) { |
| 18057 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 18058 | "2511 POST_SGL mailbox failed with " |
| 18059 | "status x%x add_status x%x, mbx status x%x\n" , |
| 18060 | shdr_status, shdr_add_status, rc); |
| 18061 | } |
| 18062 | return 0; |
| 18063 | } |
| 18064 | |
| 18065 | /** |
| 18066 | * lpfc_sli4_alloc_xri - Get an available rpi in the device's range |
| 18067 | * @phba: pointer to lpfc hba data structure. |
| 18068 | * |
| 18069 | * This routine is invoked to post rpi header templates to the |
| 18070 | * HBA consistent with the SLI-4 interface spec. This routine |
| 18071 | * posts a SLI4_PAGE_SIZE memory region to the port to hold up to |
| 18072 | * SLI4_PAGE_SIZE modulo 64 rpi context headers. |
| 18073 | * |
| 18074 | * Returns |
| 18075 | * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful |
| 18076 | * LPFC_RPI_ALLOC_ERROR if no rpis are available. |
| 18077 | **/ |
| 18078 | static uint16_t |
| 18079 | lpfc_sli4_alloc_xri(struct lpfc_hba *phba) |
| 18080 | { |
| 18081 | unsigned long xri; |
| 18082 | |
| 18083 | /* |
| 18084 | * Fetch the next logical xri. Because this index is logical, |
| 18085 | * the driver starts at 0 each time. |
| 18086 | */ |
| 18087 | spin_lock_irq(lock: &phba->hbalock); |
| 18088 | xri = find_first_zero_bit(addr: phba->sli4_hba.xri_bmask, |
| 18089 | size: phba->sli4_hba.max_cfg_param.max_xri); |
| 18090 | if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { |
| 18091 | spin_unlock_irq(lock: &phba->hbalock); |
| 18092 | return NO_XRI; |
| 18093 | } else { |
| 18094 | set_bit(nr: xri, addr: phba->sli4_hba.xri_bmask); |
| 18095 | phba->sli4_hba.max_cfg_param.xri_used++; |
| 18096 | } |
| 18097 | spin_unlock_irq(lock: &phba->hbalock); |
| 18098 | return xri; |
| 18099 | } |
| 18100 | |
| 18101 | /** |
| 18102 | * __lpfc_sli4_free_xri - Release an xri for reuse. |
| 18103 | * @phba: pointer to lpfc hba data structure. |
| 18104 | * @xri: xri to release. |
| 18105 | * |
| 18106 | * This routine is invoked to release an xri to the pool of |
| 18107 | * available rpis maintained by the driver. |
| 18108 | **/ |
| 18109 | static void |
| 18110 | __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) |
| 18111 | { |
| 18112 | if (test_and_clear_bit(nr: xri, addr: phba->sli4_hba.xri_bmask)) { |
| 18113 | phba->sli4_hba.max_cfg_param.xri_used--; |
| 18114 | } |
| 18115 | } |
| 18116 | |
| 18117 | /** |
| 18118 | * lpfc_sli4_free_xri - Release an xri for reuse. |
| 18119 | * @phba: pointer to lpfc hba data structure. |
| 18120 | * @xri: xri to release. |
| 18121 | * |
| 18122 | * This routine is invoked to release an xri to the pool of |
| 18123 | * available rpis maintained by the driver. |
| 18124 | **/ |
| 18125 | void |
| 18126 | lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) |
| 18127 | { |
| 18128 | spin_lock_irq(lock: &phba->hbalock); |
| 18129 | __lpfc_sli4_free_xri(phba, xri); |
| 18130 | spin_unlock_irq(lock: &phba->hbalock); |
| 18131 | } |
| 18132 | |
| 18133 | /** |
| 18134 | * lpfc_sli4_next_xritag - Get an xritag for the io |
| 18135 | * @phba: Pointer to HBA context object. |
| 18136 | * |
| 18137 | * This function gets an xritag for the iocb. If there is no unused xritag |
| 18138 | * it will return 0xffff. |
| 18139 | * The function returns the allocated xritag if successful, else returns zero. |
| 18140 | * Zero is not a valid xritag. |
| 18141 | * The caller is not required to hold any lock. |
| 18142 | **/ |
| 18143 | uint16_t |
| 18144 | lpfc_sli4_next_xritag(struct lpfc_hba *phba) |
| 18145 | { |
| 18146 | uint16_t xri_index; |
| 18147 | |
| 18148 | xri_index = lpfc_sli4_alloc_xri(phba); |
| 18149 | if (xri_index == NO_XRI) |
| 18150 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
| 18151 | "2004 Failed to allocate XRI.last XRITAG is %d" |
| 18152 | " Max XRI is %d, Used XRI is %d\n" , |
| 18153 | xri_index, |
| 18154 | phba->sli4_hba.max_cfg_param.max_xri, |
| 18155 | phba->sli4_hba.max_cfg_param.xri_used); |
| 18156 | return xri_index; |
| 18157 | } |
| 18158 | |
| 18159 | /** |
| 18160 | * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. |
| 18161 | * @phba: pointer to lpfc hba data structure. |
| 18162 | * @post_sgl_list: pointer to els sgl entry list. |
| 18163 | * @post_cnt: number of els sgl entries on the list. |
| 18164 | * |
| 18165 | * This routine is invoked to post a block of driver's sgl pages to the |
| 18166 | * HBA using non-embedded mailbox command. No Lock is held. This routine |
| 18167 | * is only called when the driver is loading and after all IO has been |
| 18168 | * stopped. |
| 18169 | **/ |
| 18170 | static int |
| 18171 | lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, |
| 18172 | struct list_head *post_sgl_list, |
| 18173 | int post_cnt) |
| 18174 | { |
| 18175 | struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; |
| 18176 | struct lpfc_mbx_post_uembed_sgl_page1 *sgl; |
| 18177 | struct sgl_page_pairs *sgl_pg_pairs; |
| 18178 | void *viraddr; |
| 18179 | LPFC_MBOXQ_t *mbox; |
| 18180 | uint32_t reqlen, alloclen, pg_pairs; |
| 18181 | uint32_t mbox_tmo; |
| 18182 | uint16_t xritag_start = 0; |
| 18183 | int rc = 0; |
| 18184 | uint32_t shdr_status, shdr_add_status; |
| 18185 | union lpfc_sli4_cfg_shdr *shdr; |
| 18186 | |
| 18187 | reqlen = post_cnt * sizeof(struct sgl_page_pairs) + |
| 18188 | sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); |
| 18189 | if (reqlen > SLI4_PAGE_SIZE) { |
| 18190 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 18191 | "2559 Block sgl registration required DMA " |
| 18192 | "size (%d) great than a page\n" , reqlen); |
| 18193 | return -ENOMEM; |
| 18194 | } |
| 18195 | |
| 18196 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 18197 | if (!mbox) |
| 18198 | return -ENOMEM; |
| 18199 | |
| 18200 | /* Allocate DMA memory and set up the non-embedded mailbox command */ |
| 18201 | alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 18202 | LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, |
| 18203 | LPFC_SLI4_MBX_NEMBED); |
| 18204 | |
| 18205 | if (alloclen < reqlen) { |
| 18206 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 18207 | "0285 Allocated DMA memory size (%d) is " |
| 18208 | "less than the requested DMA memory " |
| 18209 | "size (%d)\n" , alloclen, reqlen); |
| 18210 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 18211 | return -ENOMEM; |
| 18212 | } |
| 18213 | /* Set up the SGL pages in the non-embedded DMA pages */ |
| 18214 | viraddr = mbox->sge_array->addr[0]; |
| 18215 | sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; |
| 18216 | sgl_pg_pairs = &sgl->sgl_pg_pairs; |
| 18217 | |
| 18218 | pg_pairs = 0; |
| 18219 | list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { |
| 18220 | /* Set up the sge entry */ |
| 18221 | sgl_pg_pairs->sgl_pg0_addr_lo = |
| 18222 | cpu_to_le32(putPaddrLow(sglq_entry->phys)); |
| 18223 | sgl_pg_pairs->sgl_pg0_addr_hi = |
| 18224 | cpu_to_le32(putPaddrHigh(sglq_entry->phys)); |
| 18225 | sgl_pg_pairs->sgl_pg1_addr_lo = |
| 18226 | cpu_to_le32(putPaddrLow(0)); |
| 18227 | sgl_pg_pairs->sgl_pg1_addr_hi = |
| 18228 | cpu_to_le32(putPaddrHigh(0)); |
| 18229 | |
| 18230 | /* Keep the first xritag on the list */ |
| 18231 | if (pg_pairs == 0) |
| 18232 | xritag_start = sglq_entry->sli4_xritag; |
| 18233 | sgl_pg_pairs++; |
| 18234 | pg_pairs++; |
| 18235 | } |
| 18236 | |
| 18237 | /* Complete initialization and perform endian conversion. */ |
| 18238 | bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); |
| 18239 | bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); |
| 18240 | sgl->word0 = cpu_to_le32(sgl->word0); |
| 18241 | |
| 18242 | if (!phba->sli4_hba.intr_enable) |
| 18243 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 18244 | else { |
| 18245 | mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); |
| 18246 | rc = lpfc_sli_issue_mbox_wait(phba, pmboxq: mbox, timeout: mbox_tmo); |
| 18247 | } |
| 18248 | shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; |
| 18249 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 18250 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 18251 | if (!phba->sli4_hba.intr_enable) |
| 18252 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 18253 | else if (rc != MBX_TIMEOUT) |
| 18254 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 18255 | if (shdr_status || shdr_add_status || rc) { |
| 18256 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 18257 | "2513 POST_SGL_BLOCK mailbox command failed " |
| 18258 | "status x%x add_status x%x mbx status x%x\n" , |
| 18259 | shdr_status, shdr_add_status, rc); |
| 18260 | rc = -ENXIO; |
| 18261 | } |
| 18262 | return rc; |
| 18263 | } |
| 18264 | |
| 18265 | /** |
| 18266 | * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware |
| 18267 | * @phba: pointer to lpfc hba data structure. |
| 18268 | * @nblist: pointer to nvme buffer list. |
| 18269 | * @count: number of scsi buffers on the list. |
| 18270 | * |
| 18271 | * This routine is invoked to post a block of @count scsi sgl pages from a |
| 18272 | * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. |
| 18273 | * No Lock is held. |
| 18274 | * |
| 18275 | **/ |
| 18276 | static int |
| 18277 | lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, |
| 18278 | int count) |
| 18279 | { |
| 18280 | struct lpfc_io_buf *lpfc_ncmd; |
| 18281 | struct lpfc_mbx_post_uembed_sgl_page1 *sgl; |
| 18282 | struct sgl_page_pairs *sgl_pg_pairs; |
| 18283 | void *viraddr; |
| 18284 | LPFC_MBOXQ_t *mbox; |
| 18285 | uint32_t reqlen, alloclen, pg_pairs; |
| 18286 | uint32_t mbox_tmo; |
| 18287 | uint16_t xritag_start = 0; |
| 18288 | int rc = 0; |
| 18289 | uint32_t shdr_status, shdr_add_status; |
| 18290 | dma_addr_t pdma_phys_bpl1; |
| 18291 | union lpfc_sli4_cfg_shdr *shdr; |
| 18292 | |
| 18293 | /* Calculate the requested length of the dma memory */ |
| 18294 | reqlen = count * sizeof(struct sgl_page_pairs) + |
| 18295 | sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); |
| 18296 | if (reqlen > SLI4_PAGE_SIZE) { |
| 18297 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
| 18298 | "6118 Block sgl registration required DMA " |
| 18299 | "size (%d) great than a page\n" , reqlen); |
| 18300 | return -ENOMEM; |
| 18301 | } |
| 18302 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 18303 | if (!mbox) { |
| 18304 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 18305 | "6119 Failed to allocate mbox cmd memory\n" ); |
| 18306 | return -ENOMEM; |
| 18307 | } |
| 18308 | |
| 18309 | /* Allocate DMA memory and set up the non-embedded mailbox command */ |
| 18310 | alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 18311 | LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, |
| 18312 | reqlen, LPFC_SLI4_MBX_NEMBED); |
| 18313 | |
| 18314 | if (alloclen < reqlen) { |
| 18315 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 18316 | "6120 Allocated DMA memory size (%d) is " |
| 18317 | "less than the requested DMA memory " |
| 18318 | "size (%d)\n" , alloclen, reqlen); |
| 18319 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 18320 | return -ENOMEM; |
| 18321 | } |
| 18322 | |
| 18323 | /* Get the first SGE entry from the non-embedded DMA memory */ |
| 18324 | viraddr = mbox->sge_array->addr[0]; |
| 18325 | |
| 18326 | /* Set up the SGL pages in the non-embedded DMA pages */ |
| 18327 | sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; |
| 18328 | sgl_pg_pairs = &sgl->sgl_pg_pairs; |
| 18329 | |
| 18330 | pg_pairs = 0; |
| 18331 | list_for_each_entry(lpfc_ncmd, nblist, list) { |
| 18332 | /* Set up the sge entry */ |
| 18333 | sgl_pg_pairs->sgl_pg0_addr_lo = |
| 18334 | cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); |
| 18335 | sgl_pg_pairs->sgl_pg0_addr_hi = |
| 18336 | cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); |
| 18337 | if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) |
| 18338 | pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + |
| 18339 | SGL_PAGE_SIZE; |
| 18340 | else |
| 18341 | pdma_phys_bpl1 = 0; |
| 18342 | sgl_pg_pairs->sgl_pg1_addr_lo = |
| 18343 | cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); |
| 18344 | sgl_pg_pairs->sgl_pg1_addr_hi = |
| 18345 | cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); |
| 18346 | /* Keep the first xritag on the list */ |
| 18347 | if (pg_pairs == 0) |
| 18348 | xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; |
| 18349 | sgl_pg_pairs++; |
| 18350 | pg_pairs++; |
| 18351 | } |
| 18352 | bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); |
| 18353 | bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); |
| 18354 | /* Perform endian conversion if necessary */ |
| 18355 | sgl->word0 = cpu_to_le32(sgl->word0); |
| 18356 | |
| 18357 | if (!phba->sli4_hba.intr_enable) { |
| 18358 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 18359 | } else { |
| 18360 | mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); |
| 18361 | rc = lpfc_sli_issue_mbox_wait(phba, pmboxq: mbox, timeout: mbox_tmo); |
| 18362 | } |
| 18363 | shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; |
| 18364 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 18365 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 18366 | if (!phba->sli4_hba.intr_enable) |
| 18367 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 18368 | else if (rc != MBX_TIMEOUT) |
| 18369 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 18370 | if (shdr_status || shdr_add_status || rc) { |
| 18371 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 18372 | "6125 POST_SGL_BLOCK mailbox command failed " |
| 18373 | "status x%x add_status x%x mbx status x%x\n" , |
| 18374 | shdr_status, shdr_add_status, rc); |
| 18375 | rc = -ENXIO; |
| 18376 | } |
| 18377 | return rc; |
| 18378 | } |
| 18379 | |
| 18380 | /** |
| 18381 | * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list |
| 18382 | * @phba: pointer to lpfc hba data structure. |
| 18383 | * @post_nblist: pointer to the nvme buffer list. |
| 18384 | * @sb_count: number of nvme buffers. |
| 18385 | * |
| 18386 | * This routine walks a list of nvme buffers that was passed in. It attempts |
| 18387 | * to construct blocks of nvme buffer sgls which contains contiguous xris and |
| 18388 | * uses the non-embedded SGL block post mailbox commands to post to the port. |
| 18389 | * For single NVME buffer sgl with non-contiguous xri, if any, it shall use |
| 18390 | * embedded SGL post mailbox command for posting. The @post_nblist passed in |
| 18391 | * must be local list, thus no lock is needed when manipulate the list. |
| 18392 | * |
| 18393 | * Returns: 0 = failure, non-zero number of successfully posted buffers. |
| 18394 | **/ |
| 18395 | int |
| 18396 | lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, |
| 18397 | struct list_head *post_nblist, int sb_count) |
| 18398 | { |
| 18399 | struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; |
| 18400 | int status, sgl_size; |
| 18401 | int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; |
| 18402 | dma_addr_t pdma_phys_sgl1; |
| 18403 | int last_xritag = NO_XRI; |
| 18404 | int cur_xritag; |
| 18405 | LIST_HEAD(prep_nblist); |
| 18406 | LIST_HEAD(blck_nblist); |
| 18407 | LIST_HEAD(nvme_nblist); |
| 18408 | |
| 18409 | /* sanity check */ |
| 18410 | if (sb_count <= 0) |
| 18411 | return -EINVAL; |
| 18412 | |
| 18413 | sgl_size = phba->cfg_sg_dma_buf_size; |
| 18414 | list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { |
| 18415 | list_del_init(entry: &lpfc_ncmd->list); |
| 18416 | block_cnt++; |
| 18417 | if ((last_xritag != NO_XRI) && |
| 18418 | (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { |
| 18419 | /* a hole in xri block, form a sgl posting block */ |
| 18420 | list_splice_init(list: &prep_nblist, head: &blck_nblist); |
| 18421 | post_cnt = block_cnt - 1; |
| 18422 | /* prepare list for next posting block */ |
| 18423 | list_add_tail(new: &lpfc_ncmd->list, head: &prep_nblist); |
| 18424 | block_cnt = 1; |
| 18425 | } else { |
| 18426 | /* prepare list for next posting block */ |
| 18427 | list_add_tail(new: &lpfc_ncmd->list, head: &prep_nblist); |
| 18428 | /* enough sgls for non-embed sgl mbox command */ |
| 18429 | if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { |
| 18430 | list_splice_init(list: &prep_nblist, head: &blck_nblist); |
| 18431 | post_cnt = block_cnt; |
| 18432 | block_cnt = 0; |
| 18433 | } |
| 18434 | } |
| 18435 | num_posting++; |
| 18436 | last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; |
| 18437 | |
| 18438 | /* end of repost sgl list condition for NVME buffers */ |
| 18439 | if (num_posting == sb_count) { |
| 18440 | if (post_cnt == 0) { |
| 18441 | /* last sgl posting block */ |
| 18442 | list_splice_init(list: &prep_nblist, head: &blck_nblist); |
| 18443 | post_cnt = block_cnt; |
| 18444 | } else if (block_cnt == 1) { |
| 18445 | /* last single sgl with non-contiguous xri */ |
| 18446 | if (sgl_size > SGL_PAGE_SIZE) |
| 18447 | pdma_phys_sgl1 = |
| 18448 | lpfc_ncmd->dma_phys_sgl + |
| 18449 | SGL_PAGE_SIZE; |
| 18450 | else |
| 18451 | pdma_phys_sgl1 = 0; |
| 18452 | cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; |
| 18453 | status = lpfc_sli4_post_sgl( |
| 18454 | phba, pdma_phys_addr0: lpfc_ncmd->dma_phys_sgl, |
| 18455 | pdma_phys_addr1: pdma_phys_sgl1, xritag: cur_xritag); |
| 18456 | if (status) { |
| 18457 | /* Post error. Buffer unavailable. */ |
| 18458 | lpfc_ncmd->flags |= |
| 18459 | LPFC_SBUF_NOT_POSTED; |
| 18460 | } else { |
| 18461 | /* Post success. Bffer available. */ |
| 18462 | lpfc_ncmd->flags &= |
| 18463 | ~LPFC_SBUF_NOT_POSTED; |
| 18464 | lpfc_ncmd->status = IOSTAT_SUCCESS; |
| 18465 | num_posted++; |
| 18466 | } |
| 18467 | /* success, put on NVME buffer sgl list */ |
| 18468 | list_add_tail(new: &lpfc_ncmd->list, head: &nvme_nblist); |
| 18469 | } |
| 18470 | } |
| 18471 | |
| 18472 | /* continue until a nembed page worth of sgls */ |
| 18473 | if (post_cnt == 0) |
| 18474 | continue; |
| 18475 | |
| 18476 | /* post block of NVME buffer list sgls */ |
| 18477 | status = lpfc_sli4_post_io_sgl_block(phba, nblist: &blck_nblist, |
| 18478 | count: post_cnt); |
| 18479 | |
| 18480 | /* don't reset xirtag due to hole in xri block */ |
| 18481 | if (block_cnt == 0) |
| 18482 | last_xritag = NO_XRI; |
| 18483 | |
| 18484 | /* reset NVME buffer post count for next round of posting */ |
| 18485 | post_cnt = 0; |
| 18486 | |
| 18487 | /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ |
| 18488 | while (!list_empty(head: &blck_nblist)) { |
| 18489 | list_remove_head(&blck_nblist, lpfc_ncmd, |
| 18490 | struct lpfc_io_buf, list); |
| 18491 | if (status) { |
| 18492 | /* Post error. Mark buffer unavailable. */ |
| 18493 | lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; |
| 18494 | } else { |
| 18495 | /* Post success, Mark buffer available. */ |
| 18496 | lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; |
| 18497 | lpfc_ncmd->status = IOSTAT_SUCCESS; |
| 18498 | num_posted++; |
| 18499 | } |
| 18500 | list_add_tail(new: &lpfc_ncmd->list, head: &nvme_nblist); |
| 18501 | } |
| 18502 | } |
| 18503 | /* Push NVME buffers with sgl posted to the available list */ |
| 18504 | lpfc_io_buf_replenish(phba, cbuf: &nvme_nblist); |
| 18505 | |
| 18506 | return num_posted; |
| 18507 | } |
| 18508 | |
| 18509 | /** |
| 18510 | * lpfc_fc_frame_check - Check that this frame is a valid frame to handle |
| 18511 | * @phba: pointer to lpfc_hba struct that the frame was received on |
| 18512 | * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) |
| 18513 | * |
| 18514 | * This function checks the fields in the @fc_hdr to see if the FC frame is a |
| 18515 | * valid type of frame that the LPFC driver will handle. This function will |
| 18516 | * return a zero if the frame is a valid frame or a non zero value when the |
| 18517 | * frame does not pass the check. |
| 18518 | **/ |
| 18519 | static int |
| 18520 | lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) |
| 18521 | { |
| 18522 | /* make rctl_names static to save stack space */ |
| 18523 | struct fc_vft_header *fc_vft_hdr; |
| 18524 | struct fc_app_header *fc_app_hdr; |
| 18525 | uint32_t * = (uint32_t *) fc_hdr; |
| 18526 | |
| 18527 | #define FC_RCTL_MDS_DIAGS 0xF4 |
| 18528 | |
| 18529 | switch (fc_hdr->fh_r_ctl) { |
| 18530 | case FC_RCTL_DD_UNCAT: /* uncategorized information */ |
| 18531 | case FC_RCTL_DD_SOL_DATA: /* solicited data */ |
| 18532 | case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ |
| 18533 | case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ |
| 18534 | case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ |
| 18535 | case FC_RCTL_DD_DATA_DESC: /* data descriptor */ |
| 18536 | case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ |
| 18537 | case FC_RCTL_DD_CMD_STATUS: /* command status */ |
| 18538 | case FC_RCTL_ELS_REQ: /* extended link services request */ |
| 18539 | case FC_RCTL_ELS_REP: /* extended link services reply */ |
| 18540 | case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ |
| 18541 | case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ |
| 18542 | case FC_RCTL_BA_ABTS: /* basic link service abort */ |
| 18543 | case FC_RCTL_BA_RMC: /* remove connection */ |
| 18544 | case FC_RCTL_BA_ACC: /* basic accept */ |
| 18545 | case FC_RCTL_BA_RJT: /* basic reject */ |
| 18546 | case FC_RCTL_BA_PRMT: |
| 18547 | case FC_RCTL_ACK_1: /* acknowledge_1 */ |
| 18548 | case FC_RCTL_ACK_0: /* acknowledge_0 */ |
| 18549 | case FC_RCTL_P_RJT: /* port reject */ |
| 18550 | case FC_RCTL_F_RJT: /* fabric reject */ |
| 18551 | case FC_RCTL_P_BSY: /* port busy */ |
| 18552 | case FC_RCTL_F_BSY: /* fabric busy to data frame */ |
| 18553 | case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ |
| 18554 | case FC_RCTL_LCR: /* link credit reset */ |
| 18555 | case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ |
| 18556 | case FC_RCTL_END: /* end */ |
| 18557 | break; |
| 18558 | case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ |
| 18559 | fc_vft_hdr = (struct fc_vft_header *)fc_hdr; |
| 18560 | fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; |
| 18561 | return lpfc_fc_frame_check(phba, fc_hdr); |
| 18562 | case FC_RCTL_BA_NOP: /* basic link service NOP */ |
| 18563 | default: |
| 18564 | goto drop; |
| 18565 | } |
| 18566 | |
| 18567 | switch (fc_hdr->fh_type) { |
| 18568 | case FC_TYPE_BLS: |
| 18569 | case FC_TYPE_ELS: |
| 18570 | case FC_TYPE_FCP: |
| 18571 | case FC_TYPE_CT: |
| 18572 | case FC_TYPE_NVME: |
| 18573 | break; |
| 18574 | case FC_TYPE_IP: |
| 18575 | case FC_TYPE_ILS: |
| 18576 | default: |
| 18577 | goto drop; |
| 18578 | } |
| 18579 | |
| 18580 | if (unlikely(phba->link_flag == LS_LOOPBACK_MODE && |
| 18581 | phba->cfg_vmid_app_header)) { |
| 18582 | /* Application header is 16B device header */ |
| 18583 | if (fc_hdr->fh_df_ctl & LPFC_FC_16B_DEVICE_HEADER) { |
| 18584 | fc_app_hdr = (struct fc_app_header *) (fc_hdr + 1); |
| 18585 | if (be32_to_cpu(fc_app_hdr->src_app_id) != |
| 18586 | LOOPBACK_SRC_APPID) { |
| 18587 | lpfc_printf_log(phba, KERN_WARNING, |
| 18588 | LOG_ELS | LOG_LIBDFC, |
| 18589 | "1932 Loopback src app id " |
| 18590 | "not matched, app_id:x%x\n" , |
| 18591 | be32_to_cpu(fc_app_hdr->src_app_id)); |
| 18592 | |
| 18593 | goto drop; |
| 18594 | } |
| 18595 | } else { |
| 18596 | lpfc_printf_log(phba, KERN_WARNING, |
| 18597 | LOG_ELS | LOG_LIBDFC, |
| 18598 | "1933 Loopback df_ctl bit not set, " |
| 18599 | "df_ctl:x%x\n" , |
| 18600 | fc_hdr->fh_df_ctl); |
| 18601 | |
| 18602 | goto drop; |
| 18603 | } |
| 18604 | } |
| 18605 | |
| 18606 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, |
| 18607 | "2538 Received frame rctl:x%x, type:x%x, " |
| 18608 | "frame Data:%08x %08x %08x %08x %08x %08x %08x\n" , |
| 18609 | fc_hdr->fh_r_ctl, fc_hdr->fh_type, |
| 18610 | be32_to_cpu(header[0]), be32_to_cpu(header[1]), |
| 18611 | be32_to_cpu(header[2]), be32_to_cpu(header[3]), |
| 18612 | be32_to_cpu(header[4]), be32_to_cpu(header[5]), |
| 18613 | be32_to_cpu(header[6])); |
| 18614 | return 0; |
| 18615 | drop: |
| 18616 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, |
| 18617 | "2539 Dropped frame rctl:x%x type:x%x\n" , |
| 18618 | fc_hdr->fh_r_ctl, fc_hdr->fh_type); |
| 18619 | return 1; |
| 18620 | } |
| 18621 | |
| 18622 | /** |
| 18623 | * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame |
| 18624 | * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) |
| 18625 | * |
| 18626 | * This function processes the FC header to retrieve the VFI from the VF |
| 18627 | * header, if one exists. This function will return the VFI if one exists |
| 18628 | * or 0 if no VSAN Header exists. |
| 18629 | **/ |
| 18630 | static uint32_t |
| 18631 | lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) |
| 18632 | { |
| 18633 | struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; |
| 18634 | |
| 18635 | if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) |
| 18636 | return 0; |
| 18637 | return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); |
| 18638 | } |
| 18639 | |
| 18640 | /** |
| 18641 | * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to |
| 18642 | * @phba: Pointer to the HBA structure to search for the vport on |
| 18643 | * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) |
| 18644 | * @fcfi: The FC Fabric ID that the frame came from |
| 18645 | * @did: Destination ID to match against |
| 18646 | * |
| 18647 | * This function searches the @phba for a vport that matches the content of the |
| 18648 | * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the |
| 18649 | * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function |
| 18650 | * returns the matching vport pointer or NULL if unable to match frame to a |
| 18651 | * vport. |
| 18652 | **/ |
| 18653 | static struct lpfc_vport * |
| 18654 | lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, |
| 18655 | uint16_t fcfi, uint32_t did) |
| 18656 | { |
| 18657 | struct lpfc_vport **vports; |
| 18658 | struct lpfc_vport *vport = NULL; |
| 18659 | int i; |
| 18660 | |
| 18661 | if (did == Fabric_DID) |
| 18662 | return phba->pport; |
| 18663 | if (test_bit(FC_PT2PT, &phba->pport->fc_flag) && |
| 18664 | phba->link_state != LPFC_HBA_READY) |
| 18665 | return phba->pport; |
| 18666 | |
| 18667 | vports = lpfc_create_vport_work_array(phba); |
| 18668 | if (vports != NULL) { |
| 18669 | for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { |
| 18670 | if (phba->fcf.fcfi == fcfi && |
| 18671 | vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && |
| 18672 | vports[i]->fc_myDID == did) { |
| 18673 | vport = vports[i]; |
| 18674 | break; |
| 18675 | } |
| 18676 | } |
| 18677 | } |
| 18678 | lpfc_destroy_vport_work_array(phba, vports); |
| 18679 | return vport; |
| 18680 | } |
| 18681 | |
| 18682 | /** |
| 18683 | * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp |
| 18684 | * @vport: The vport to work on. |
| 18685 | * |
| 18686 | * This function updates the receive sequence time stamp for this vport. The |
| 18687 | * receive sequence time stamp indicates the time that the last frame of the |
| 18688 | * the sequence that has been idle for the longest amount of time was received. |
| 18689 | * the driver uses this time stamp to indicate if any received sequences have |
| 18690 | * timed out. |
| 18691 | **/ |
| 18692 | static void |
| 18693 | lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) |
| 18694 | { |
| 18695 | struct lpfc_dmabuf *h_buf; |
| 18696 | struct hbq_dmabuf *dmabuf = NULL; |
| 18697 | |
| 18698 | /* get the oldest sequence on the rcv list */ |
| 18699 | h_buf = list_get_first(&vport->rcv_buffer_list, |
| 18700 | struct lpfc_dmabuf, list); |
| 18701 | if (!h_buf) |
| 18702 | return; |
| 18703 | dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); |
| 18704 | vport->rcv_buffer_time_stamp = dmabuf->time_stamp; |
| 18705 | } |
| 18706 | |
| 18707 | /** |
| 18708 | * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. |
| 18709 | * @vport: The vport that the received sequences were sent to. |
| 18710 | * |
| 18711 | * This function cleans up all outstanding received sequences. This is called |
| 18712 | * by the driver when a link event or user action invalidates all the received |
| 18713 | * sequences. |
| 18714 | **/ |
| 18715 | void |
| 18716 | lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) |
| 18717 | { |
| 18718 | struct lpfc_dmabuf *h_buf, *hnext; |
| 18719 | struct lpfc_dmabuf *d_buf, *dnext; |
| 18720 | struct hbq_dmabuf *dmabuf = NULL; |
| 18721 | |
| 18722 | /* start with the oldest sequence on the rcv list */ |
| 18723 | list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { |
| 18724 | dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); |
| 18725 | list_del_init(entry: &dmabuf->hbuf.list); |
| 18726 | list_for_each_entry_safe(d_buf, dnext, |
| 18727 | &dmabuf->dbuf.list, list) { |
| 18728 | list_del_init(entry: &d_buf->list); |
| 18729 | lpfc_in_buf_free(vport->phba, d_buf); |
| 18730 | } |
| 18731 | lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); |
| 18732 | } |
| 18733 | } |
| 18734 | |
| 18735 | /** |
| 18736 | * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. |
| 18737 | * @vport: The vport that the received sequences were sent to. |
| 18738 | * |
| 18739 | * This function determines whether any received sequences have timed out by |
| 18740 | * first checking the vport's rcv_buffer_time_stamp. If this time_stamp |
| 18741 | * indicates that there is at least one timed out sequence this routine will |
| 18742 | * go through the received sequences one at a time from most inactive to most |
| 18743 | * active to determine which ones need to be cleaned up. Once it has determined |
| 18744 | * that a sequence needs to be cleaned up it will simply free up the resources |
| 18745 | * without sending an abort. |
| 18746 | **/ |
| 18747 | void |
| 18748 | lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) |
| 18749 | { |
| 18750 | struct lpfc_dmabuf *h_buf, *hnext; |
| 18751 | struct lpfc_dmabuf *d_buf, *dnext; |
| 18752 | struct hbq_dmabuf *dmabuf = NULL; |
| 18753 | unsigned long timeout; |
| 18754 | int abort_count = 0; |
| 18755 | |
| 18756 | timeout = (msecs_to_jiffies(m: vport->phba->fc_edtov) + |
| 18757 | vport->rcv_buffer_time_stamp); |
| 18758 | if (list_empty(head: &vport->rcv_buffer_list) || |
| 18759 | time_before(jiffies, timeout)) |
| 18760 | return; |
| 18761 | /* start with the oldest sequence on the rcv list */ |
| 18762 | list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { |
| 18763 | dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); |
| 18764 | timeout = (msecs_to_jiffies(m: vport->phba->fc_edtov) + |
| 18765 | dmabuf->time_stamp); |
| 18766 | if (time_before(jiffies, timeout)) |
| 18767 | break; |
| 18768 | abort_count++; |
| 18769 | list_del_init(entry: &dmabuf->hbuf.list); |
| 18770 | list_for_each_entry_safe(d_buf, dnext, |
| 18771 | &dmabuf->dbuf.list, list) { |
| 18772 | list_del_init(entry: &d_buf->list); |
| 18773 | lpfc_in_buf_free(vport->phba, d_buf); |
| 18774 | } |
| 18775 | lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); |
| 18776 | } |
| 18777 | if (abort_count) |
| 18778 | lpfc_update_rcv_time_stamp(vport); |
| 18779 | } |
| 18780 | |
| 18781 | /** |
| 18782 | * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences |
| 18783 | * @vport: pointer to a vitural port |
| 18784 | * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame |
| 18785 | * |
| 18786 | * This function searches through the existing incomplete sequences that have |
| 18787 | * been sent to this @vport. If the frame matches one of the incomplete |
| 18788 | * sequences then the dbuf in the @dmabuf is added to the list of frames that |
| 18789 | * make up that sequence. If no sequence is found that matches this frame then |
| 18790 | * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list |
| 18791 | * This function returns a pointer to the first dmabuf in the sequence list that |
| 18792 | * the frame was linked to. |
| 18793 | **/ |
| 18794 | static struct hbq_dmabuf * |
| 18795 | lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) |
| 18796 | { |
| 18797 | struct fc_frame_header *new_hdr; |
| 18798 | struct fc_frame_header *temp_hdr; |
| 18799 | struct lpfc_dmabuf *d_buf; |
| 18800 | struct lpfc_dmabuf *h_buf; |
| 18801 | struct hbq_dmabuf *seq_dmabuf = NULL; |
| 18802 | struct hbq_dmabuf *temp_dmabuf = NULL; |
| 18803 | uint8_t found = 0; |
| 18804 | |
| 18805 | INIT_LIST_HEAD(list: &dmabuf->dbuf.list); |
| 18806 | dmabuf->time_stamp = jiffies; |
| 18807 | new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; |
| 18808 | |
| 18809 | /* Use the hdr_buf to find the sequence that this frame belongs to */ |
| 18810 | list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { |
| 18811 | temp_hdr = (struct fc_frame_header *)h_buf->virt; |
| 18812 | if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || |
| 18813 | (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || |
| 18814 | (memcmp(p: &temp_hdr->fh_s_id, q: &new_hdr->fh_s_id, size: 3))) |
| 18815 | continue; |
| 18816 | /* found a pending sequence that matches this frame */ |
| 18817 | seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); |
| 18818 | break; |
| 18819 | } |
| 18820 | if (!seq_dmabuf) { |
| 18821 | /* |
| 18822 | * This indicates first frame received for this sequence. |
| 18823 | * Queue the buffer on the vport's rcv_buffer_list. |
| 18824 | */ |
| 18825 | list_add_tail(new: &dmabuf->hbuf.list, head: &vport->rcv_buffer_list); |
| 18826 | lpfc_update_rcv_time_stamp(vport); |
| 18827 | return dmabuf; |
| 18828 | } |
| 18829 | temp_hdr = seq_dmabuf->hbuf.virt; |
| 18830 | if (be16_to_cpu(new_hdr->fh_seq_cnt) < |
| 18831 | be16_to_cpu(temp_hdr->fh_seq_cnt)) { |
| 18832 | list_del_init(entry: &seq_dmabuf->hbuf.list); |
| 18833 | list_add_tail(new: &dmabuf->hbuf.list, head: &vport->rcv_buffer_list); |
| 18834 | list_add_tail(new: &dmabuf->dbuf.list, head: &seq_dmabuf->dbuf.list); |
| 18835 | lpfc_update_rcv_time_stamp(vport); |
| 18836 | return dmabuf; |
| 18837 | } |
| 18838 | /* move this sequence to the tail to indicate a young sequence */ |
| 18839 | list_move_tail(list: &seq_dmabuf->hbuf.list, head: &vport->rcv_buffer_list); |
| 18840 | seq_dmabuf->time_stamp = jiffies; |
| 18841 | lpfc_update_rcv_time_stamp(vport); |
| 18842 | if (list_empty(head: &seq_dmabuf->dbuf.list)) { |
| 18843 | list_add_tail(new: &dmabuf->dbuf.list, head: &seq_dmabuf->dbuf.list); |
| 18844 | return seq_dmabuf; |
| 18845 | } |
| 18846 | /* find the correct place in the sequence to insert this frame */ |
| 18847 | d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list); |
| 18848 | while (!found) { |
| 18849 | temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
| 18850 | temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; |
| 18851 | /* |
| 18852 | * If the frame's sequence count is greater than the frame on |
| 18853 | * the list then insert the frame right after this frame |
| 18854 | */ |
| 18855 | if (be16_to_cpu(new_hdr->fh_seq_cnt) > |
| 18856 | be16_to_cpu(temp_hdr->fh_seq_cnt)) { |
| 18857 | list_add(new: &dmabuf->dbuf.list, head: &temp_dmabuf->dbuf.list); |
| 18858 | found = 1; |
| 18859 | break; |
| 18860 | } |
| 18861 | |
| 18862 | if (&d_buf->list == &seq_dmabuf->dbuf.list) |
| 18863 | break; |
| 18864 | d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list); |
| 18865 | } |
| 18866 | |
| 18867 | if (found) |
| 18868 | return seq_dmabuf; |
| 18869 | return NULL; |
| 18870 | } |
| 18871 | |
| 18872 | /** |
| 18873 | * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence |
| 18874 | * @vport: pointer to a vitural port |
| 18875 | * @dmabuf: pointer to a dmabuf that describes the FC sequence |
| 18876 | * |
| 18877 | * This function tries to abort from the partially assembed sequence, described |
| 18878 | * by the information from basic abbort @dmabuf. It checks to see whether such |
| 18879 | * partially assembled sequence held by the driver. If so, it shall free up all |
| 18880 | * the frames from the partially assembled sequence. |
| 18881 | * |
| 18882 | * Return |
| 18883 | * true -- if there is matching partially assembled sequence present and all |
| 18884 | * the frames freed with the sequence; |
| 18885 | * false -- if there is no matching partially assembled sequence present so |
| 18886 | * nothing got aborted in the lower layer driver |
| 18887 | **/ |
| 18888 | static bool |
| 18889 | lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, |
| 18890 | struct hbq_dmabuf *dmabuf) |
| 18891 | { |
| 18892 | struct fc_frame_header *new_hdr; |
| 18893 | struct fc_frame_header *temp_hdr; |
| 18894 | struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; |
| 18895 | struct hbq_dmabuf *seq_dmabuf = NULL; |
| 18896 | |
| 18897 | /* Use the hdr_buf to find the sequence that matches this frame */ |
| 18898 | INIT_LIST_HEAD(list: &dmabuf->dbuf.list); |
| 18899 | INIT_LIST_HEAD(list: &dmabuf->hbuf.list); |
| 18900 | new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; |
| 18901 | list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { |
| 18902 | temp_hdr = (struct fc_frame_header *)h_buf->virt; |
| 18903 | if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || |
| 18904 | (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || |
| 18905 | (memcmp(p: &temp_hdr->fh_s_id, q: &new_hdr->fh_s_id, size: 3))) |
| 18906 | continue; |
| 18907 | /* found a pending sequence that matches this frame */ |
| 18908 | seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); |
| 18909 | break; |
| 18910 | } |
| 18911 | |
| 18912 | /* Free up all the frames from the partially assembled sequence */ |
| 18913 | if (seq_dmabuf) { |
| 18914 | list_for_each_entry_safe(d_buf, n_buf, |
| 18915 | &seq_dmabuf->dbuf.list, list) { |
| 18916 | list_del_init(entry: &d_buf->list); |
| 18917 | lpfc_in_buf_free(vport->phba, d_buf); |
| 18918 | } |
| 18919 | return true; |
| 18920 | } |
| 18921 | return false; |
| 18922 | } |
| 18923 | |
| 18924 | /** |
| 18925 | * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp |
| 18926 | * @vport: pointer to a vitural port |
| 18927 | * @dmabuf: pointer to a dmabuf that describes the FC sequence |
| 18928 | * |
| 18929 | * This function tries to abort from the assembed sequence from upper level |
| 18930 | * protocol, described by the information from basic abbort @dmabuf. It |
| 18931 | * checks to see whether such pending context exists at upper level protocol. |
| 18932 | * If so, it shall clean up the pending context. |
| 18933 | * |
| 18934 | * Return |
| 18935 | * true -- if there is matching pending context of the sequence cleaned |
| 18936 | * at ulp; |
| 18937 | * false -- if there is no matching pending context of the sequence present |
| 18938 | * at ulp. |
| 18939 | **/ |
| 18940 | static bool |
| 18941 | lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) |
| 18942 | { |
| 18943 | struct lpfc_hba *phba = vport->phba; |
| 18944 | int handled; |
| 18945 | |
| 18946 | /* Accepting abort at ulp with SLI4 only */ |
| 18947 | if (phba->sli_rev < LPFC_SLI_REV4) |
| 18948 | return false; |
| 18949 | |
| 18950 | /* Register all caring upper level protocols to attend abort */ |
| 18951 | handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); |
| 18952 | if (handled) |
| 18953 | return true; |
| 18954 | |
| 18955 | return false; |
| 18956 | } |
| 18957 | |
| 18958 | /** |
| 18959 | * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler |
| 18960 | * @phba: Pointer to HBA context object. |
| 18961 | * @cmd_iocbq: pointer to the command iocbq structure. |
| 18962 | * @rsp_iocbq: pointer to the response iocbq structure. |
| 18963 | * |
| 18964 | * This function handles the sequence abort response iocb command complete |
| 18965 | * event. It properly releases the memory allocated to the sequence abort |
| 18966 | * accept iocb. |
| 18967 | **/ |
| 18968 | static void |
| 18969 | lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, |
| 18970 | struct lpfc_iocbq *cmd_iocbq, |
| 18971 | struct lpfc_iocbq *rsp_iocbq) |
| 18972 | { |
| 18973 | if (cmd_iocbq) { |
| 18974 | lpfc_nlp_put(cmd_iocbq->ndlp); |
| 18975 | lpfc_sli_release_iocbq(phba, iocbq: cmd_iocbq); |
| 18976 | } |
| 18977 | |
| 18978 | /* Failure means BLS ABORT RSP did not get delivered to remote node*/ |
| 18979 | if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) |
| 18980 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 18981 | "3154 BLS ABORT RSP failed, data: x%x/x%x\n" , |
| 18982 | get_job_ulpstatus(phba, rsp_iocbq), |
| 18983 | get_job_word4(phba, rsp_iocbq)); |
| 18984 | } |
| 18985 | |
| 18986 | /** |
| 18987 | * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. |
| 18988 | * @phba: Pointer to HBA context object. |
| 18989 | * @xri: xri id in transaction. |
| 18990 | * |
| 18991 | * This function validates the xri maps to the known range of XRIs allocated an |
| 18992 | * used by the driver. |
| 18993 | **/ |
| 18994 | uint16_t |
| 18995 | lpfc_sli4_xri_inrange(struct lpfc_hba *phba, |
| 18996 | uint16_t xri) |
| 18997 | { |
| 18998 | uint16_t i; |
| 18999 | |
| 19000 | for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { |
| 19001 | if (xri == phba->sli4_hba.xri_ids[i]) |
| 19002 | return i; |
| 19003 | } |
| 19004 | return NO_XRI; |
| 19005 | } |
| 19006 | |
| 19007 | /** |
| 19008 | * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort |
| 19009 | * @vport: pointer to a virtual port. |
| 19010 | * @fc_hdr: pointer to a FC frame header. |
| 19011 | * @aborted: was the partially assembled receive sequence successfully aborted |
| 19012 | * |
| 19013 | * This function sends a basic response to a previous unsol sequence abort |
| 19014 | * event after aborting the sequence handling. |
| 19015 | **/ |
| 19016 | void |
| 19017 | lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, |
| 19018 | struct fc_frame_header *fc_hdr, bool aborted) |
| 19019 | { |
| 19020 | struct lpfc_hba *phba = vport->phba; |
| 19021 | struct lpfc_iocbq *ctiocb = NULL; |
| 19022 | struct lpfc_nodelist *ndlp; |
| 19023 | uint16_t oxid, rxid, xri, lxri; |
| 19024 | uint32_t sid, fctl; |
| 19025 | union lpfc_wqe128 *icmd; |
| 19026 | int rc; |
| 19027 | |
| 19028 | if (!lpfc_is_link_up(phba)) |
| 19029 | return; |
| 19030 | |
| 19031 | sid = sli4_sid_from_fc_hdr(fc_hdr); |
| 19032 | oxid = be16_to_cpu(fc_hdr->fh_ox_id); |
| 19033 | rxid = be16_to_cpu(fc_hdr->fh_rx_id); |
| 19034 | |
| 19035 | ndlp = lpfc_findnode_did(vport, sid); |
| 19036 | if (!ndlp) { |
| 19037 | ndlp = lpfc_nlp_init(vport, did: sid); |
| 19038 | if (!ndlp) { |
| 19039 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, |
| 19040 | "1268 Failed to allocate ndlp for " |
| 19041 | "oxid:x%x SID:x%x\n" , oxid, sid); |
| 19042 | return; |
| 19043 | } |
| 19044 | /* Put ndlp onto vport node list */ |
| 19045 | lpfc_enqueue_node(vport, ndlp); |
| 19046 | } |
| 19047 | |
| 19048 | /* Allocate buffer for rsp iocb */ |
| 19049 | ctiocb = lpfc_sli_get_iocbq(phba); |
| 19050 | if (!ctiocb) |
| 19051 | return; |
| 19052 | |
| 19053 | icmd = &ctiocb->wqe; |
| 19054 | |
| 19055 | /* Extract the F_CTL field from FC_HDR */ |
| 19056 | fctl = sli4_fctl_from_fc_hdr(fc_hdr); |
| 19057 | |
| 19058 | ctiocb->ndlp = lpfc_nlp_get(ndlp); |
| 19059 | if (!ctiocb->ndlp) { |
| 19060 | lpfc_sli_release_iocbq(phba, iocbq: ctiocb); |
| 19061 | return; |
| 19062 | } |
| 19063 | |
| 19064 | ctiocb->vport = vport; |
| 19065 | ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; |
| 19066 | ctiocb->sli4_lxritag = NO_XRI; |
| 19067 | ctiocb->sli4_xritag = NO_XRI; |
| 19068 | ctiocb->abort_rctl = FC_RCTL_BA_ACC; |
| 19069 | |
| 19070 | if (fctl & FC_FC_EX_CTX) |
| 19071 | /* Exchange responder sent the abort so we |
| 19072 | * own the oxid. |
| 19073 | */ |
| 19074 | xri = oxid; |
| 19075 | else |
| 19076 | xri = rxid; |
| 19077 | lxri = lpfc_sli4_xri_inrange(phba, xri); |
| 19078 | if (lxri != NO_XRI) |
| 19079 | lpfc_set_rrq_active(phba, ndlp, xritag: lxri, |
| 19080 | rxid: (xri == oxid) ? rxid : oxid, send_rrq: 0); |
| 19081 | /* For BA_ABTS from exchange responder, if the logical xri with |
| 19082 | * the oxid maps to the FCP XRI range, the port no longer has |
| 19083 | * that exchange context, send a BLS_RJT. Override the IOCB for |
| 19084 | * a BA_RJT. |
| 19085 | */ |
| 19086 | if ((fctl & FC_FC_EX_CTX) && |
| 19087 | (lxri > lpfc_sli4_get_iocb_cnt(phba))) { |
| 19088 | ctiocb->abort_rctl = FC_RCTL_BA_RJT; |
| 19089 | bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0); |
| 19090 | bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp, |
| 19091 | FC_BA_RJT_INV_XID); |
| 19092 | bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp, |
| 19093 | FC_BA_RJT_UNABLE); |
| 19094 | } |
| 19095 | |
| 19096 | /* If BA_ABTS failed to abort a partially assembled receive sequence, |
| 19097 | * the driver no longer has that exchange, send a BLS_RJT. Override |
| 19098 | * the IOCB for a BA_RJT. |
| 19099 | */ |
| 19100 | if (aborted == false) { |
| 19101 | ctiocb->abort_rctl = FC_RCTL_BA_RJT; |
| 19102 | bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0); |
| 19103 | bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp, |
| 19104 | FC_BA_RJT_INV_XID); |
| 19105 | bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp, |
| 19106 | FC_BA_RJT_UNABLE); |
| 19107 | } |
| 19108 | |
| 19109 | if (fctl & FC_FC_EX_CTX) { |
| 19110 | /* ABTS sent by responder to CT exchange, construction |
| 19111 | * of BA_ACC will use OX_ID from ABTS for the XRI_TAG |
| 19112 | * field and RX_ID from ABTS for RX_ID field. |
| 19113 | */ |
| 19114 | ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP; |
| 19115 | bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid); |
| 19116 | } else { |
| 19117 | /* ABTS sent by initiator to CT exchange, construction |
| 19118 | * of BA_ACC will need to allocate a new XRI as for the |
| 19119 | * XRI_TAG field. |
| 19120 | */ |
| 19121 | ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT; |
| 19122 | } |
| 19123 | |
| 19124 | /* OX_ID is invariable to who sent ABTS to CT exchange */ |
| 19125 | bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid); |
| 19126 | bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, rxid); |
| 19127 | |
| 19128 | /* Use CT=VPI */ |
| 19129 | bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest, |
| 19130 | ndlp->nlp_DID); |
| 19131 | bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp, |
| 19132 | phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); |
| 19133 | bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX); |
| 19134 | |
| 19135 | /* Xmit CT abts response on exchange <xid> */ |
| 19136 | lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, |
| 19137 | "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n" , |
| 19138 | ctiocb->abort_rctl, oxid, phba->link_state); |
| 19139 | |
| 19140 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb: ctiocb, flag: 0); |
| 19141 | if (rc == IOCB_ERROR) { |
| 19142 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
| 19143 | "2925 Failed to issue CT ABTS RSP x%x on " |
| 19144 | "xri x%x, Data x%x\n" , |
| 19145 | ctiocb->abort_rctl, oxid, |
| 19146 | phba->link_state); |
| 19147 | lpfc_nlp_put(ndlp); |
| 19148 | ctiocb->ndlp = NULL; |
| 19149 | lpfc_sli_release_iocbq(phba, iocbq: ctiocb); |
| 19150 | } |
| 19151 | |
| 19152 | /* if only usage of this nodelist is BLS response, release initial ref |
| 19153 | * to free ndlp when transmit completes |
| 19154 | */ |
| 19155 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE && |
| 19156 | !test_bit(NLP_DROPPED, &ndlp->nlp_flag) && |
| 19157 | !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) { |
| 19158 | set_bit(nr: NLP_DROPPED, addr: &ndlp->nlp_flag); |
| 19159 | lpfc_nlp_put(ndlp); |
| 19160 | } |
| 19161 | } |
| 19162 | |
| 19163 | /** |
| 19164 | * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event |
| 19165 | * @vport: Pointer to the vport on which this sequence was received |
| 19166 | * @dmabuf: pointer to a dmabuf that describes the FC sequence |
| 19167 | * |
| 19168 | * This function handles an SLI-4 unsolicited abort event. If the unsolicited |
| 19169 | * receive sequence is only partially assembed by the driver, it shall abort |
| 19170 | * the partially assembled frames for the sequence. Otherwise, if the |
| 19171 | * unsolicited receive sequence has been completely assembled and passed to |
| 19172 | * the Upper Layer Protocol (ULP), it then mark the per oxid status for the |
| 19173 | * unsolicited sequence has been aborted. After that, it will issue a basic |
| 19174 | * accept to accept the abort. |
| 19175 | **/ |
| 19176 | static void |
| 19177 | lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, |
| 19178 | struct hbq_dmabuf *dmabuf) |
| 19179 | { |
| 19180 | struct lpfc_hba *phba = vport->phba; |
| 19181 | struct fc_frame_header fc_hdr; |
| 19182 | uint32_t fctl; |
| 19183 | bool aborted; |
| 19184 | |
| 19185 | /* Make a copy of fc_hdr before the dmabuf being released */ |
| 19186 | memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); |
| 19187 | fctl = sli4_fctl_from_fc_hdr(&fc_hdr); |
| 19188 | |
| 19189 | if (fctl & FC_FC_EX_CTX) { |
| 19190 | /* ABTS by responder to exchange, no cleanup needed */ |
| 19191 | aborted = true; |
| 19192 | } else { |
| 19193 | /* ABTS by initiator to exchange, need to do cleanup */ |
| 19194 | aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); |
| 19195 | if (aborted == false) |
| 19196 | aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); |
| 19197 | } |
| 19198 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
| 19199 | |
| 19200 | if (phba->nvmet_support) { |
| 19201 | lpfc_nvmet_rcv_unsol_abort(vport, fc_hdr: &fc_hdr); |
| 19202 | return; |
| 19203 | } |
| 19204 | |
| 19205 | /* Respond with BA_ACC or BA_RJT accordingly */ |
| 19206 | lpfc_sli4_seq_abort_rsp(vport, fc_hdr: &fc_hdr, aborted); |
| 19207 | } |
| 19208 | |
| 19209 | /** |
| 19210 | * lpfc_seq_complete - Indicates if a sequence is complete |
| 19211 | * @dmabuf: pointer to a dmabuf that describes the FC sequence |
| 19212 | * |
| 19213 | * This function checks the sequence, starting with the frame described by |
| 19214 | * @dmabuf, to see if all the frames associated with this sequence are present. |
| 19215 | * the frames associated with this sequence are linked to the @dmabuf using the |
| 19216 | * dbuf list. This function looks for two major things. 1) That the first frame |
| 19217 | * has a sequence count of zero. 2) There is a frame with last frame of sequence |
| 19218 | * set. 3) That there are no holes in the sequence count. The function will |
| 19219 | * return 1 when the sequence is complete, otherwise it will return 0. |
| 19220 | **/ |
| 19221 | static int |
| 19222 | lpfc_seq_complete(struct hbq_dmabuf *dmabuf) |
| 19223 | { |
| 19224 | struct fc_frame_header *hdr; |
| 19225 | struct lpfc_dmabuf *d_buf; |
| 19226 | struct hbq_dmabuf *seq_dmabuf; |
| 19227 | uint32_t fctl; |
| 19228 | int seq_count = 0; |
| 19229 | |
| 19230 | hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; |
| 19231 | /* make sure first fame of sequence has a sequence count of zero */ |
| 19232 | if (hdr->fh_seq_cnt != seq_count) |
| 19233 | return 0; |
| 19234 | fctl = (hdr->fh_f_ctl[0] << 16 | |
| 19235 | hdr->fh_f_ctl[1] << 8 | |
| 19236 | hdr->fh_f_ctl[2]); |
| 19237 | /* If last frame of sequence we can return success. */ |
| 19238 | if (fctl & FC_FC_END_SEQ) |
| 19239 | return 1; |
| 19240 | list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { |
| 19241 | seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
| 19242 | hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; |
| 19243 | /* If there is a hole in the sequence count then fail. */ |
| 19244 | if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) |
| 19245 | return 0; |
| 19246 | fctl = (hdr->fh_f_ctl[0] << 16 | |
| 19247 | hdr->fh_f_ctl[1] << 8 | |
| 19248 | hdr->fh_f_ctl[2]); |
| 19249 | /* If last frame of sequence we can return success. */ |
| 19250 | if (fctl & FC_FC_END_SEQ) |
| 19251 | return 1; |
| 19252 | } |
| 19253 | return 0; |
| 19254 | } |
| 19255 | |
| 19256 | /** |
| 19257 | * lpfc_prep_seq - Prep sequence for ULP processing |
| 19258 | * @vport: Pointer to the vport on which this sequence was received |
| 19259 | * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence |
| 19260 | * |
| 19261 | * This function takes a sequence, described by a list of frames, and creates |
| 19262 | * a list of iocbq structures to describe the sequence. This iocbq list will be |
| 19263 | * used to issue to the generic unsolicited sequence handler. This routine |
| 19264 | * returns a pointer to the first iocbq in the list. If the function is unable |
| 19265 | * to allocate an iocbq then it throw out the received frames that were not |
| 19266 | * able to be described and return a pointer to the first iocbq. If unable to |
| 19267 | * allocate any iocbqs (including the first) this function will return NULL. |
| 19268 | **/ |
| 19269 | static struct lpfc_iocbq * |
| 19270 | lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) |
| 19271 | { |
| 19272 | struct hbq_dmabuf *hbq_buf; |
| 19273 | struct lpfc_dmabuf *d_buf, *n_buf; |
| 19274 | struct lpfc_iocbq *first_iocbq, *iocbq; |
| 19275 | struct fc_frame_header *fc_hdr; |
| 19276 | uint32_t sid; |
| 19277 | uint32_t len, tot_len; |
| 19278 | |
| 19279 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; |
| 19280 | /* remove from receive buffer list */ |
| 19281 | list_del_init(entry: &seq_dmabuf->hbuf.list); |
| 19282 | lpfc_update_rcv_time_stamp(vport); |
| 19283 | /* get the Remote Port's SID */ |
| 19284 | sid = sli4_sid_from_fc_hdr(fc_hdr); |
| 19285 | tot_len = 0; |
| 19286 | /* Get an iocbq struct to fill in. */ |
| 19287 | first_iocbq = lpfc_sli_get_iocbq(phba: vport->phba); |
| 19288 | if (first_iocbq) { |
| 19289 | /* Initialize the first IOCB. */ |
| 19290 | first_iocbq->wcqe_cmpl.total_data_placed = 0; |
| 19291 | bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl, |
| 19292 | IOSTAT_SUCCESS); |
| 19293 | first_iocbq->vport = vport; |
| 19294 | |
| 19295 | /* Check FC Header to see what TYPE of frame we are rcv'ing */ |
| 19296 | if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { |
| 19297 | bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp, |
| 19298 | sli4_did_from_fc_hdr(fc_hdr)); |
| 19299 | } |
| 19300 | |
| 19301 | bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com, |
| 19302 | NO_XRI); |
| 19303 | bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com, |
| 19304 | be16_to_cpu(fc_hdr->fh_ox_id)); |
| 19305 | |
| 19306 | /* put the first buffer into the first iocb */ |
| 19307 | tot_len = bf_get(lpfc_rcqe_length, |
| 19308 | &seq_dmabuf->cq_event.cqe.rcqe_cmpl); |
| 19309 | |
| 19310 | first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf; |
| 19311 | first_iocbq->bpl_dmabuf = NULL; |
| 19312 | /* Keep track of the BDE count */ |
| 19313 | first_iocbq->wcqe_cmpl.word3 = 1; |
| 19314 | |
| 19315 | if (tot_len > LPFC_DATA_BUF_SIZE) |
| 19316 | first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = |
| 19317 | LPFC_DATA_BUF_SIZE; |
| 19318 | else |
| 19319 | first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len; |
| 19320 | |
| 19321 | first_iocbq->wcqe_cmpl.total_data_placed = tot_len; |
| 19322 | bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest, |
| 19323 | sid); |
| 19324 | } |
| 19325 | iocbq = first_iocbq; |
| 19326 | /* |
| 19327 | * Each IOCBq can have two Buffers assigned, so go through the list |
| 19328 | * of buffers for this sequence and save two buffers in each IOCBq |
| 19329 | */ |
| 19330 | list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { |
| 19331 | if (!iocbq) { |
| 19332 | lpfc_in_buf_free(vport->phba, d_buf); |
| 19333 | continue; |
| 19334 | } |
| 19335 | if (!iocbq->bpl_dmabuf) { |
| 19336 | iocbq->bpl_dmabuf = d_buf; |
| 19337 | iocbq->wcqe_cmpl.word3++; |
| 19338 | /* We need to get the size out of the right CQE */ |
| 19339 | hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
| 19340 | len = bf_get(lpfc_rcqe_length, |
| 19341 | &hbq_buf->cq_event.cqe.rcqe_cmpl); |
| 19342 | iocbq->unsol_rcv_len = len; |
| 19343 | iocbq->wcqe_cmpl.total_data_placed += len; |
| 19344 | tot_len += len; |
| 19345 | } else { |
| 19346 | iocbq = lpfc_sli_get_iocbq(phba: vport->phba); |
| 19347 | if (!iocbq) { |
| 19348 | if (first_iocbq) { |
| 19349 | bf_set(lpfc_wcqe_c_status, |
| 19350 | &first_iocbq->wcqe_cmpl, |
| 19351 | IOSTAT_SUCCESS); |
| 19352 | first_iocbq->wcqe_cmpl.parameter = |
| 19353 | IOERR_NO_RESOURCES; |
| 19354 | } |
| 19355 | lpfc_in_buf_free(vport->phba, d_buf); |
| 19356 | continue; |
| 19357 | } |
| 19358 | /* We need to get the size out of the right CQE */ |
| 19359 | hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
| 19360 | len = bf_get(lpfc_rcqe_length, |
| 19361 | &hbq_buf->cq_event.cqe.rcqe_cmpl); |
| 19362 | iocbq->cmd_dmabuf = d_buf; |
| 19363 | iocbq->bpl_dmabuf = NULL; |
| 19364 | iocbq->wcqe_cmpl.word3 = 1; |
| 19365 | |
| 19366 | if (len > LPFC_DATA_BUF_SIZE) |
| 19367 | iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize = |
| 19368 | LPFC_DATA_BUF_SIZE; |
| 19369 | else |
| 19370 | iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize = |
| 19371 | len; |
| 19372 | |
| 19373 | tot_len += len; |
| 19374 | iocbq->wcqe_cmpl.total_data_placed = tot_len; |
| 19375 | bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest, |
| 19376 | sid); |
| 19377 | list_add_tail(new: &iocbq->list, head: &first_iocbq->list); |
| 19378 | } |
| 19379 | } |
| 19380 | /* Free the sequence's header buffer */ |
| 19381 | if (!first_iocbq) |
| 19382 | lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf); |
| 19383 | |
| 19384 | return first_iocbq; |
| 19385 | } |
| 19386 | |
/* Hand a fully reassembled unsolicited sequence to the upper layer.
 * Converts the frame list into an iocbq chain via lpfc_prep_seq() and
 * dispatches it through the ELS ring's unsolicited handler; every iocbq
 * created for the sequence is released before returning.
 */
static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		/* lpfc_prep_seq() already freed the frames on total failure */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n" ,
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	/* No ULP claimed the R_CTL/Type pair - drop the sequence buffers */
	if (!lpfc_complete_unsol_iocb(phba,
				      pring: phba->sli4_hba.els_wq->pring,
				      saveq: iocbq, fch_r_ctl: fc_hdr->fh_r_ctl,
				      fch_type: fc_hdr->fh_type)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n" ,
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
	}

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(entry: &curr_iocb->list);
		lpfc_sli_release_iocbq(phba, iocbq: curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}
| 19425 | |
| 19426 | static void |
| 19427 | lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| 19428 | struct lpfc_iocbq *rspiocb) |
| 19429 | { |
| 19430 | struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; |
| 19431 | |
| 19432 | if (pcmd && pcmd->virt) |
| 19433 | dma_pool_free(pool: phba->lpfc_drb_pool, vaddr: pcmd->virt, addr: pcmd->phys); |
| 19434 | kfree(objp: pcmd); |
| 19435 | lpfc_sli_release_iocbq(phba, iocbq: cmdiocb); |
| 19436 | lpfc_drain_txq(phba); |
| 19437 | } |
| 19438 | |
/* Reflect a received MDS diagnostic (loopback) frame back onto the wire.
 * The payload is copied into a freshly allocated DMA buffer and transmitted
 * with a SEND_FRAME WQE. If no iocb is available the event is requeued for
 * the worker thread; on any other failure the frame is dropped and all
 * temporary resources are released.
 */
static void
lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
			      struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq = NULL;
	union lpfc_wqe128 *pwqe;
	struct lpfc_dmabuf *pcmd = NULL;
	uint32_t frame_len;
	int rc;
	unsigned long iflags;

	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* Send the received frame back */
	iocbq = lpfc_sli_get_iocbq(phba);
	if (!iocbq) {
		/* Queue cq event and wakeup worker thread to process it */
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(new: &dmabuf->cq_event.list,
			      head: &phba->sli4_hba.sp_queue_event);
		spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflags);
		set_bit(nr: HBA_SP_QUEUE_EVT, addr: &phba->hba_flag);
		lpfc_worker_wake_up(phba);
		return;
	}

	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = dma_pool_alloc(pool: phba->lpfc_drb_pool, GFP_KERNEL,
					    handle: &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto exit;

	INIT_LIST_HEAD(list: &pcmd->list);

	/* copyin the payload */
	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);

	iocbq->cmd_dmabuf = pcmd;
	iocbq->vport = vport;
	iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
	iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
	iocbq->num_bdes = 0;

	pwqe = &iocbq->wqe;
	/* fill in BDE's for command */
	pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
	pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
	pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
	pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;

	/* the original FC header words are replayed into the SEND_FRAME WQE */
	pwqe->send_frame.frame_len = frame_len;
	pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
	pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
	pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
	pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
	pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
	pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));

	pwqe->generic.wqe_com.word7 = 0;
	pwqe->generic.wqe_com.word10 = 0;

	bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
	bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
	bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
	bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
	bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
	bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
	bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
	bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
	bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
	bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
	bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
	pwqe->generic.wqe_com.abort_tag = iocbq->iotag;

	/* pcmd and iocbq are freed in the completion handler */
	iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb: iocbq, flag: 0);
	if (rc == IOCB_ERROR)
		goto exit;

	lpfc_in_buf_free(phba, &dmabuf->dbuf);
	return;

exit:
	/* NOTE(review): log id "2023" is also used for the informational
	 * "MDS Loopback %d bytes" message elsewhere in this file - confirm
	 * whether a unique id is expected.
	 */
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"2023 Unable to process MDS loopback frame\n" );
	if (pcmd && pcmd->virt)
		dma_pool_free(pool: phba->lpfc_drb_pool, vaddr: pcmd->virt, addr: pcmd->phys);
	kfree(objp: pcmd);
	if (iocbq)
		lpfc_sli_release_iocbq(phba, iocbq);
	lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
| 19538 | |
| 19539 | /** |
| 19540 | * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware |
| 19541 | * @phba: Pointer to HBA context object. |
| 19542 | * @dmabuf: Pointer to a dmabuf that describes the FC sequence. |
| 19543 | * |
| 19544 | * This function is called with no lock held. This function processes all |
| 19545 | * the received buffers and gives it to upper layers when a received buffer |
| 19546 | * indicates that it is the final frame in the sequence. The interrupt |
| 19547 | * service routine processes received buffers at interrupt contexts. |
| 19548 | * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the |
| 19549 | * appropriate receive function when the final frame in a sequence is received. |
| 19550 | **/ |
| 19551 | void |
| 19552 | lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, |
| 19553 | struct hbq_dmabuf *dmabuf) |
| 19554 | { |
| 19555 | struct hbq_dmabuf *seq_dmabuf; |
| 19556 | struct fc_frame_header *fc_hdr; |
| 19557 | struct lpfc_vport *vport; |
| 19558 | uint32_t fcfi; |
| 19559 | uint32_t did; |
| 19560 | |
| 19561 | /* Process each received buffer */ |
| 19562 | fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; |
| 19563 | |
| 19564 | if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || |
| 19565 | fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { |
| 19566 | vport = phba->pport; |
| 19567 | /* Handle MDS Loopback frames */ |
| 19568 | if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) |
| 19569 | lpfc_sli4_handle_mds_loopback(vport, dmabuf); |
| 19570 | else |
| 19571 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
| 19572 | return; |
| 19573 | } |
| 19574 | |
| 19575 | /* check to see if this a valid type of frame */ |
| 19576 | if (lpfc_fc_frame_check(phba, fc_hdr)) { |
| 19577 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
| 19578 | return; |
| 19579 | } |
| 19580 | |
| 19581 | if ((bf_get(lpfc_cqe_code, |
| 19582 | &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) |
| 19583 | fcfi = bf_get(lpfc_rcqe_fcf_id_v1, |
| 19584 | &dmabuf->cq_event.cqe.rcqe_cmpl); |
| 19585 | else |
| 19586 | fcfi = bf_get(lpfc_rcqe_fcf_id, |
| 19587 | &dmabuf->cq_event.cqe.rcqe_cmpl); |
| 19588 | |
| 19589 | if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { |
| 19590 | vport = phba->pport; |
| 19591 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 19592 | "2023 MDS Loopback %d bytes\n" , |
| 19593 | bf_get(lpfc_rcqe_length, |
| 19594 | &dmabuf->cq_event.cqe.rcqe_cmpl)); |
| 19595 | /* Handle MDS Loopback frames */ |
| 19596 | lpfc_sli4_handle_mds_loopback(vport, dmabuf); |
| 19597 | return; |
| 19598 | } |
| 19599 | |
| 19600 | /* d_id this frame is directed to */ |
| 19601 | did = sli4_did_from_fc_hdr(fc_hdr); |
| 19602 | |
| 19603 | vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); |
| 19604 | if (!vport) { |
| 19605 | /* throw out the frame */ |
| 19606 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
| 19607 | return; |
| 19608 | } |
| 19609 | |
| 19610 | /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ |
| 19611 | if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && |
| 19612 | (did != Fabric_DID)) { |
| 19613 | /* |
| 19614 | * Throw out the frame if we are not pt2pt. |
| 19615 | * The pt2pt protocol allows for discovery frames |
| 19616 | * to be received without a registered VPI. |
| 19617 | */ |
| 19618 | if (!test_bit(FC_PT2PT, &vport->fc_flag) || |
| 19619 | phba->link_state == LPFC_HBA_READY) { |
| 19620 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
| 19621 | return; |
| 19622 | } |
| 19623 | } |
| 19624 | |
| 19625 | /* Handle the basic abort sequence (BA_ABTS) event */ |
| 19626 | if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { |
| 19627 | lpfc_sli4_handle_unsol_abort(vport, dmabuf); |
| 19628 | return; |
| 19629 | } |
| 19630 | |
| 19631 | /* Link this frame */ |
| 19632 | seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); |
| 19633 | if (!seq_dmabuf) { |
| 19634 | /* unable to add frame to vport - throw it out */ |
| 19635 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
| 19636 | return; |
| 19637 | } |
| 19638 | /* If not last frame in sequence continue processing frames. */ |
| 19639 | if (!lpfc_seq_complete(dmabuf: seq_dmabuf)) |
| 19640 | return; |
| 19641 | |
| 19642 | /* Send the complete sequence to the upper layer protocol */ |
| 19643 | lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); |
| 19644 | } |
| 19645 | |
| 19646 | /** |
| 19647 | * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port |
| 19648 | * @phba: pointer to lpfc hba data structure. |
| 19649 | * |
| 19650 | * This routine is invoked to post rpi header templates to the |
| 19651 | * HBA consistent with the SLI-4 interface spec. This routine |
| 19652 | * posts a SLI4_PAGE_SIZE memory region to the port to hold up to |
| 19653 | * SLI4_PAGE_SIZE modulo 64 rpi context headers. |
| 19654 | * |
| 19655 | * This routine does not require any locks. It's usage is expected |
| 19656 | * to be driver load or reset recovery when the driver is |
| 19657 | * sequential. |
| 19658 | * |
| 19659 | * Return codes |
| 19660 | * 0 - successful |
| 19661 | * -EIO - The mailbox failed to complete successfully. |
| 19662 | * When this error occurs, the driver is not guaranteed |
| 19663 | * to have any rpi regions posted to the device and |
| 19664 | * must either attempt to repost the regions or take a |
| 19665 | * fatal error. |
| 19666 | **/ |
| 19667 | int |
| 19668 | lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) |
| 19669 | { |
| 19670 | struct lpfc_rpi_hdr *rpi_page; |
| 19671 | uint32_t rc = 0; |
| 19672 | uint16_t lrpi = 0; |
| 19673 | |
| 19674 | /* SLI4 ports that support extents do not require RPI headers. */ |
| 19675 | if (!phba->sli4_hba.rpi_hdrs_in_use) |
| 19676 | goto exit; |
| 19677 | if (phba->sli4_hba.extents_in_use) |
| 19678 | return -EIO; |
| 19679 | |
| 19680 | list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { |
| 19681 | /* |
| 19682 | * Assign the rpi headers a physical rpi only if the driver |
| 19683 | * has not initialized those resources. A port reset only |
| 19684 | * needs the headers posted. |
| 19685 | */ |
| 19686 | if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != |
| 19687 | LPFC_RPI_RSRC_RDY) |
| 19688 | rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; |
| 19689 | |
| 19690 | rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); |
| 19691 | if (rc != MBX_SUCCESS) { |
| 19692 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 19693 | "2008 Error %d posting all rpi " |
| 19694 | "headers\n" , rc); |
| 19695 | rc = -EIO; |
| 19696 | break; |
| 19697 | } |
| 19698 | } |
| 19699 | |
| 19700 | exit: |
| 19701 | bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, |
| 19702 | LPFC_RPI_RSRC_RDY); |
| 19703 | return rc; |
| 19704 | } |
| 19705 | |
| 19706 | /** |
| 19707 | * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port |
| 19708 | * @phba: pointer to lpfc hba data structure. |
| 19709 | * @rpi_page: pointer to the rpi memory region. |
| 19710 | * |
| 19711 | * This routine is invoked to post a single rpi header to the |
| 19712 | * HBA consistent with the SLI-4 interface spec. This memory region |
| 19713 | * maps up to 64 rpi context regions. |
| 19714 | * |
| 19715 | * Return codes |
| 19716 | * 0 - successful |
| 19717 | * -ENOMEM - No available memory |
| 19718 | * -EIO - The mailbox failed to complete successfully. |
| 19719 | **/ |
| 19720 | int |
| 19721 | lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) |
| 19722 | { |
| 19723 | LPFC_MBOXQ_t *mboxq; |
| 19724 | struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; |
| 19725 | uint32_t rc = 0; |
| 19726 | uint32_t shdr_status, shdr_add_status; |
| 19727 | union lpfc_sli4_cfg_shdr *shdr; |
| 19728 | |
| 19729 | /* SLI4 ports that support extents do not require RPI headers. */ |
| 19730 | if (!phba->sli4_hba.rpi_hdrs_in_use) |
| 19731 | return rc; |
| 19732 | if (phba->sli4_hba.extents_in_use) |
| 19733 | return -EIO; |
| 19734 | |
| 19735 | /* The port is notified of the header region via a mailbox command. */ |
| 19736 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 19737 | if (!mboxq) { |
| 19738 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 19739 | "2001 Unable to allocate memory for issuing " |
| 19740 | "SLI_CONFIG_SPECIAL mailbox command\n" ); |
| 19741 | return -ENOMEM; |
| 19742 | } |
| 19743 | |
| 19744 | /* Post all rpi memory regions to the port. */ |
| 19745 | hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; |
| 19746 | lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 19747 | LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, |
| 19748 | sizeof(struct lpfc_mbx_post_hdr_tmpl) - |
| 19749 | sizeof(struct lpfc_sli4_cfg_mhdr), |
| 19750 | LPFC_SLI4_MBX_EMBED); |
| 19751 | |
| 19752 | |
| 19753 | /* Post the physical rpi to the port for this rpi header. */ |
| 19754 | bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, |
| 19755 | rpi_page->start_rpi); |
| 19756 | bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, |
| 19757 | hdr_tmpl, rpi_page->page_count); |
| 19758 | |
| 19759 | hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); |
| 19760 | hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); |
| 19761 | rc = lpfc_sli_issue_mbox(phba, pmbox: mboxq, MBX_POLL); |
| 19762 | shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; |
| 19763 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 19764 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 19765 | mempool_free(element: mboxq, pool: phba->mbox_mem_pool); |
| 19766 | if (shdr_status || shdr_add_status || rc) { |
| 19767 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 19768 | "2514 POST_RPI_HDR mailbox failed with " |
| 19769 | "status x%x add_status x%x, mbx status x%x\n" , |
| 19770 | shdr_status, shdr_add_status, rc); |
| 19771 | rc = -ENXIO; |
| 19772 | } else { |
| 19773 | /* |
| 19774 | * The next_rpi stores the next logical module-64 rpi value used |
| 19775 | * to post physical rpis in subsequent rpi postings. |
| 19776 | */ |
| 19777 | spin_lock_irq(lock: &phba->hbalock); |
| 19778 | phba->sli4_hba.next_rpi = rpi_page->next_rpi; |
| 19779 | spin_unlock_irq(lock: &phba->hbalock); |
| 19780 | } |
| 19781 | return rc; |
| 19782 | } |
| 19783 | |
| 19784 | /** |
| 19785 | * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range |
| 19786 | * @phba: pointer to lpfc hba data structure. |
| 19787 | * |
| 19788 | * This routine is invoked to post rpi header templates to the |
| 19789 | * HBA consistent with the SLI-4 interface spec. This routine |
| 19790 | * posts a SLI4_PAGE_SIZE memory region to the port to hold up to |
| 19791 | * SLI4_PAGE_SIZE modulo 64 rpi context headers. |
| 19792 | * |
| 19793 | * Returns |
| 19794 | * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful |
| 19795 | * LPFC_RPI_ALLOC_ERROR if no rpis are available. |
| 19796 | **/ |
| 19797 | int |
| 19798 | lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) |
| 19799 | { |
| 19800 | unsigned long rpi; |
| 19801 | uint16_t max_rpi, rpi_limit; |
| 19802 | uint16_t rpi_remaining, lrpi = 0; |
| 19803 | struct lpfc_rpi_hdr *rpi_hdr; |
| 19804 | unsigned long iflag; |
| 19805 | |
| 19806 | /* |
| 19807 | * Fetch the next logical rpi. Because this index is logical, |
| 19808 | * the driver starts at 0 each time. |
| 19809 | */ |
| 19810 | spin_lock_irqsave(&phba->hbalock, iflag); |
| 19811 | max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; |
| 19812 | rpi_limit = phba->sli4_hba.next_rpi; |
| 19813 | |
| 19814 | rpi = find_first_zero_bit(addr: phba->sli4_hba.rpi_bmask, size: rpi_limit); |
| 19815 | if (rpi >= rpi_limit) |
| 19816 | rpi = LPFC_RPI_ALLOC_ERROR; |
| 19817 | else { |
| 19818 | set_bit(nr: rpi, addr: phba->sli4_hba.rpi_bmask); |
| 19819 | phba->sli4_hba.max_cfg_param.rpi_used++; |
| 19820 | phba->sli4_hba.rpi_count++; |
| 19821 | } |
| 19822 | lpfc_printf_log(phba, KERN_INFO, |
| 19823 | LOG_NODE | LOG_DISCOVERY, |
| 19824 | "0001 Allocated rpi:x%x max:x%x lim:x%x\n" , |
| 19825 | (int) rpi, max_rpi, rpi_limit); |
| 19826 | |
| 19827 | /* |
| 19828 | * Don't try to allocate more rpi header regions if the device limit |
| 19829 | * has been exhausted. |
| 19830 | */ |
| 19831 | if ((rpi == LPFC_RPI_ALLOC_ERROR) && |
| 19832 | (phba->sli4_hba.rpi_count >= max_rpi)) { |
| 19833 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 19834 | return rpi; |
| 19835 | } |
| 19836 | |
| 19837 | /* |
| 19838 | * RPI header postings are not required for SLI4 ports capable of |
| 19839 | * extents. |
| 19840 | */ |
| 19841 | if (!phba->sli4_hba.rpi_hdrs_in_use) { |
| 19842 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 19843 | return rpi; |
| 19844 | } |
| 19845 | |
| 19846 | /* |
| 19847 | * If the driver is running low on rpi resources, allocate another |
| 19848 | * page now. Note that the next_rpi value is used because |
| 19849 | * it represents how many are actually in use whereas max_rpi notes |
| 19850 | * how many are supported max by the device. |
| 19851 | */ |
| 19852 | rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; |
| 19853 | spin_unlock_irqrestore(lock: &phba->hbalock, flags: iflag); |
| 19854 | if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { |
| 19855 | rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); |
| 19856 | if (!rpi_hdr) { |
| 19857 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 19858 | "2002 Error Could not grow rpi " |
| 19859 | "count\n" ); |
| 19860 | } else { |
| 19861 | lrpi = rpi_hdr->start_rpi; |
| 19862 | rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; |
| 19863 | lpfc_sli4_post_rpi_hdr(phba, rpi_page: rpi_hdr); |
| 19864 | } |
| 19865 | } |
| 19866 | |
| 19867 | return rpi; |
| 19868 | } |
| 19869 | |
| 19870 | /** |
| 19871 | * __lpfc_sli4_free_rpi - Release an rpi for reuse. |
| 19872 | * @phba: pointer to lpfc hba data structure. |
| 19873 | * @rpi: rpi to free |
| 19874 | * |
| 19875 | * This routine is invoked to release an rpi to the pool of |
| 19876 | * available rpis maintained by the driver. |
| 19877 | **/ |
| 19878 | static void |
| 19879 | __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) |
| 19880 | { |
| 19881 | /* |
| 19882 | * if the rpi value indicates a prior unreg has already |
| 19883 | * been done, skip the unreg. |
| 19884 | */ |
| 19885 | if (rpi == LPFC_RPI_ALLOC_ERROR) |
| 19886 | return; |
| 19887 | |
| 19888 | if (test_and_clear_bit(nr: rpi, addr: phba->sli4_hba.rpi_bmask)) { |
| 19889 | phba->sli4_hba.rpi_count--; |
| 19890 | phba->sli4_hba.max_cfg_param.rpi_used--; |
| 19891 | } else { |
| 19892 | lpfc_printf_log(phba, KERN_INFO, |
| 19893 | LOG_NODE | LOG_DISCOVERY, |
| 19894 | "2016 rpi %x not inuse\n" , |
| 19895 | rpi); |
| 19896 | } |
| 19897 | } |
| 19898 | |
| 19899 | /** |
| 19900 | * lpfc_sli4_free_rpi - Release an rpi for reuse. |
| 19901 | * @phba: pointer to lpfc hba data structure. |
| 19902 | * @rpi: rpi to free |
| 19903 | * |
| 19904 | * This routine is invoked to release an rpi to the pool of |
| 19905 | * available rpis maintained by the driver. |
| 19906 | **/ |
| 19907 | void |
| 19908 | lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) |
| 19909 | { |
| 19910 | spin_lock_irq(lock: &phba->hbalock); |
| 19911 | __lpfc_sli4_free_rpi(phba, rpi); |
| 19912 | spin_unlock_irq(lock: &phba->hbalock); |
| 19913 | } |
| 19914 | |
| 19915 | /** |
| 19916 | * lpfc_sli4_remove_rpis - Remove the rpi bitmask region |
| 19917 | * @phba: pointer to lpfc hba data structure. |
| 19918 | * |
| 19919 | * This routine is invoked to remove the memory region that |
| 19920 | * provided rpi via a bitmask. |
| 19921 | **/ |
| 19922 | void |
| 19923 | lpfc_sli4_remove_rpis(struct lpfc_hba *phba) |
| 19924 | { |
| 19925 | kfree(objp: phba->sli4_hba.rpi_bmask); |
| 19926 | kfree(objp: phba->sli4_hba.rpi_ids); |
| 19927 | bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); |
| 19928 | } |
| 19929 | |
| 19930 | /** |
| 19931 | * lpfc_sli4_resume_rpi - Resume traffic relative to an RPI |
| 19932 | * @ndlp: pointer to lpfc nodelist data structure. |
| 19933 | * @cmpl: completion call-back. |
| 19934 | * @iocbq: data to load as mbox ctx_u information |
| 19935 | * |
| 19936 | * Return codes |
| 19937 | * 0 - successful |
| 19938 | * -ENOMEM - No available memory |
| 19939 | * -EIO - The mailbox failed to complete successfully. |
| 19940 | **/ |
| 19941 | int |
| 19942 | lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, |
| 19943 | void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), |
| 19944 | struct lpfc_iocbq *iocbq) |
| 19945 | { |
| 19946 | LPFC_MBOXQ_t *mboxq; |
| 19947 | struct lpfc_hba *phba = ndlp->phba; |
| 19948 | int rc; |
| 19949 | |
| 19950 | /* The port is notified of the header region via a mailbox command. */ |
| 19951 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 19952 | if (!mboxq) |
| 19953 | return -ENOMEM; |
| 19954 | |
| 19955 | /* If cmpl assigned, then this nlp_get pairs with |
| 19956 | * lpfc_mbx_cmpl_resume_rpi. |
| 19957 | * |
| 19958 | * Else cmpl is NULL, then this nlp_get pairs with |
| 19959 | * lpfc_sli_def_mbox_cmpl. |
| 19960 | */ |
| 19961 | if (!lpfc_nlp_get(ndlp)) { |
| 19962 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 19963 | "2122 %s: Failed to get nlp ref\n" , |
| 19964 | __func__); |
| 19965 | mempool_free(element: mboxq, pool: phba->mbox_mem_pool); |
| 19966 | return -EIO; |
| 19967 | } |
| 19968 | |
| 19969 | lpfc_resume_rpi(mboxq, ndlp); |
| 19970 | if (cmpl) { |
| 19971 | mboxq->mbox_cmpl = cmpl; |
| 19972 | mboxq->ctx_u.save_iocb = iocbq; |
| 19973 | } else |
| 19974 | mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 19975 | mboxq->ctx_ndlp = ndlp; |
| 19976 | mboxq->vport = ndlp->vport; |
| 19977 | rc = lpfc_sli_issue_mbox(phba, pmbox: mboxq, MBX_NOWAIT); |
| 19978 | if (rc == MBX_NOT_FINISHED) { |
| 19979 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 19980 | "2010 Resume RPI Mailbox failed " |
| 19981 | "status %d, mbxStatus x%x\n" , rc, |
| 19982 | bf_get(lpfc_mqe_status, &mboxq->u.mqe)); |
| 19983 | lpfc_nlp_put(ndlp); |
| 19984 | mempool_free(element: mboxq, pool: phba->mbox_mem_pool); |
| 19985 | return -EIO; |
| 19986 | } |
| 19987 | return 0; |
| 19988 | } |
| 19989 | |
| 19990 | /** |
| 19991 | * lpfc_sli4_init_vpi - Initialize a vpi with the port |
| 19992 | * @vport: Pointer to the vport for which the vpi is being initialized |
| 19993 | * |
| 19994 | * This routine is invoked to activate a vpi with the port. |
| 19995 | * |
| 19996 | * Returns: |
| 19997 | * 0 success |
| 19998 | * -Evalue otherwise |
| 19999 | **/ |
| 20000 | int |
| 20001 | lpfc_sli4_init_vpi(struct lpfc_vport *vport) |
| 20002 | { |
| 20003 | LPFC_MBOXQ_t *mboxq; |
| 20004 | int rc = 0; |
| 20005 | int retval = MBX_SUCCESS; |
| 20006 | uint32_t mbox_tmo; |
| 20007 | struct lpfc_hba *phba = vport->phba; |
| 20008 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 20009 | if (!mboxq) |
| 20010 | return -ENOMEM; |
| 20011 | lpfc_init_vpi(phba, mboxq, vport->vpi); |
| 20012 | mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); |
| 20013 | rc = lpfc_sli_issue_mbox_wait(phba, pmboxq: mboxq, timeout: mbox_tmo); |
| 20014 | if (rc != MBX_SUCCESS) { |
| 20015 | lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
| 20016 | "2022 INIT VPI Mailbox failed " |
| 20017 | "status %d, mbxStatus x%x\n" , rc, |
| 20018 | bf_get(lpfc_mqe_status, &mboxq->u.mqe)); |
| 20019 | retval = -EIO; |
| 20020 | } |
| 20021 | if (rc != MBX_TIMEOUT) |
| 20022 | mempool_free(element: mboxq, pool: vport->phba->mbox_mem_pool); |
| 20023 | |
| 20024 | return retval; |
| 20025 | } |
| 20026 | |
| 20027 | /** |
| 20028 | * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. |
| 20029 | * @phba: pointer to lpfc hba data structure. |
| 20030 | * @mboxq: Pointer to mailbox object. |
| 20031 | * |
| 20032 | * This routine is invoked to manually add a single FCF record. The caller |
| 20033 | * must pass a completely initialized FCF_Record. This routine takes |
| 20034 | * care of the nonembedded mailbox operations. |
| 20035 | **/ |
| 20036 | static void |
| 20037 | lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
| 20038 | { |
| 20039 | void *virt_addr; |
| 20040 | union lpfc_sli4_cfg_shdr *shdr; |
| 20041 | uint32_t shdr_status, shdr_add_status; |
| 20042 | |
| 20043 | virt_addr = mboxq->sge_array->addr[0]; |
| 20044 | /* The IOCTL status is embedded in the mailbox subheader. */ |
| 20045 | shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; |
| 20046 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 20047 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 20048 | |
| 20049 | if ((shdr_status || shdr_add_status) && |
| 20050 | (shdr_status != STATUS_FCF_IN_USE)) |
| 20051 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 20052 | "2558 ADD_FCF_RECORD mailbox failed with " |
| 20053 | "status x%x add_status x%x\n" , |
| 20054 | shdr_status, shdr_add_status); |
| 20055 | |
| 20056 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
| 20057 | } |
| 20058 | |
| 20059 | /** |
| 20060 | * lpfc_sli4_add_fcf_record - Manually add an FCF Record. |
| 20061 | * @phba: pointer to lpfc hba data structure. |
| 20062 | * @fcf_record: pointer to the initialized fcf record to add. |
| 20063 | * |
| 20064 | * This routine is invoked to manually add a single FCF record. The caller |
| 20065 | * must pass a completely initialized FCF_Record. This routine takes |
| 20066 | * care of the nonembedded mailbox operations. |
| 20067 | **/ |
| 20068 | int |
| 20069 | lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) |
| 20070 | { |
| 20071 | int rc = 0; |
| 20072 | LPFC_MBOXQ_t *mboxq; |
| 20073 | uint8_t *bytep; |
| 20074 | void *virt_addr; |
| 20075 | struct lpfc_mbx_sge sge; |
| 20076 | uint32_t alloc_len, req_len; |
| 20077 | uint32_t fcfindex; |
| 20078 | |
| 20079 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 20080 | if (!mboxq) { |
| 20081 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 20082 | "2009 Failed to allocate mbox for ADD_FCF cmd\n" ); |
| 20083 | return -ENOMEM; |
| 20084 | } |
| 20085 | |
| 20086 | req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + |
| 20087 | sizeof(uint32_t); |
| 20088 | |
| 20089 | /* Allocate DMA memory and set up the non-embedded mailbox command */ |
| 20090 | alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 20091 | LPFC_MBOX_OPCODE_FCOE_ADD_FCF, |
| 20092 | req_len, LPFC_SLI4_MBX_NEMBED); |
| 20093 | if (alloc_len < req_len) { |
| 20094 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 20095 | "2523 Allocated DMA memory size (x%x) is " |
| 20096 | "less than the requested DMA memory " |
| 20097 | "size (x%x)\n" , alloc_len, req_len); |
| 20098 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
| 20099 | return -ENOMEM; |
| 20100 | } |
| 20101 | |
| 20102 | /* |
| 20103 | * Get the first SGE entry from the non-embedded DMA memory. This |
| 20104 | * routine only uses a single SGE. |
| 20105 | */ |
| 20106 | lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); |
| 20107 | virt_addr = mboxq->sge_array->addr[0]; |
| 20108 | /* |
| 20109 | * Configure the FCF record for FCFI 0. This is the driver's |
| 20110 | * hardcoded default and gets used in nonFIP mode. |
| 20111 | */ |
| 20112 | fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); |
| 20113 | bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); |
| 20114 | lpfc_sli_pcimem_bcopy(srcp: &fcfindex, destp: bytep, cnt: sizeof(uint32_t)); |
| 20115 | |
| 20116 | /* |
| 20117 | * Copy the fcf_index and the FCF Record Data. The data starts after |
| 20118 | * the FCoE header plus word10. The data copy needs to be endian |
| 20119 | * correct. |
| 20120 | */ |
| 20121 | bytep += sizeof(uint32_t); |
| 20122 | lpfc_sli_pcimem_bcopy(srcp: fcf_record, destp: bytep, cnt: sizeof(struct fcf_record)); |
| 20123 | mboxq->vport = phba->pport; |
| 20124 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; |
| 20125 | rc = lpfc_sli_issue_mbox(phba, pmbox: mboxq, MBX_NOWAIT); |
| 20126 | if (rc == MBX_NOT_FINISHED) { |
| 20127 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 20128 | "2515 ADD_FCF_RECORD mailbox failed with " |
| 20129 | "status 0x%x\n" , rc); |
| 20130 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
| 20131 | rc = -EIO; |
| 20132 | } else |
| 20133 | rc = 0; |
| 20134 | |
| 20135 | return rc; |
| 20136 | } |
| 20137 | |
| 20138 | /** |
| 20139 | * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. |
| 20140 | * @phba: pointer to lpfc hba data structure. |
| 20141 | * @fcf_record: pointer to the fcf record to write the default data. |
| 20142 | * @fcf_index: FCF table entry index. |
| 20143 | * |
| 20144 | * This routine is invoked to build the driver's default FCF record. The |
| 20145 | * values used are hardcoded. This routine handles memory initialization. |
| 20146 | * |
| 20147 | **/ |
| 20148 | void |
| 20149 | lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, |
| 20150 | struct fcf_record *fcf_record, |
| 20151 | uint16_t fcf_index) |
| 20152 | { |
| 20153 | memset(fcf_record, 0, sizeof(struct fcf_record)); |
| 20154 | fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; |
| 20155 | fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; |
| 20156 | fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; |
| 20157 | bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); |
| 20158 | bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); |
| 20159 | bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); |
| 20160 | bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); |
| 20161 | bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); |
| 20162 | bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); |
| 20163 | bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); |
| 20164 | bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); |
| 20165 | bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); |
| 20166 | bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); |
| 20167 | bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); |
| 20168 | bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); |
| 20169 | bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, |
| 20170 | LPFC_FCF_FPMA | LPFC_FCF_SPMA); |
| 20171 | /* Set the VLAN bit map */ |
| 20172 | if (phba->valid_vlan) { |
| 20173 | fcf_record->vlan_bitmap[phba->vlan_id / 8] |
| 20174 | = 1 << (phba->vlan_id % 8); |
| 20175 | } |
| 20176 | } |
| 20177 | |
| 20178 | /** |
| 20179 | * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. |
| 20180 | * @phba: pointer to lpfc hba data structure. |
| 20181 | * @fcf_index: FCF table entry offset. |
| 20182 | * |
| 20183 | * This routine is invoked to scan the entire FCF table by reading FCF |
| 20184 | * record and processing it one at a time starting from the @fcf_index |
| 20185 | * for initial FCF discovery or fast FCF failover rediscovery. |
| 20186 | * |
| 20187 | * Return 0 if the mailbox command is submitted successfully, none 0 |
| 20188 | * otherwise. |
| 20189 | **/ |
| 20190 | int |
| 20191 | lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) |
| 20192 | { |
| 20193 | int rc = 0, error; |
| 20194 | LPFC_MBOXQ_t *mboxq; |
| 20195 | |
| 20196 | phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; |
| 20197 | phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; |
| 20198 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 20199 | if (!mboxq) { |
| 20200 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 20201 | "2000 Failed to allocate mbox for " |
| 20202 | "READ_FCF cmd\n" ); |
| 20203 | error = -ENOMEM; |
| 20204 | goto fail_fcf_scan; |
| 20205 | } |
| 20206 | /* Construct the read FCF record mailbox command */ |
| 20207 | rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); |
| 20208 | if (rc) { |
| 20209 | error = -EINVAL; |
| 20210 | goto fail_fcf_scan; |
| 20211 | } |
| 20212 | /* Issue the mailbox command asynchronously */ |
| 20213 | mboxq->vport = phba->pport; |
| 20214 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; |
| 20215 | |
| 20216 | set_bit(nr: FCF_TS_INPROG, addr: &phba->hba_flag); |
| 20217 | |
| 20218 | rc = lpfc_sli_issue_mbox(phba, pmbox: mboxq, MBX_NOWAIT); |
| 20219 | if (rc == MBX_NOT_FINISHED) |
| 20220 | error = -EIO; |
| 20221 | else { |
| 20222 | /* Reset eligible FCF count for new scan */ |
| 20223 | if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) |
| 20224 | phba->fcf.eligible_fcf_cnt = 0; |
| 20225 | error = 0; |
| 20226 | } |
| 20227 | fail_fcf_scan: |
| 20228 | if (error) { |
| 20229 | if (mboxq) |
| 20230 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
| 20231 | /* FCF scan failed, clear FCF_TS_INPROG flag */ |
| 20232 | clear_bit(nr: FCF_TS_INPROG, addr: &phba->hba_flag); |
| 20233 | } |
| 20234 | return error; |
| 20235 | } |
| 20236 | |
| 20237 | /** |
| 20238 | * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. |
| 20239 | * @phba: pointer to lpfc hba data structure. |
| 20240 | * @fcf_index: FCF table entry offset. |
| 20241 | * |
| 20242 | * This routine is invoked to read an FCF record indicated by @fcf_index |
| 20243 | * and to use it for FLOGI roundrobin FCF failover. |
| 20244 | * |
| 20245 | * Return 0 if the mailbox command is submitted successfully, none 0 |
| 20246 | * otherwise. |
| 20247 | **/ |
| 20248 | int |
| 20249 | lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) |
| 20250 | { |
| 20251 | int rc = 0, error; |
| 20252 | LPFC_MBOXQ_t *mboxq; |
| 20253 | |
| 20254 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 20255 | if (!mboxq) { |
| 20256 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, |
| 20257 | "2763 Failed to allocate mbox for " |
| 20258 | "READ_FCF cmd\n" ); |
| 20259 | error = -ENOMEM; |
| 20260 | goto fail_fcf_read; |
| 20261 | } |
| 20262 | /* Construct the read FCF record mailbox command */ |
| 20263 | rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); |
| 20264 | if (rc) { |
| 20265 | error = -EINVAL; |
| 20266 | goto fail_fcf_read; |
| 20267 | } |
| 20268 | /* Issue the mailbox command asynchronously */ |
| 20269 | mboxq->vport = phba->pport; |
| 20270 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; |
| 20271 | rc = lpfc_sli_issue_mbox(phba, pmbox: mboxq, MBX_NOWAIT); |
| 20272 | if (rc == MBX_NOT_FINISHED) |
| 20273 | error = -EIO; |
| 20274 | else |
| 20275 | error = 0; |
| 20276 | |
| 20277 | fail_fcf_read: |
| 20278 | if (error && mboxq) |
| 20279 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
| 20280 | return error; |
| 20281 | } |
| 20282 | |
| 20283 | /** |
| 20284 | * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. |
| 20285 | * @phba: pointer to lpfc hba data structure. |
| 20286 | * @fcf_index: FCF table entry offset. |
| 20287 | * |
| 20288 | * This routine is invoked to read an FCF record indicated by @fcf_index to |
| 20289 | * determine whether it's eligible for FLOGI roundrobin failover list. |
| 20290 | * |
| 20291 | * Return 0 if the mailbox command is submitted successfully, none 0 |
| 20292 | * otherwise. |
| 20293 | **/ |
| 20294 | int |
| 20295 | lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) |
| 20296 | { |
| 20297 | int rc = 0, error; |
| 20298 | LPFC_MBOXQ_t *mboxq; |
| 20299 | |
| 20300 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 20301 | if (!mboxq) { |
| 20302 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, |
| 20303 | "2758 Failed to allocate mbox for " |
| 20304 | "READ_FCF cmd\n" ); |
| 20305 | error = -ENOMEM; |
| 20306 | goto fail_fcf_read; |
| 20307 | } |
| 20308 | /* Construct the read FCF record mailbox command */ |
| 20309 | rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); |
| 20310 | if (rc) { |
| 20311 | error = -EINVAL; |
| 20312 | goto fail_fcf_read; |
| 20313 | } |
| 20314 | /* Issue the mailbox command asynchronously */ |
| 20315 | mboxq->vport = phba->pport; |
| 20316 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; |
| 20317 | rc = lpfc_sli_issue_mbox(phba, pmbox: mboxq, MBX_NOWAIT); |
| 20318 | if (rc == MBX_NOT_FINISHED) |
| 20319 | error = -EIO; |
| 20320 | else |
| 20321 | error = 0; |
| 20322 | |
| 20323 | fail_fcf_read: |
| 20324 | if (error && mboxq) |
| 20325 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
| 20326 | return error; |
| 20327 | } |
| 20328 | |
| 20329 | /** |
| 20330 | * lpfc_check_next_fcf_pri_level |
| 20331 | * @phba: pointer to the lpfc_hba struct for this port. |
| 20332 | * This routine is called from the lpfc_sli4_fcf_rr_next_index_get |
| 20333 | * routine when the rr_bmask is empty. The FCF indecies are put into the |
| 20334 | * rr_bmask based on their priority level. Starting from the highest priority |
| 20335 | * to the lowest. The most likely FCF candidate will be in the highest |
| 20336 | * priority group. When this routine is called it searches the fcf_pri list for |
| 20337 | * next lowest priority group and repopulates the rr_bmask with only those |
| 20338 | * fcf_indexes. |
| 20339 | * returns: |
| 20340 | * 1=success 0=failure |
| 20341 | **/ |
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	/* First still-set index in the roundrobin bmask; trace-logged only */
	last_index = find_first_bit(addr: phba->fcf.fcf_rr_bmask,
				    LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* Verify the priority list has 2 or more entries */
	spin_lock_irq(lock: &phba->hbalock);
	if (list_empty(head: &phba->fcf.fcf_pri_list) ||
	    list_is_singular(head: &phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(lock: &phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	spin_unlock_irq(lock: &phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 *
	 * NOTE(review): only sizeof(*fcf_rr_bmask) bytes (one unsigned
	 * long) are cleared; this assumes LPFC_SLI4_FCF_TBL_INDX_MAX
	 * fits within a single word — confirm if the table dimension
	 * ever grows.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
	       sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(lock: &phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * the 1st priority that has not FLOGI failed
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		/* hbalock is dropped across the bit-set call below, so the
		 * list may be modified concurrently while iterating.
		 */
		spin_unlock_irq(lock: &phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(lock: &phba->hbalock);
	}
	/*
	 * if next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
	if (!next_fcf_pri && !list_empty(head: &phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * the 1st priority that has not FLOGI failed
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(lock: &phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(lock: &phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(lock: &phba->hbalock);

	return ret;
}
| 20422 | /** |
| 20423 | * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index |
| 20424 | * @phba: pointer to lpfc hba data structure. |
| 20425 | * |
| 20426 | * This routine is to get the next eligible FCF record index in a round |
| 20427 | * robin fashion. If the next eligible FCF record index equals to the |
| 20428 | * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) |
| 20429 | * shall be returned, otherwise, the next eligible FCF record's index |
| 20430 | * shall be returned. |
| 20431 | **/ |
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(addr: phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       offset: next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_first_bit(addr: phba->fcf.fcf_rr_bmask,
						LPFC_SLI4_FCF_TBL_INDX_MAX);
	}


	/* Check roundrobin failover list empty condition: no eligible bit
	 * anywhere, or the search came back to the currently registered
	 * FCF — either way this priority level is exhausted.
	 */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
	    next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");

		return LPFC_FCOE_FCF_NEXT_NONE;
	}

	/* Skip an FCF whose FLOGI has already failed; keep searching from
	 * the next bit unless it is the only entry on the priority list.
	 */
	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
	    phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
	    LPFC_FCF_FLOGI_FAILED) {
		if (list_is_singular(head: &phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
| 20492 | |
| 20493 | /** |
| 20494 | * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index |
| 20495 | * @phba: pointer to lpfc hba data structure. |
| 20496 | * @fcf_index: index into the FCF table to 'set' |
| 20497 | * |
| 20498 | * This routine sets the FCF record index in to the eligible bmask for |
| 20499 | * roundrobin failover search. It checks to make sure that the index |
| 20500 | * does not go beyond the range of the driver allocated bmask dimension |
| 20501 | * before setting the bit. |
| 20502 | * |
| 20503 | * Returns 0 if the index bit successfully set, otherwise, it returns |
| 20504 | * -EINVAL. |
| 20505 | **/ |
| 20506 | int |
| 20507 | lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) |
| 20508 | { |
| 20509 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { |
| 20510 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, |
| 20511 | "2610 FCF (x%x) reached driver's book " |
| 20512 | "keeping dimension:x%x\n" , |
| 20513 | fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); |
| 20514 | return -EINVAL; |
| 20515 | } |
| 20516 | /* Set the eligible FCF record index bmask */ |
| 20517 | set_bit(nr: fcf_index, addr: phba->fcf.fcf_rr_bmask); |
| 20518 | |
| 20519 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
| 20520 | "2790 Set FCF (x%x) to roundrobin FCF failover " |
| 20521 | "bmask\n" , fcf_index); |
| 20522 | |
| 20523 | return 0; |
| 20524 | } |
| 20525 | |
| 20526 | /** |
| 20527 | * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index |
| 20528 | * @phba: pointer to lpfc hba data structure. |
| 20529 | * @fcf_index: index into the FCF table to 'clear' |
| 20530 | * |
| 20531 | * This routine clears the FCF record index from the eligible bmask for |
| 20532 | * roundrobin failover search. It checks to make sure that the index |
| 20533 | * does not go beyond the range of the driver allocated bmask dimension |
| 20534 | * before clearing the bit. |
| 20535 | **/ |
| 20536 | void |
| 20537 | lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) |
| 20538 | { |
| 20539 | struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; |
| 20540 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { |
| 20541 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, |
| 20542 | "2762 FCF (x%x) reached driver's book " |
| 20543 | "keeping dimension:x%x\n" , |
| 20544 | fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); |
| 20545 | return; |
| 20546 | } |
| 20547 | /* Clear the eligible FCF record index bmask */ |
| 20548 | spin_lock_irq(lock: &phba->hbalock); |
| 20549 | list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, |
| 20550 | list) { |
| 20551 | if (fcf_pri->fcf_rec.fcf_index == fcf_index) { |
| 20552 | list_del_init(entry: &fcf_pri->list); |
| 20553 | break; |
| 20554 | } |
| 20555 | } |
| 20556 | spin_unlock_irq(lock: &phba->hbalock); |
| 20557 | clear_bit(nr: fcf_index, addr: phba->fcf.fcf_rr_bmask); |
| 20558 | |
| 20559 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
| 20560 | "2791 Clear FCF (x%x) from roundrobin failover " |
| 20561 | "bmask\n" , fcf_index); |
| 20562 | } |
| 20563 | |
| 20564 | /** |
| 20565 | * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table |
| 20566 | * @phba: pointer to lpfc hba data structure. |
| 20567 | * @mbox: An allocated pointer to type LPFC_MBOXQ_t |
| 20568 | * |
| 20569 | * This routine is the completion routine for the rediscover FCF table mailbox |
| 20570 | * command. If the mailbox command returned failure, it will try to stop the |
| 20571 | * FCF rediscover wait timer. |
| 20572 | **/ |
| 20573 | static void |
| 20574 | lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) |
| 20575 | { |
| 20576 | struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; |
| 20577 | uint32_t shdr_status, shdr_add_status; |
| 20578 | |
| 20579 | redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; |
| 20580 | |
| 20581 | shdr_status = bf_get(lpfc_mbox_hdr_status, |
| 20582 | &redisc_fcf->header.cfg_shdr.response); |
| 20583 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, |
| 20584 | &redisc_fcf->header.cfg_shdr.response); |
| 20585 | if (shdr_status || shdr_add_status) { |
| 20586 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, |
| 20587 | "2746 Requesting for FCF rediscovery failed " |
| 20588 | "status x%x add_status x%x\n" , |
| 20589 | shdr_status, shdr_add_status); |
| 20590 | if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { |
| 20591 | spin_lock_irq(lock: &phba->hbalock); |
| 20592 | phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; |
| 20593 | spin_unlock_irq(lock: &phba->hbalock); |
| 20594 | /* |
| 20595 | * CVL event triggered FCF rediscover request failed, |
| 20596 | * last resort to re-try current registered FCF entry. |
| 20597 | */ |
| 20598 | lpfc_retry_pport_discovery(phba); |
| 20599 | } else { |
| 20600 | spin_lock_irq(lock: &phba->hbalock); |
| 20601 | phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; |
| 20602 | spin_unlock_irq(lock: &phba->hbalock); |
| 20603 | /* |
| 20604 | * DEAD FCF event triggered FCF rediscover request |
| 20605 | * failed, last resort to fail over as a link down |
| 20606 | * to FCF registration. |
| 20607 | */ |
| 20608 | lpfc_sli4_fcf_dead_failthrough(phba); |
| 20609 | } |
| 20610 | } else { |
| 20611 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
| 20612 | "2775 Start FCF rediscover quiescent timer\n" ); |
| 20613 | /* |
| 20614 | * Start FCF rediscovery wait timer for pending FCF |
| 20615 | * before rescan FCF record table. |
| 20616 | */ |
| 20617 | lpfc_fcf_redisc_wait_start_timer(phba); |
| 20618 | } |
| 20619 | |
| 20620 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 20621 | } |
| 20622 | |
| 20623 | /** |
| 20624 | * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. |
| 20625 | * @phba: pointer to lpfc hba data structure. |
| 20626 | * |
| 20627 | * This routine is invoked to request for rediscovery of the entire FCF table |
| 20628 | * by the port. |
| 20629 | **/ |
| 20630 | int |
| 20631 | lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) |
| 20632 | { |
| 20633 | LPFC_MBOXQ_t *mbox; |
| 20634 | struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; |
| 20635 | int rc, length; |
| 20636 | |
| 20637 | /* Cancel retry delay timers to all vports before FCF rediscover */ |
| 20638 | lpfc_cancel_all_vport_retry_delay_timer(phba); |
| 20639 | |
| 20640 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 20641 | if (!mbox) { |
| 20642 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 20643 | "2745 Failed to allocate mbox for " |
| 20644 | "requesting FCF rediscover.\n" ); |
| 20645 | return -ENOMEM; |
| 20646 | } |
| 20647 | |
| 20648 | length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - |
| 20649 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 20650 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
| 20651 | LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, |
| 20652 | length, LPFC_SLI4_MBX_EMBED); |
| 20653 | |
| 20654 | redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; |
| 20655 | /* Set count to 0 for invalidating the entire FCF database */ |
| 20656 | bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); |
| 20657 | |
| 20658 | /* Issue the mailbox command asynchronously */ |
| 20659 | mbox->vport = phba->pport; |
| 20660 | mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; |
| 20661 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_NOWAIT); |
| 20662 | |
| 20663 | if (rc == MBX_NOT_FINISHED) { |
| 20664 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 20665 | return -EIO; |
| 20666 | } |
| 20667 | return 0; |
| 20668 | } |
| 20669 | |
| 20670 | /** |
| 20671 | * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event |
| 20672 | * @phba: pointer to lpfc hba data structure. |
| 20673 | * |
| 20674 | * This function is the failover routine as a last resort to the FCF DEAD |
| 20675 | * event when driver failed to perform fast FCF failover. |
| 20676 | **/ |
| 20677 | void |
| 20678 | lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) |
| 20679 | { |
| 20680 | uint32_t link_state; |
| 20681 | |
| 20682 | /* |
| 20683 | * Last resort as FCF DEAD event failover will treat this as |
| 20684 | * a link down, but save the link state because we don't want |
| 20685 | * it to be changed to Link Down unless it is already down. |
| 20686 | */ |
| 20687 | link_state = phba->link_state; |
| 20688 | lpfc_linkdown(phba); |
| 20689 | phba->link_state = link_state; |
| 20690 | |
| 20691 | /* Unregister FCF if no devices connected to it */ |
| 20692 | lpfc_unregister_unused_fcf(phba); |
| 20693 | } |
| 20694 | |
| 20695 | /** |
| 20696 | * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. |
| 20697 | * @phba: pointer to lpfc hba data structure. |
| 20698 | * @rgn23_data: pointer to configure region 23 data. |
| 20699 | * |
| 20700 | * This function gets SLI3 port configure region 23 data through memory dump |
| 20701 | * mailbox command. When it successfully retrieves data, the size of the data |
| 20702 | * will be returned, otherwise, 0 will be returned. |
| 20703 | **/ |
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;	/* running count copied into rgn23_data */
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	/* Pump DUMP-memory mailbox commands, advancing the offset each
	 * pass, until the port reports no more data or the caller's
	 * buffer (DMP_RGN23_SIZE) is full.
	 */
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmbox: pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			/* Force word_cnt to 0 so the loop terminates below */
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		/* Clamp the copy to the space remaining in rgn23_data */
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		/* NOTE(review): word_cnt is treated as a byte count here,
		 * both for the copy and the offset advance — confirm
		 * against lpfc_dump_mem's response format.
		 */
		lpfc_sli_pcimem_bcopy(srcp: ((uint8_t *)mb) + DMP_RSP_OFFSET,
				      destp: rgn23_data + offset,
				      cnt: mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(element: pmb, pool: phba->mbox_mem_pool);
	return offset;
}
| 20753 | |
| 20754 | /** |
| 20755 | * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. |
| 20756 | * @phba: pointer to lpfc hba data structure. |
| 20757 | * @rgn23_data: pointer to configure region 23 data. |
| 20758 | * |
| 20759 | * This function gets SLI4 port configure region 23 data through memory dump |
| 20760 | * mailbox command. When it successfully retrieves data, the size of the data |
| 20761 | * will be returned, otherwise, 0 will be returned. |
| 20762 | **/ |
| 20763 | static uint32_t |
| 20764 | lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) |
| 20765 | { |
| 20766 | LPFC_MBOXQ_t *mboxq = NULL; |
| 20767 | struct lpfc_dmabuf *mp = NULL; |
| 20768 | struct lpfc_mqe *mqe; |
| 20769 | uint32_t data_length = 0; |
| 20770 | int rc; |
| 20771 | |
| 20772 | if (!rgn23_data) |
| 20773 | return 0; |
| 20774 | |
| 20775 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 20776 | if (!mboxq) { |
| 20777 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 20778 | "3105 failed to allocate mailbox memory\n" ); |
| 20779 | return 0; |
| 20780 | } |
| 20781 | |
| 20782 | if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) |
| 20783 | goto out; |
| 20784 | mqe = &mboxq->u.mqe; |
| 20785 | mp = mboxq->ctx_buf; |
| 20786 | rc = lpfc_sli_issue_mbox(phba, pmbox: mboxq, MBX_POLL); |
| 20787 | if (rc) |
| 20788 | goto out; |
| 20789 | data_length = mqe->un.mb_words[5]; |
| 20790 | if (data_length == 0) |
| 20791 | goto out; |
| 20792 | if (data_length > DMP_RGN23_SIZE) { |
| 20793 | data_length = 0; |
| 20794 | goto out; |
| 20795 | } |
| 20796 | lpfc_sli_pcimem_bcopy(srcp: (char *)mp->virt, destp: rgn23_data, cnt: data_length); |
| 20797 | out: |
| 20798 | lpfc_mbox_rsrc_cleanup(phba, mbox: mboxq, locked: MBOX_THD_UNLOCKED); |
| 20799 | return data_length; |
| 20800 | } |
| 20801 | |
| 20802 | /** |
| 20803 | * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. |
| 20804 | * @phba: pointer to lpfc hba data structure. |
| 20805 | * |
| 20806 | * This function read region 23 and parse TLV for port status to |
 * decide if the user disabled the port. If the TLV indicates the
| 20808 | * port is disabled, the hba_flag is set accordingly. |
| 20809 | **/ |
| 20810 | void |
| 20811 | lpfc_sli_read_link_ste(struct lpfc_hba *phba) |
| 20812 | { |
| 20813 | uint8_t *rgn23_data = NULL; |
| 20814 | uint32_t if_type, data_size, sub_tlv_len, tlv_offset; |
| 20815 | uint32_t offset = 0; |
| 20816 | |
| 20817 | /* Get adapter Region 23 data */ |
| 20818 | rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); |
| 20819 | if (!rgn23_data) |
| 20820 | goto out; |
| 20821 | |
| 20822 | if (phba->sli_rev < LPFC_SLI_REV4) |
| 20823 | data_size = lpfc_sli_get_config_region23(phba, rgn23_data); |
| 20824 | else { |
| 20825 | if_type = bf_get(lpfc_sli_intf_if_type, |
| 20826 | &phba->sli4_hba.sli_intf); |
| 20827 | if (if_type == LPFC_SLI_INTF_IF_TYPE_0) |
| 20828 | goto out; |
| 20829 | data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); |
| 20830 | } |
| 20831 | |
| 20832 | if (!data_size) |
| 20833 | goto out; |
| 20834 | |
| 20835 | /* Check the region signature first */ |
| 20836 | if (memcmp(p: &rgn23_data[offset], LPFC_REGION23_SIGNATURE, size: 4)) { |
| 20837 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 20838 | "2619 Config region 23 has bad signature\n" ); |
| 20839 | goto out; |
| 20840 | } |
| 20841 | offset += 4; |
| 20842 | |
| 20843 | /* Check the data structure version */ |
| 20844 | if (rgn23_data[offset] != LPFC_REGION23_VERSION) { |
| 20845 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 20846 | "2620 Config region 23 has bad version\n" ); |
| 20847 | goto out; |
| 20848 | } |
| 20849 | offset += 4; |
| 20850 | |
| 20851 | /* Parse TLV entries in the region */ |
| 20852 | while (offset < data_size) { |
| 20853 | if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) |
| 20854 | break; |
| 20855 | /* |
| 20856 | * If the TLV is not driver specific TLV or driver id is |
| 20857 | * not linux driver id, skip the record. |
| 20858 | */ |
| 20859 | if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || |
| 20860 | (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || |
| 20861 | (rgn23_data[offset + 3] != 0)) { |
| 20862 | offset += rgn23_data[offset + 1] * 4 + 4; |
| 20863 | continue; |
| 20864 | } |
| 20865 | |
| 20866 | /* Driver found a driver specific TLV in the config region */ |
| 20867 | sub_tlv_len = rgn23_data[offset + 1] * 4; |
| 20868 | offset += 4; |
| 20869 | tlv_offset = 0; |
| 20870 | |
| 20871 | /* |
| 20872 | * Search for configured port state sub-TLV. |
| 20873 | */ |
| 20874 | while ((offset < data_size) && |
| 20875 | (tlv_offset < sub_tlv_len)) { |
| 20876 | if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { |
| 20877 | offset += 4; |
| 20878 | tlv_offset += 4; |
| 20879 | break; |
| 20880 | } |
| 20881 | if (rgn23_data[offset] != PORT_STE_TYPE) { |
| 20882 | offset += rgn23_data[offset + 1] * 4 + 4; |
| 20883 | tlv_offset += rgn23_data[offset + 1] * 4 + 4; |
| 20884 | continue; |
| 20885 | } |
| 20886 | |
| 20887 | /* This HBA contains PORT_STE configured */ |
| 20888 | if (!rgn23_data[offset + 2]) |
| 20889 | set_bit(nr: LINK_DISABLED, addr: &phba->hba_flag); |
| 20890 | |
| 20891 | goto out; |
| 20892 | } |
| 20893 | } |
| 20894 | |
| 20895 | out: |
| 20896 | kfree(objp: rgn23_data); |
| 20897 | return; |
| 20898 | } |
| 20899 | |
| 20900 | /** |
| 20901 | * lpfc_log_fw_write_cmpl - logs firmware write completion status |
| 20902 | * @phba: pointer to lpfc hba data structure |
| 20903 | * @shdr_status: wr_object rsp's status field |
| 20904 | * @shdr_add_status: wr_object rsp's add_status field |
| 20905 | * @shdr_add_status_2: wr_object rsp's add_status_2 field |
| 20906 | * @shdr_change_status: wr_object rsp's change_status field |
| 20907 | * @shdr_csf: wr_object rsp's csf bit |
| 20908 | * |
| 20909 | * This routine is intended to be called after a firmware write completes. |
| 20910 | * It will log next action items to be performed by the user to instantiate |
| 20911 | * the newly downloaded firmware or reason for incompatibility. |
| 20912 | **/ |
| 20913 | static void |
| 20914 | lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, |
| 20915 | u32 shdr_add_status, u32 shdr_add_status_2, |
| 20916 | u32 shdr_change_status, u32 shdr_csf) |
| 20917 | { |
| 20918 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, |
| 20919 | "4198 %s: flash_id x%02x, asic_rev x%02x, " |
| 20920 | "status x%02x, add_status x%02x, add_status_2 x%02x, " |
| 20921 | "change_status x%02x, csf %01x\n" , __func__, |
| 20922 | phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev, |
| 20923 | shdr_status, shdr_add_status, shdr_add_status_2, |
| 20924 | shdr_change_status, shdr_csf); |
| 20925 | |
| 20926 | if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) { |
| 20927 | switch (shdr_add_status_2) { |
| 20928 | case LPFC_ADD_STATUS_2_INCOMPAT_FLASH: |
| 20929 | lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, |
| 20930 | "4199 Firmware write failed: " |
| 20931 | "image incompatible with flash x%02x\n" , |
| 20932 | phba->sli4_hba.flash_id); |
| 20933 | break; |
| 20934 | case LPFC_ADD_STATUS_2_INCORRECT_ASIC: |
| 20935 | lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, |
| 20936 | "4200 Firmware write failed: " |
| 20937 | "image incompatible with ASIC " |
| 20938 | "architecture x%02x\n" , |
| 20939 | phba->sli4_hba.asic_rev); |
| 20940 | break; |
| 20941 | default: |
| 20942 | lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, |
| 20943 | "4210 Firmware write failed: " |
| 20944 | "add_status_2 x%02x\n" , |
| 20945 | shdr_add_status_2); |
| 20946 | break; |
| 20947 | } |
| 20948 | } else if (!shdr_status && !shdr_add_status) { |
| 20949 | if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || |
| 20950 | shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { |
| 20951 | if (shdr_csf) |
| 20952 | shdr_change_status = |
| 20953 | LPFC_CHANGE_STATUS_PCI_RESET; |
| 20954 | } |
| 20955 | |
| 20956 | switch (shdr_change_status) { |
| 20957 | case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): |
| 20958 | lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, |
| 20959 | "3198 Firmware write complete: System " |
| 20960 | "reboot required to instantiate\n" ); |
| 20961 | break; |
| 20962 | case (LPFC_CHANGE_STATUS_FW_RESET): |
| 20963 | lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, |
| 20964 | "3199 Firmware write complete: " |
| 20965 | "Firmware reset required to " |
| 20966 | "instantiate\n" ); |
| 20967 | break; |
| 20968 | case (LPFC_CHANGE_STATUS_PORT_MIGRATION): |
| 20969 | lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, |
| 20970 | "3200 Firmware write complete: Port " |
| 20971 | "Migration or PCI Reset required to " |
| 20972 | "instantiate\n" ); |
| 20973 | break; |
| 20974 | case (LPFC_CHANGE_STATUS_PCI_RESET): |
| 20975 | lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, |
| 20976 | "3201 Firmware write complete: PCI " |
| 20977 | "Reset required to instantiate\n" ); |
| 20978 | break; |
| 20979 | default: |
| 20980 | break; |
| 20981 | } |
| 20982 | } |
| 20983 | } |
| 20984 | |
| 20985 | /** |
| 20986 | * lpfc_wr_object - write an object to the firmware |
| 20987 | * @phba: HBA structure that indicates port to create a queue on. |
| 20988 | * @dmabuf_list: list of dmabufs to write to the port. |
| 20989 | * @size: the total byte value of the objects to write to the port. |
| 20990 | * @offset: the current offset to be used to start the transfer. |
| 20991 | * |
| 20992 | * This routine will create a wr_object mailbox command to send to the port. |
| 20993 | * the mailbox command will be constructed using the dma buffers described in |
| 20994 | * @dmabuf_list to create a list of BDEs. This routine will fill in as many |
| 20995 | * BDEs that the imbedded mailbox can support. The @offset variable will be |
| 20996 | * used to indicate the starting offset of the transfer and will also return |
| 20997 | * the offset after the write object mailbox has completed. @size is used to |
| 20998 | * determine the end of the object and whether the eof bit should be set. |
| 20999 | * |
| 21000 | * Return 0 is successful and offset will contain the new offset to use |
| 21001 | * for the next write. |
| 21002 | * Return negative value for error cases. |
| 21003 | **/ |
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	int mbox_status = 0;
	uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
	uint32_t shdr_change_status = 0, shdr_csf = 0;
	uint32_t mbox_tmo;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;
	bool check_change_status = false;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_WRITE_OBJECT,
			 sizeof(struct lpfc_mbx_wr_object) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	/* Object name "/" selects the default firmware object */
	sprintf(buf: (uint8_t *)wr_object->u.request.object_name, fmt: "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	/* Build one BDE per dmabuf, up to the embedded mailbox limit or
	 * until @size bytes have been described.  The final BDE is sized
	 * to the remainder and gets the eof/eas bits set.
	 */
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			/* Last chunk: mark end-of-file so the port checks
			 * change_status in the response.
			 */
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
			check_change_status = true;
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		mbox_status = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		mbox_status = lpfc_sli_issue_mbox_wait(phba, pmboxq: mbox, timeout: mbox_tmo);
	}

	/* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
	rc = mbox_status;

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &wr_object->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &wr_object->header.cfg_shdr.response);
	shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
				   &wr_object->header.cfg_shdr.response);
	if (check_change_status) {
		shdr_change_status = bf_get(lpfc_wr_object_change_status,
					    &wr_object->u.response);
		shdr_csf = bf_get(lpfc_wr_object_csf,
				  &wr_object->u.response);
	}

	if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, add_status_2 x%x, "
				"mbx status x%x\n",
				shdr_status, shdr_add_status, shdr_add_status_2,
				rc);
		rc = -ENXIO;
		/* NOTE(review): on failure *offset is repurposed to carry
		 * add_status back to the caller rather than a byte offset.
		 */
		*offset = shdr_add_status;
	} else {
		*offset += wr_object->u.response.actual_write_length;
	}

	if (rc || check_change_status)
		lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
				       shdr_add_status_2, shdr_change_status,
				       shdr_csf);

	/* On MBOX_TIMEOUT (interrupt mode) the mailbox is still owned by
	 * the completion path and must not be freed here.
	 */
	if (!phba->sli4_hba.intr_enable)
		mempool_free(element: mbox, pool: phba->mbox_mem_pool);
	else if (mbox_status != MBX_TIMEOUT)
		mempool_free(element: mbox, pool: phba->mbox_mem_pool);

	return rc;
}
| 21105 | |
| 21106 | /** |
| 21107 | * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. |
| 21108 | * @vport: pointer to vport data structure. |
| 21109 | * |
| 21110 | * This function iterate through the mailboxq and clean up all REG_LOGIN |
| 21111 | * and REG_VPI mailbox commands associated with the vport. This function |
| 21112 | * is called when driver want to restart discovery of the vport due to |
| 21113 | * a Clear Virtual Link event. |
| 21114 | **/ |
| 21115 | void |
| 21116 | lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) |
| 21117 | { |
| 21118 | struct lpfc_hba *phba = vport->phba; |
| 21119 | LPFC_MBOXQ_t *mb, *nextmb; |
| 21120 | struct lpfc_nodelist *ndlp; |
| 21121 | struct lpfc_nodelist *act_mbx_ndlp = NULL; |
| 21122 | LIST_HEAD(mbox_cmd_list); |
| 21123 | uint8_t restart_loop; |
| 21124 | |
| 21125 | /* Clean up internally queued mailbox commands with the vport */ |
| 21126 | spin_lock_irq(lock: &phba->hbalock); |
| 21127 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { |
| 21128 | if (mb->vport != vport) |
| 21129 | continue; |
| 21130 | |
| 21131 | if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && |
| 21132 | (mb->u.mb.mbxCommand != MBX_REG_VPI)) |
| 21133 | continue; |
| 21134 | |
| 21135 | list_move_tail(list: &mb->list, head: &mbox_cmd_list); |
| 21136 | } |
| 21137 | /* Clean up active mailbox command with the vport */ |
| 21138 | mb = phba->sli.mbox_active; |
| 21139 | if (mb && (mb->vport == vport)) { |
| 21140 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || |
| 21141 | (mb->u.mb.mbxCommand == MBX_REG_VPI)) |
| 21142 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 21143 | if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { |
| 21144 | act_mbx_ndlp = mb->ctx_ndlp; |
| 21145 | |
| 21146 | /* This reference is local to this routine. The |
| 21147 | * reference is removed at routine exit. |
| 21148 | */ |
| 21149 | act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); |
| 21150 | |
| 21151 | /* Unregister the RPI when mailbox complete */ |
| 21152 | mb->mbox_flag |= LPFC_MBX_IMED_UNREG; |
| 21153 | } |
| 21154 | } |
| 21155 | /* Cleanup any mailbox completions which are not yet processed */ |
| 21156 | do { |
| 21157 | restart_loop = 0; |
| 21158 | list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { |
| 21159 | /* |
| 21160 | * If this mailox is already processed or it is |
| 21161 | * for another vport ignore it. |
| 21162 | */ |
| 21163 | if ((mb->vport != vport) || |
| 21164 | (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) |
| 21165 | continue; |
| 21166 | |
| 21167 | if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && |
| 21168 | (mb->u.mb.mbxCommand != MBX_REG_VPI)) |
| 21169 | continue; |
| 21170 | |
| 21171 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 21172 | if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { |
| 21173 | ndlp = mb->ctx_ndlp; |
| 21174 | /* Unregister the RPI when mailbox complete */ |
| 21175 | mb->mbox_flag |= LPFC_MBX_IMED_UNREG; |
| 21176 | restart_loop = 1; |
| 21177 | clear_bit(nr: NLP_IGNR_REG_CMPL, addr: &ndlp->nlp_flag); |
| 21178 | break; |
| 21179 | } |
| 21180 | } |
| 21181 | } while (restart_loop); |
| 21182 | |
| 21183 | spin_unlock_irq(lock: &phba->hbalock); |
| 21184 | |
| 21185 | /* Release the cleaned-up mailbox commands */ |
| 21186 | while (!list_empty(head: &mbox_cmd_list)) { |
| 21187 | list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); |
| 21188 | if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { |
| 21189 | ndlp = mb->ctx_ndlp; |
| 21190 | mb->ctx_ndlp = NULL; |
| 21191 | if (ndlp) { |
| 21192 | clear_bit(nr: NLP_IGNR_REG_CMPL, addr: &ndlp->nlp_flag); |
| 21193 | lpfc_nlp_put(ndlp); |
| 21194 | } |
| 21195 | } |
| 21196 | lpfc_mbox_rsrc_cleanup(phba, mbox: mb, locked: MBOX_THD_UNLOCKED); |
| 21197 | } |
| 21198 | |
| 21199 | /* Release the ndlp with the cleaned-up active mailbox command */ |
| 21200 | if (act_mbx_ndlp) { |
| 21201 | clear_bit(nr: NLP_IGNR_REG_CMPL, addr: &act_mbx_ndlp->nlp_flag); |
| 21202 | lpfc_nlp_put(act_mbx_ndlp); |
| 21203 | } |
| 21204 | } |
| 21205 | |
| 21206 | /** |
| 21207 | * lpfc_drain_txq - Drain the txq |
| 21208 | * @phba: Pointer to HBA context object. |
| 21209 | * |
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
| 21213 | * are no SGLs. This congestion can occur with large |
| 21214 | * vport counts during node discovery. |
| 21215 | **/ |
| 21216 | |
| 21217 | uint32_t |
| 21218 | lpfc_drain_txq(struct lpfc_hba *phba) |
| 21219 | { |
| 21220 | LIST_HEAD(completions); |
| 21221 | struct lpfc_sli_ring *pring; |
| 21222 | struct lpfc_iocbq *piocbq = NULL; |
| 21223 | unsigned long iflags = 0; |
| 21224 | char *fail_msg = NULL; |
| 21225 | uint32_t txq_cnt = 0; |
| 21226 | struct lpfc_queue *wq; |
| 21227 | int ret = 0; |
| 21228 | |
| 21229 | if (phba->link_flag & LS_MDS_LOOPBACK) { |
| 21230 | /* MDS WQE are posted only to first WQ*/ |
| 21231 | wq = phba->sli4_hba.hdwq[0].io_wq; |
| 21232 | if (unlikely(!wq)) |
| 21233 | return 0; |
| 21234 | pring = wq->pring; |
| 21235 | } else { |
| 21236 | wq = phba->sli4_hba.els_wq; |
| 21237 | if (unlikely(!wq)) |
| 21238 | return 0; |
| 21239 | pring = lpfc_phba_elsring(phba); |
| 21240 | } |
| 21241 | |
| 21242 | if (unlikely(!pring) || list_empty(head: &pring->txq)) |
| 21243 | return 0; |
| 21244 | |
| 21245 | spin_lock_irqsave(&pring->ring_lock, iflags); |
| 21246 | list_for_each_entry(piocbq, &pring->txq, list) { |
| 21247 | txq_cnt++; |
| 21248 | } |
| 21249 | |
| 21250 | if (txq_cnt > pring->txq_max) |
| 21251 | pring->txq_max = txq_cnt; |
| 21252 | |
| 21253 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 21254 | |
| 21255 | while (!list_empty(head: &pring->txq)) { |
| 21256 | spin_lock_irqsave(&pring->ring_lock, iflags); |
| 21257 | |
| 21258 | piocbq = lpfc_sli_ringtx_get(phba, pring); |
| 21259 | if (!piocbq) { |
| 21260 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 21261 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 21262 | "2823 txq empty and txq_cnt is %d\n" , |
| 21263 | txq_cnt); |
| 21264 | break; |
| 21265 | } |
| 21266 | txq_cnt--; |
| 21267 | |
| 21268 | ret = __lpfc_sli_issue_iocb(phba, ring_number: pring->ringno, piocb: piocbq, flag: 0); |
| 21269 | |
| 21270 | if (ret && ret != IOCB_BUSY) { |
| 21271 | fail_msg = " - Cannot send IO " ; |
| 21272 | piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; |
| 21273 | } |
| 21274 | if (fail_msg) { |
| 21275 | piocbq->cmd_flag |= LPFC_DRIVER_ABORTED; |
| 21276 | /* Failed means we can't issue and need to cancel */ |
| 21277 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 21278 | "2822 IOCB failed %s iotag 0x%x " |
| 21279 | "xri 0x%x %d flg x%x\n" , |
| 21280 | fail_msg, piocbq->iotag, |
| 21281 | piocbq->sli4_xritag, ret, |
| 21282 | piocbq->cmd_flag); |
| 21283 | list_add_tail(new: &piocbq->list, head: &completions); |
| 21284 | fail_msg = NULL; |
| 21285 | } |
| 21286 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 21287 | if (txq_cnt == 0 || ret == IOCB_BUSY) |
| 21288 | break; |
| 21289 | } |
| 21290 | /* Cancel all the IOCBs that cannot be issued */ |
| 21291 | lpfc_sli_cancel_iocbs(phba, iocblist: &completions, IOSTAT_LOCAL_REJECT, |
| 21292 | IOERR_SLI_ABORTED); |
| 21293 | |
| 21294 | return txq_cnt; |
| 21295 | } |
| 21296 | |
| 21297 | /** |
| 21298 | * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. |
| 21299 | * @phba: Pointer to HBA context object. |
| 21300 | * @pwqeq: Pointer to command WQE. |
| 21301 | * @sglq: Pointer to the scatter gather queue object. |
| 21302 | * |
| 21303 | * This routine converts the bpl or bde that is in the WQE |
| 21304 | * to a sgl list for the sli4 hardware. The physical address |
| 21305 | * of the bpl/bde is converted back to a virtual address. |
| 21306 | * If the WQE contains a BPL then the list of BDE's is |
| 21307 | * converted to sli4_sge's. If the WQE contains a single |
| 21308 | * BDE then it is converted to a single sli_sge. |
| 21309 | * The WQE is still in cpu endianness so the contents of |
| 21310 | * the bpl can be used without byte swapping. |
| 21311 | * |
| 21312 | * Returns valid XRI = Success, NO_XRI = Failure. |
| 21313 | */ |
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe128 *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	/* Nothing to convert without both a WQE and a sglq; NO_XRI = failure */
	if (!pwqeq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	/* XMIT_BLS_RSP carries no BDE/BPL to convert; return the XRI as-is */
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->num_bdes;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->bpl_dmabuf)
			dmabuf = pwqeq->bpl_dmabuf;
		else
			return xritag;

		/* The BPL lives in the DMA buffer's virtual mapping */
		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		/* Convert each BDE in the BPL into an sli4_sge */
		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			/* word2 is edited in cpu endianness, swapped back below */
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				/* SGE type comes straight from the BDE flags */
				bf_set(lpfc_sli4_sge_type, sgl,
					bpl->tus.f.bdeFlags);
				/* NOTE(review): first 3 entries keep offset 0;
				 * presumably non-data SGEs — confirm with spec.
				 */
				if (i < 3)
					offset = 0;
				else
					offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}
| 21421 | |
| 21422 | /** |
| 21423 | * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) |
| 21424 | * @phba: Pointer to HBA context object. |
| 21425 | * @qp: Pointer to HDW queue. |
| 21426 | * @pwqe: Pointer to command WQE. |
| 21427 | **/ |
| 21428 | int |
| 21429 | lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, |
| 21430 | struct lpfc_iocbq *pwqe) |
| 21431 | { |
| 21432 | union lpfc_wqe128 *wqe = &pwqe->wqe; |
| 21433 | struct lpfc_async_xchg_ctx *ctxp; |
| 21434 | struct lpfc_queue *wq; |
| 21435 | struct lpfc_sglq *sglq; |
| 21436 | struct lpfc_sli_ring *pring; |
| 21437 | unsigned long iflags; |
| 21438 | int ret = 0; |
| 21439 | |
| 21440 | /* NVME_LS and NVME_LS ABTS requests. */ |
| 21441 | if (pwqe->cmd_flag & LPFC_IO_NVME_LS) { |
| 21442 | pring = phba->sli4_hba.nvmels_wq->pring; |
| 21443 | lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, |
| 21444 | qp, wq_access); |
| 21445 | sglq = __lpfc_sli_get_els_sglq(phba, piocbq: pwqe); |
| 21446 | if (!sglq) { |
| 21447 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 21448 | return WQE_BUSY; |
| 21449 | } |
| 21450 | pwqe->sli4_lxritag = sglq->sli4_lxritag; |
| 21451 | pwqe->sli4_xritag = sglq->sli4_xritag; |
| 21452 | if (lpfc_wqe_bpl2sgl(phba, pwqeq: pwqe, sglq) == NO_XRI) { |
| 21453 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 21454 | return WQE_ERROR; |
| 21455 | } |
| 21456 | bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, |
| 21457 | pwqe->sli4_xritag); |
| 21458 | ret = lpfc_sli4_wq_put(q: phba->sli4_hba.nvmels_wq, wqe); |
| 21459 | if (ret) { |
| 21460 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 21461 | return ret; |
| 21462 | } |
| 21463 | |
| 21464 | lpfc_sli_ringtxcmpl_put(phba, pring, piocb: pwqe); |
| 21465 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 21466 | |
| 21467 | lpfc_sli4_poll_eq(eq: qp->hba_eq); |
| 21468 | return 0; |
| 21469 | } |
| 21470 | |
| 21471 | /* NVME_FCREQ and NVME_ABTS requests */ |
| 21472 | if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) { |
| 21473 | /* Get the IO distribution (hba_wqidx) for WQ assignment. */ |
| 21474 | wq = qp->io_wq; |
| 21475 | pring = wq->pring; |
| 21476 | |
| 21477 | bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); |
| 21478 | |
| 21479 | lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, |
| 21480 | qp, wq_access); |
| 21481 | ret = lpfc_sli4_wq_put(q: wq, wqe); |
| 21482 | if (ret) { |
| 21483 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 21484 | return ret; |
| 21485 | } |
| 21486 | lpfc_sli_ringtxcmpl_put(phba, pring, piocb: pwqe); |
| 21487 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 21488 | |
| 21489 | lpfc_sli4_poll_eq(eq: qp->hba_eq); |
| 21490 | return 0; |
| 21491 | } |
| 21492 | |
| 21493 | /* NVMET requests */ |
| 21494 | if (pwqe->cmd_flag & LPFC_IO_NVMET) { |
| 21495 | /* Get the IO distribution (hba_wqidx) for WQ assignment. */ |
| 21496 | wq = qp->io_wq; |
| 21497 | pring = wq->pring; |
| 21498 | |
| 21499 | ctxp = pwqe->context_un.axchg; |
| 21500 | sglq = ctxp->ctxbuf->sglq; |
| 21501 | if (pwqe->sli4_xritag == NO_XRI) { |
| 21502 | pwqe->sli4_lxritag = sglq->sli4_lxritag; |
| 21503 | pwqe->sli4_xritag = sglq->sli4_xritag; |
| 21504 | } |
| 21505 | bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, |
| 21506 | pwqe->sli4_xritag); |
| 21507 | bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); |
| 21508 | |
| 21509 | lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, |
| 21510 | qp, wq_access); |
| 21511 | ret = lpfc_sli4_wq_put(q: wq, wqe); |
| 21512 | if (ret) { |
| 21513 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 21514 | return ret; |
| 21515 | } |
| 21516 | lpfc_sli_ringtxcmpl_put(phba, pring, piocb: pwqe); |
| 21517 | spin_unlock_irqrestore(lock: &pring->ring_lock, flags: iflags); |
| 21518 | |
| 21519 | lpfc_sli4_poll_eq(eq: qp->hba_eq); |
| 21520 | return 0; |
| 21521 | } |
| 21522 | return WQE_ERROR; |
| 21523 | } |
| 21524 | |
| 21525 | /** |
| 21526 | * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort |
| 21527 | * @phba: Pointer to HBA context object. |
| 21528 | * @cmdiocb: Pointer to driver command iocb object. |
| 21529 | * @cmpl: completion function. |
| 21530 | * |
| 21531 | * Fill the appropriate fields for the abort WQE and call |
| 21532 | * internal routine lpfc_sli4_issue_wqe to send the WQE |
| 21533 | * This function is called with hbalock held and no ring_lock held. |
| 21534 | * |
| 21535 | * RETURNS 0 - SUCCESS |
| 21536 | **/ |
| 21537 | |
| 21538 | int |
| 21539 | lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, |
| 21540 | void *cmpl) |
| 21541 | { |
| 21542 | struct lpfc_vport *vport = cmdiocb->vport; |
| 21543 | struct lpfc_iocbq *abtsiocb = NULL; |
| 21544 | union lpfc_wqe128 *abtswqe; |
| 21545 | struct lpfc_io_buf *lpfc_cmd; |
| 21546 | int retval = IOCB_ERROR; |
| 21547 | u16 xritag = cmdiocb->sli4_xritag; |
| 21548 | |
| 21549 | /* |
| 21550 | * The scsi command can not be in txq and it is in flight because the |
| 21551 | * pCmd is still pointing at the SCSI command we have to abort. There |
| 21552 | * is no need to search the txcmplq. Just send an abort to the FW. |
| 21553 | */ |
| 21554 | |
| 21555 | abtsiocb = __lpfc_sli_get_iocbq(phba); |
| 21556 | if (!abtsiocb) |
| 21557 | return WQE_NORESOURCE; |
| 21558 | |
| 21559 | /* Indicate the IO is being aborted by the driver. */ |
| 21560 | cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED; |
| 21561 | |
| 21562 | abtswqe = &abtsiocb->wqe; |
| 21563 | memset(abtswqe, 0, sizeof(*abtswqe)); |
| 21564 | |
| 21565 | if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK)) |
| 21566 | bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1); |
| 21567 | bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG); |
| 21568 | abtswqe->abort_cmd.rsrvd5 = 0; |
| 21569 | abtswqe->abort_cmd.wqe_com.abort_tag = xritag; |
| 21570 | bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag); |
| 21571 | bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); |
| 21572 | bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0); |
| 21573 | bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1); |
| 21574 | bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); |
| 21575 | bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND); |
| 21576 | |
| 21577 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ |
| 21578 | abtsiocb->hba_wqidx = cmdiocb->hba_wqidx; |
| 21579 | abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX; |
| 21580 | if (cmdiocb->cmd_flag & LPFC_IO_FCP) |
| 21581 | abtsiocb->cmd_flag |= LPFC_IO_FCP; |
| 21582 | if (cmdiocb->cmd_flag & LPFC_IO_NVME) |
| 21583 | abtsiocb->cmd_flag |= LPFC_IO_NVME; |
| 21584 | if (cmdiocb->cmd_flag & LPFC_IO_FOF) |
| 21585 | abtsiocb->cmd_flag |= LPFC_IO_FOF; |
| 21586 | abtsiocb->vport = vport; |
| 21587 | abtsiocb->cmd_cmpl = cmpl; |
| 21588 | |
| 21589 | lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq); |
| 21590 | retval = lpfc_sli4_issue_wqe(phba, qp: lpfc_cmd->hdwq, pwqe: abtsiocb); |
| 21591 | |
| 21592 | lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP, |
| 21593 | "0359 Abort xri x%x, original iotag x%x, " |
| 21594 | "abort cmd iotag x%x retval x%x\n" , |
| 21595 | xritag, cmdiocb->iotag, abtsiocb->iotag, retval); |
| 21596 | |
| 21597 | if (retval) { |
| 21598 | cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; |
| 21599 | __lpfc_sli_release_iocbq(phba, iocbq: abtsiocb); |
| 21600 | } |
| 21601 | |
| 21602 | return retval; |
| 21603 | } |
| 21604 | |
| 21605 | #ifdef LPFC_MXP_STAT |
| 21606 | /** |
| 21607 | * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count |
| 21608 | * @phba: pointer to lpfc hba data structure. |
| 21609 | * @hwqid: belong to which HWQ. |
| 21610 | * |
| 21611 | * The purpose of this routine is to take a snapshot of pbl, pvt and busy count |
| 21612 | * 15 seconds after a test case is running. |
| 21613 | * |
| 21614 | * The user should call lpfc_debugfs_multixripools_write before running a test |
| 21615 | * case to clear stat_snapshot_taken. Then the user starts a test case. During |
| 21616 | * test case is running, stat_snapshot_taken is incremented by 1 every time when |
| 21617 | * this routine is called from heartbeat timer. When stat_snapshot_taken is |
| 21618 | * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken. |
| 21619 | **/ |
| 21620 | void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) |
| 21621 | { |
| 21622 | struct lpfc_sli4_hdw_queue *qp; |
| 21623 | struct lpfc_multixri_pool *multixri_pool; |
| 21624 | struct lpfc_pvt_pool *pvt_pool; |
| 21625 | struct lpfc_pbl_pool *pbl_pool; |
| 21626 | u32 txcmplq_cnt; |
| 21627 | |
| 21628 | qp = &phba->sli4_hba.hdwq[hwqid]; |
| 21629 | multixri_pool = qp->p_multixri_pool; |
| 21630 | if (!multixri_pool) |
| 21631 | return; |
| 21632 | |
| 21633 | if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { |
| 21634 | pvt_pool = &qp->p_multixri_pool->pvt_pool; |
| 21635 | pbl_pool = &qp->p_multixri_pool->pbl_pool; |
| 21636 | txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; |
| 21637 | |
| 21638 | multixri_pool->stat_pbl_count = pbl_pool->count; |
| 21639 | multixri_pool->stat_pvt_count = pvt_pool->count; |
| 21640 | multixri_pool->stat_busy_count = txcmplq_cnt; |
| 21641 | } |
| 21642 | |
| 21643 | multixri_pool->stat_snapshot_taken++; |
| 21644 | } |
| 21645 | #endif |
| 21646 | |
| 21647 | /** |
| 21648 | * lpfc_adjust_pvt_pool_count - Adjust private pool count |
| 21649 | * @phba: pointer to lpfc hba data structure. |
| 21650 | * @hwqid: belong to which HWQ. |
| 21651 | * |
| 21652 | * This routine moves some XRIs from private to public pool when private pool |
| 21653 | * is not busy. |
| 21654 | **/ |
| 21655 | void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) |
| 21656 | { |
| 21657 | struct lpfc_multixri_pool *multixri_pool; |
| 21658 | u32 io_req_count; |
| 21659 | u32 prev_io_req_count; |
| 21660 | |
| 21661 | multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; |
| 21662 | if (!multixri_pool) |
| 21663 | return; |
| 21664 | io_req_count = multixri_pool->io_req_count; |
| 21665 | prev_io_req_count = multixri_pool->prev_io_req_count; |
| 21666 | |
| 21667 | if (prev_io_req_count != io_req_count) { |
| 21668 | /* Private pool is busy */ |
| 21669 | multixri_pool->prev_io_req_count = io_req_count; |
| 21670 | } else { |
| 21671 | /* Private pool is not busy. |
| 21672 | * Move XRIs from private to public pool. |
| 21673 | */ |
| 21674 | lpfc_move_xri_pvt_to_pbl(phba, hwqid); |
| 21675 | } |
| 21676 | } |
| 21677 | |
| 21678 | /** |
| 21679 | * lpfc_adjust_high_watermark - Adjust high watermark |
| 21680 | * @phba: pointer to lpfc hba data structure. |
| 21681 | * @hwqid: belong to which HWQ. |
| 21682 | * |
| 21683 | * This routine sets high watermark as number of outstanding XRIs, |
 * but makes sure the new value is between xri_limit/2 and xri_limit.
| 21685 | **/ |
| 21686 | void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid) |
| 21687 | { |
| 21688 | u32 new_watermark; |
| 21689 | u32 watermark_max; |
| 21690 | u32 watermark_min; |
| 21691 | u32 xri_limit; |
| 21692 | u32 txcmplq_cnt; |
| 21693 | u32 abts_io_bufs; |
| 21694 | struct lpfc_multixri_pool *multixri_pool; |
| 21695 | struct lpfc_sli4_hdw_queue *qp; |
| 21696 | |
| 21697 | qp = &phba->sli4_hba.hdwq[hwqid]; |
| 21698 | multixri_pool = qp->p_multixri_pool; |
| 21699 | if (!multixri_pool) |
| 21700 | return; |
| 21701 | xri_limit = multixri_pool->xri_limit; |
| 21702 | |
| 21703 | watermark_max = xri_limit; |
| 21704 | watermark_min = xri_limit / 2; |
| 21705 | |
| 21706 | txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; |
| 21707 | abts_io_bufs = qp->abts_scsi_io_bufs; |
| 21708 | abts_io_bufs += qp->abts_nvme_io_bufs; |
| 21709 | |
| 21710 | new_watermark = txcmplq_cnt + abts_io_bufs; |
| 21711 | new_watermark = min(watermark_max, new_watermark); |
| 21712 | new_watermark = max(watermark_min, new_watermark); |
| 21713 | multixri_pool->pvt_pool.high_watermark = new_watermark; |
| 21714 | |
| 21715 | #ifdef LPFC_MXP_STAT |
| 21716 | multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm, |
| 21717 | new_watermark); |
| 21718 | #endif |
| 21719 | } |
| 21720 | |
| 21721 | /** |
| 21722 | * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool |
| 21723 | * @phba: pointer to lpfc hba data structure. |
| 21724 | * @hwqid: belong to which HWQ. |
| 21725 | * |
 * This routine is called from the heartbeat timer when pvt_pool is idle.
| 21727 | * All free XRIs are moved from private to public pool on hwqid with 2 steps. |
| 21728 | * The first step moves (all - low_watermark) amount of XRIs. |
| 21729 | * The second step moves the rest of XRIs. |
| 21730 | **/ |
| 21731 | void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) |
| 21732 | { |
| 21733 | struct lpfc_pbl_pool *pbl_pool; |
| 21734 | struct lpfc_pvt_pool *pvt_pool; |
| 21735 | struct lpfc_sli4_hdw_queue *qp; |
| 21736 | struct lpfc_io_buf *lpfc_ncmd; |
| 21737 | struct lpfc_io_buf *lpfc_ncmd_next; |
| 21738 | unsigned long iflag; |
| 21739 | struct list_head tmp_list; |
| 21740 | u32 tmp_count; |
| 21741 | |
| 21742 | qp = &phba->sli4_hba.hdwq[hwqid]; |
| 21743 | pbl_pool = &qp->p_multixri_pool->pbl_pool; |
| 21744 | pvt_pool = &qp->p_multixri_pool->pvt_pool; |
| 21745 | tmp_count = 0; |
| 21746 | |
| 21747 | lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); |
| 21748 | lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); |
| 21749 | |
| 21750 | if (pvt_pool->count > pvt_pool->low_watermark) { |
| 21751 | /* Step 1: move (all - low_watermark) from pvt_pool |
| 21752 | * to pbl_pool |
| 21753 | */ |
| 21754 | |
| 21755 | /* Move low watermark of bufs from pvt_pool to tmp_list */ |
| 21756 | INIT_LIST_HEAD(list: &tmp_list); |
| 21757 | list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
| 21758 | &pvt_pool->list, list) { |
| 21759 | list_move_tail(list: &lpfc_ncmd->list, head: &tmp_list); |
| 21760 | tmp_count++; |
| 21761 | if (tmp_count >= pvt_pool->low_watermark) |
| 21762 | break; |
| 21763 | } |
| 21764 | |
| 21765 | /* Move all bufs from pvt_pool to pbl_pool */ |
| 21766 | list_splice_init(list: &pvt_pool->list, head: &pbl_pool->list); |
| 21767 | |
| 21768 | /* Move all bufs from tmp_list to pvt_pool */ |
| 21769 | list_splice(list: &tmp_list, head: &pvt_pool->list); |
| 21770 | |
| 21771 | pbl_pool->count += (pvt_pool->count - tmp_count); |
| 21772 | pvt_pool->count = tmp_count; |
| 21773 | } else { |
| 21774 | /* Step 2: move the rest from pvt_pool to pbl_pool */ |
| 21775 | list_splice_init(list: &pvt_pool->list, head: &pbl_pool->list); |
| 21776 | pbl_pool->count += pvt_pool->count; |
| 21777 | pvt_pool->count = 0; |
| 21778 | } |
| 21779 | |
| 21780 | spin_unlock(lock: &pvt_pool->lock); |
| 21781 | spin_unlock_irqrestore(lock: &pbl_pool->lock, flags: iflag); |
| 21782 | } |
| 21783 | |
| 21784 | /** |
| 21785 | * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool |
| 21786 | * @phba: pointer to lpfc hba data structure |
| 21787 | * @qp: pointer to HDW queue |
| 21788 | * @pbl_pool: specified public free XRI pool |
| 21789 | * @pvt_pool: specified private free XRI pool |
| 21790 | * @count: number of XRIs to move |
| 21791 | * |
| 21792 | * This routine tries to move some free common bufs from the specified pbl_pool |
| 21793 | * to the specified pvt_pool. It might move less than count XRIs if there's not |
| 21794 | * enough in public pool. |
| 21795 | * |
| 21796 | * Return: |
| 21797 | * true - if XRIs are successfully moved from the specified pbl_pool to the |
| 21798 | * specified pvt_pool |
| 21799 | * false - if the specified pbl_pool is empty or locked by someone else |
| 21800 | **/ |
| 21801 | static bool |
| 21802 | _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, |
| 21803 | struct lpfc_pbl_pool *pbl_pool, |
| 21804 | struct lpfc_pvt_pool *pvt_pool, u32 count) |
| 21805 | { |
| 21806 | struct lpfc_io_buf *lpfc_ncmd; |
| 21807 | struct lpfc_io_buf *lpfc_ncmd_next; |
| 21808 | unsigned long iflag; |
| 21809 | int ret; |
| 21810 | |
| 21811 | ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); |
| 21812 | if (ret) { |
| 21813 | if (pbl_pool->count) { |
| 21814 | /* Move a batch of XRIs from public to private pool */ |
| 21815 | lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); |
| 21816 | list_for_each_entry_safe(lpfc_ncmd, |
| 21817 | lpfc_ncmd_next, |
| 21818 | &pbl_pool->list, |
| 21819 | list) { |
| 21820 | list_move_tail(list: &lpfc_ncmd->list, |
| 21821 | head: &pvt_pool->list); |
| 21822 | pvt_pool->count++; |
| 21823 | pbl_pool->count--; |
| 21824 | count--; |
| 21825 | if (count == 0) |
| 21826 | break; |
| 21827 | } |
| 21828 | |
| 21829 | spin_unlock(lock: &pvt_pool->lock); |
| 21830 | spin_unlock_irqrestore(lock: &pbl_pool->lock, flags: iflag); |
| 21831 | return true; |
| 21832 | } |
| 21833 | spin_unlock_irqrestore(lock: &pbl_pool->lock, flags: iflag); |
| 21834 | } |
| 21835 | |
| 21836 | return false; |
| 21837 | } |
| 21838 | |
| 21839 | /** |
| 21840 | * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool |
| 21841 | * @phba: pointer to lpfc hba data structure. |
| 21842 | * @hwqid: belong to which HWQ. |
| 21843 | * @count: number of XRIs to move |
| 21844 | * |
| 21845 | * This routine tries to find some free common bufs in one of public pools with |
| 21846 | * Round Robin method. The search always starts from local hwqid, then the next |
| 21847 | * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found, |
| 21848 | * a batch of free common bufs are moved to private pool on hwqid. |
| 21849 | * It might move less than count XRIs if there's not enough in public pool. |
| 21850 | **/ |
| 21851 | void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) |
| 21852 | { |
| 21853 | struct lpfc_multixri_pool *multixri_pool; |
| 21854 | struct lpfc_multixri_pool *next_multixri_pool; |
| 21855 | struct lpfc_pvt_pool *pvt_pool; |
| 21856 | struct lpfc_pbl_pool *pbl_pool; |
| 21857 | struct lpfc_sli4_hdw_queue *qp; |
| 21858 | u32 next_hwqid; |
| 21859 | u32 hwq_count; |
| 21860 | int ret; |
| 21861 | |
| 21862 | qp = &phba->sli4_hba.hdwq[hwqid]; |
| 21863 | multixri_pool = qp->p_multixri_pool; |
| 21864 | pvt_pool = &multixri_pool->pvt_pool; |
| 21865 | pbl_pool = &multixri_pool->pbl_pool; |
| 21866 | |
| 21867 | /* Check if local pbl_pool is available */ |
| 21868 | ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count); |
| 21869 | if (ret) { |
| 21870 | #ifdef LPFC_MXP_STAT |
| 21871 | multixri_pool->local_pbl_hit_count++; |
| 21872 | #endif |
| 21873 | return; |
| 21874 | } |
| 21875 | |
| 21876 | hwq_count = phba->cfg_hdw_queue; |
| 21877 | |
| 21878 | /* Get the next hwqid which was found last time */ |
| 21879 | next_hwqid = multixri_pool->rrb_next_hwqid; |
| 21880 | |
| 21881 | do { |
| 21882 | /* Go to next hwq */ |
| 21883 | next_hwqid = (next_hwqid + 1) % hwq_count; |
| 21884 | |
| 21885 | next_multixri_pool = |
| 21886 | phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool; |
| 21887 | pbl_pool = &next_multixri_pool->pbl_pool; |
| 21888 | |
| 21889 | /* Check if the public free xri pool is available */ |
| 21890 | ret = _lpfc_move_xri_pbl_to_pvt( |
| 21891 | phba, qp, pbl_pool, pvt_pool, count); |
| 21892 | |
| 21893 | /* Exit while-loop if success or all hwqid are checked */ |
| 21894 | } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); |
| 21895 | |
| 21896 | /* Starting point for the next time */ |
| 21897 | multixri_pool->rrb_next_hwqid = next_hwqid; |
| 21898 | |
| 21899 | if (!ret) { |
| 21900 | /* stats: all public pools are empty*/ |
| 21901 | multixri_pool->pbl_empty_count++; |
| 21902 | } |
| 21903 | |
| 21904 | #ifdef LPFC_MXP_STAT |
| 21905 | if (ret) { |
| 21906 | if (next_hwqid == hwqid) |
| 21907 | multixri_pool->local_pbl_hit_count++; |
| 21908 | else |
| 21909 | multixri_pool->other_pbl_hit_count++; |
| 21910 | } |
| 21911 | #endif |
| 21912 | } |
| 21913 | |
| 21914 | /** |
| 21915 | * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark |
| 21916 | * @phba: pointer to lpfc hba data structure. |
| 21917 | * @hwqid: belong to which HWQ. |
| 21918 | * |
 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is less than
| 21920 | * low watermark. |
| 21921 | **/ |
| 21922 | void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid) |
| 21923 | { |
| 21924 | struct lpfc_multixri_pool *multixri_pool; |
| 21925 | struct lpfc_pvt_pool *pvt_pool; |
| 21926 | |
| 21927 | multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; |
| 21928 | pvt_pool = &multixri_pool->pvt_pool; |
| 21929 | |
| 21930 | if (pvt_pool->count < pvt_pool->low_watermark) |
| 21931 | lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); |
| 21932 | } |
| 21933 | |
| 21934 | /** |
| 21935 | * lpfc_release_io_buf - Return one IO buf back to free pool |
| 21936 | * @phba: pointer to lpfc hba data structure. |
| 21937 | * @lpfc_ncmd: IO buf to be returned. |
| 21938 | * @qp: belong to which HWQ. |
| 21939 | * |
| 21940 | * This routine returns one IO buf back to free pool. If this is an urgent IO, |
| 21941 | * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1, |
| 21942 | * the IO buf is returned to pbl_pool or pvt_pool based on watermark and |
| 21943 | * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to |
| 21944 | * lpfc_io_buf_list_put. |
| 21945 | **/ |
| 21946 | void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, |
| 21947 | struct lpfc_sli4_hdw_queue *qp) |
| 21948 | { |
| 21949 | unsigned long iflag; |
| 21950 | struct lpfc_pbl_pool *pbl_pool; |
| 21951 | struct lpfc_pvt_pool *pvt_pool; |
| 21952 | struct lpfc_epd_pool *epd_pool; |
| 21953 | u32 txcmplq_cnt; |
| 21954 | u32 xri_owned; |
| 21955 | u32 xri_limit; |
| 21956 | u32 abts_io_bufs; |
| 21957 | |
| 21958 | /* MUST zero fields if buffer is reused by another protocol */ |
| 21959 | lpfc_ncmd->nvmeCmd = NULL; |
| 21960 | lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL; |
| 21961 | |
| 21962 | if (phba->cfg_xpsgl && !phba->nvmet_support && |
| 21963 | !list_empty(head: &lpfc_ncmd->dma_sgl_xtra_list)) |
| 21964 | lpfc_put_sgl_per_hdwq(phba, buf: lpfc_ncmd); |
| 21965 | |
| 21966 | if (!list_empty(head: &lpfc_ncmd->dma_cmd_rsp_list)) |
| 21967 | lpfc_put_cmd_rsp_buf_per_hdwq(phba, buf: lpfc_ncmd); |
| 21968 | |
| 21969 | if (phba->cfg_xri_rebalancing) { |
| 21970 | if (lpfc_ncmd->expedite) { |
| 21971 | /* Return to expedite pool */ |
| 21972 | epd_pool = &phba->epd_pool; |
| 21973 | spin_lock_irqsave(&epd_pool->lock, iflag); |
| 21974 | list_add_tail(new: &lpfc_ncmd->list, head: &epd_pool->list); |
| 21975 | epd_pool->count++; |
| 21976 | spin_unlock_irqrestore(lock: &epd_pool->lock, flags: iflag); |
| 21977 | return; |
| 21978 | } |
| 21979 | |
| 21980 | /* Avoid invalid access if an IO sneaks in and is being rejected |
| 21981 | * just _after_ xri pools are destroyed in lpfc_offline. |
| 21982 | * Nothing much can be done at this point. |
| 21983 | */ |
| 21984 | if (!qp->p_multixri_pool) |
| 21985 | return; |
| 21986 | |
| 21987 | pbl_pool = &qp->p_multixri_pool->pbl_pool; |
| 21988 | pvt_pool = &qp->p_multixri_pool->pvt_pool; |
| 21989 | |
| 21990 | txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; |
| 21991 | abts_io_bufs = qp->abts_scsi_io_bufs; |
| 21992 | abts_io_bufs += qp->abts_nvme_io_bufs; |
| 21993 | |
| 21994 | xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; |
| 21995 | xri_limit = qp->p_multixri_pool->xri_limit; |
| 21996 | |
| 21997 | #ifdef LPFC_MXP_STAT |
| 21998 | if (xri_owned <= xri_limit) |
| 21999 | qp->p_multixri_pool->below_limit_count++; |
| 22000 | else |
| 22001 | qp->p_multixri_pool->above_limit_count++; |
| 22002 | #endif |
| 22003 | |
| 22004 | /* XRI goes to either public or private free xri pool |
| 22005 | * based on watermark and xri_limit |
| 22006 | */ |
| 22007 | if ((pvt_pool->count < pvt_pool->low_watermark) || |
| 22008 | (xri_owned < xri_limit && |
| 22009 | pvt_pool->count < pvt_pool->high_watermark)) { |
| 22010 | lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, |
| 22011 | qp, free_pvt_pool); |
| 22012 | list_add_tail(new: &lpfc_ncmd->list, |
| 22013 | head: &pvt_pool->list); |
| 22014 | pvt_pool->count++; |
| 22015 | spin_unlock_irqrestore(lock: &pvt_pool->lock, flags: iflag); |
| 22016 | } else { |
| 22017 | lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, |
| 22018 | qp, free_pub_pool); |
| 22019 | list_add_tail(new: &lpfc_ncmd->list, |
| 22020 | head: &pbl_pool->list); |
| 22021 | pbl_pool->count++; |
| 22022 | spin_unlock_irqrestore(lock: &pbl_pool->lock, flags: iflag); |
| 22023 | } |
| 22024 | } else { |
| 22025 | lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, |
| 22026 | qp, free_xri); |
| 22027 | list_add_tail(new: &lpfc_ncmd->list, |
| 22028 | head: &qp->lpfc_io_buf_list_put); |
| 22029 | qp->put_io_bufs++; |
| 22030 | spin_unlock_irqrestore(lock: &qp->io_buf_list_put_lock, |
| 22031 | flags: iflag); |
| 22032 | } |
| 22033 | } |
| 22034 | |
| 22035 | /** |
| 22036 | * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool |
| 22037 | * @phba: pointer to lpfc hba data structure. |
| 22038 | * @qp: pointer to HDW queue |
| 22039 | * @pvt_pool: pointer to private pool data structure. |
| 22040 | * @ndlp: pointer to lpfc nodelist data structure. |
| 22041 | * |
| 22042 | * This routine tries to get one free IO buf from private pool. |
| 22043 | * |
| 22044 | * Return: |
| 22045 | * pointer to one free IO buf - if private pool is not empty |
| 22046 | * NULL - if private pool is empty |
| 22047 | **/ |
| 22048 | static struct lpfc_io_buf * |
| 22049 | lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba, |
| 22050 | struct lpfc_sli4_hdw_queue *qp, |
| 22051 | struct lpfc_pvt_pool *pvt_pool, |
| 22052 | struct lpfc_nodelist *ndlp) |
| 22053 | { |
| 22054 | struct lpfc_io_buf *lpfc_ncmd; |
| 22055 | struct lpfc_io_buf *lpfc_ncmd_next; |
| 22056 | unsigned long iflag; |
| 22057 | |
| 22058 | lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool); |
| 22059 | list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
| 22060 | &pvt_pool->list, list) { |
| 22061 | if (lpfc_test_rrq_active( |
| 22062 | phba, ndlp, xritag: lpfc_ncmd->cur_iocbq.sli4_lxritag)) |
| 22063 | continue; |
| 22064 | list_del(entry: &lpfc_ncmd->list); |
| 22065 | pvt_pool->count--; |
| 22066 | spin_unlock_irqrestore(lock: &pvt_pool->lock, flags: iflag); |
| 22067 | return lpfc_ncmd; |
| 22068 | } |
| 22069 | spin_unlock_irqrestore(lock: &pvt_pool->lock, flags: iflag); |
| 22070 | |
| 22071 | return NULL; |
| 22072 | } |
| 22073 | |
| 22074 | /** |
| 22075 | * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool |
| 22076 | * @phba: pointer to lpfc hba data structure. |
| 22077 | * |
| 22078 | * This routine tries to get one free IO buf from expedite pool. |
| 22079 | * |
| 22080 | * Return: |
| 22081 | * pointer to one free IO buf - if expedite pool is not empty |
| 22082 | * NULL - if expedite pool is empty |
| 22083 | **/ |
| 22084 | static struct lpfc_io_buf * |
| 22085 | lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba) |
| 22086 | { |
| 22087 | struct lpfc_io_buf *lpfc_ncmd = NULL, *iter; |
| 22088 | struct lpfc_io_buf *lpfc_ncmd_next; |
| 22089 | unsigned long iflag; |
| 22090 | struct lpfc_epd_pool *epd_pool; |
| 22091 | |
| 22092 | epd_pool = &phba->epd_pool; |
| 22093 | |
| 22094 | spin_lock_irqsave(&epd_pool->lock, iflag); |
| 22095 | if (epd_pool->count > 0) { |
| 22096 | list_for_each_entry_safe(iter, lpfc_ncmd_next, |
| 22097 | &epd_pool->list, list) { |
| 22098 | list_del(entry: &iter->list); |
| 22099 | epd_pool->count--; |
| 22100 | lpfc_ncmd = iter; |
| 22101 | break; |
| 22102 | } |
| 22103 | } |
| 22104 | spin_unlock_irqrestore(lock: &epd_pool->lock, flags: iflag); |
| 22105 | |
| 22106 | return lpfc_ncmd; |
| 22107 | } |
| 22108 | |
| 22109 | /** |
| 22110 | * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs |
| 22111 | * @phba: pointer to lpfc hba data structure. |
| 22112 | * @ndlp: pointer to lpfc nodelist data structure. |
| 22113 | * @hwqid: belong to which HWQ |
| 22114 | * @expedite: 1 means this request is urgent. |
| 22115 | * |
| 22116 | * This routine will do the following actions and then return a pointer to |
| 22117 | * one free IO buf. |
| 22118 | * |
| 22119 | * 1. If private free xri count is empty, move some XRIs from public to |
| 22120 | * private pool. |
| 22121 | * 2. Get one XRI from private free xri pool. |
| 22122 | * 3. If we fail to get one from pvt_pool and this is an expedite request, |
| 22123 | * get one free xri from expedite pool. |
| 22124 | * |
| 22125 | * Note: ndlp is only used on SCSI side for RRQ testing. |
| 22126 | * The caller should pass NULL for ndlp on NVME side. |
| 22127 | * |
| 22128 | * Return: |
| 22129 | * pointer to one free IO buf - if private pool is not empty |
| 22130 | * NULL - if private pool is empty |
| 22131 | **/ |
| 22132 | static struct lpfc_io_buf * |
| 22133 | lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba, |
| 22134 | struct lpfc_nodelist *ndlp, |
| 22135 | int hwqid, int expedite) |
| 22136 | { |
| 22137 | struct lpfc_sli4_hdw_queue *qp; |
| 22138 | struct lpfc_multixri_pool *multixri_pool; |
| 22139 | struct lpfc_pvt_pool *pvt_pool; |
| 22140 | struct lpfc_io_buf *lpfc_ncmd; |
| 22141 | |
| 22142 | qp = &phba->sli4_hba.hdwq[hwqid]; |
| 22143 | lpfc_ncmd = NULL; |
| 22144 | if (!qp) { |
| 22145 | lpfc_printf_log(phba, KERN_INFO, |
| 22146 | LOG_SLI | LOG_NVME_ABTS | LOG_FCP, |
| 22147 | "5556 NULL qp for hwqid x%x\n" , hwqid); |
| 22148 | return lpfc_ncmd; |
| 22149 | } |
| 22150 | multixri_pool = qp->p_multixri_pool; |
| 22151 | if (!multixri_pool) { |
| 22152 | lpfc_printf_log(phba, KERN_INFO, |
| 22153 | LOG_SLI | LOG_NVME_ABTS | LOG_FCP, |
| 22154 | "5557 NULL multixri for hwqid x%x\n" , hwqid); |
| 22155 | return lpfc_ncmd; |
| 22156 | } |
| 22157 | pvt_pool = &multixri_pool->pvt_pool; |
| 22158 | if (!pvt_pool) { |
| 22159 | lpfc_printf_log(phba, KERN_INFO, |
| 22160 | LOG_SLI | LOG_NVME_ABTS | LOG_FCP, |
| 22161 | "5558 NULL pvt_pool for hwqid x%x\n" , hwqid); |
| 22162 | return lpfc_ncmd; |
| 22163 | } |
| 22164 | multixri_pool->io_req_count++; |
| 22165 | |
| 22166 | /* If pvt_pool is empty, move some XRIs from public to private pool */ |
| 22167 | if (pvt_pool->count == 0) |
| 22168 | lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); |
| 22169 | |
| 22170 | /* Get one XRI from private free xri pool */ |
| 22171 | lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp); |
| 22172 | |
| 22173 | if (lpfc_ncmd) { |
| 22174 | lpfc_ncmd->hdwq = qp; |
| 22175 | lpfc_ncmd->hdwq_no = hwqid; |
| 22176 | } else if (expedite) { |
| 22177 | /* If we fail to get one from pvt_pool and this is an expedite |
| 22178 | * request, get one free xri from expedite pool. |
| 22179 | */ |
| 22180 | lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba); |
| 22181 | } |
| 22182 | |
| 22183 | return lpfc_ncmd; |
| 22184 | } |
| 22185 | |
| 22186 | static inline struct lpfc_io_buf * |
| 22187 | lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx) |
| 22188 | { |
| 22189 | struct lpfc_sli4_hdw_queue *qp; |
| 22190 | struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next; |
| 22191 | |
| 22192 | qp = &phba->sli4_hba.hdwq[idx]; |
| 22193 | list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, |
| 22194 | &qp->lpfc_io_buf_list_get, list) { |
| 22195 | if (lpfc_test_rrq_active(phba, ndlp, |
| 22196 | xritag: lpfc_cmd->cur_iocbq.sli4_lxritag)) |
| 22197 | continue; |
| 22198 | |
| 22199 | if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED) |
| 22200 | continue; |
| 22201 | |
| 22202 | list_del_init(entry: &lpfc_cmd->list); |
| 22203 | qp->get_io_bufs--; |
| 22204 | lpfc_cmd->hdwq = qp; |
| 22205 | lpfc_cmd->hdwq_no = idx; |
| 22206 | return lpfc_cmd; |
| 22207 | } |
| 22208 | return NULL; |
| 22209 | } |
| 22210 | |
| 22211 | /** |
| 22212 | * lpfc_get_io_buf - Get one IO buffer from free pool |
| 22213 | * @phba: The HBA for which this call is being executed. |
| 22214 | * @ndlp: pointer to lpfc nodelist data structure. |
| 22215 | * @hwqid: belong to which HWQ |
| 22216 | * @expedite: 1 means this request is urgent. |
| 22217 | * |
| 22218 | * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1, |
| 22219 | * removes a IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes |
| 22220 | * a IO buffer from head of @hdwq io_buf_list and returns to caller. |
| 22221 | * |
| 22222 | * Note: ndlp is only used on SCSI side for RRQ testing. |
| 22223 | * The caller should pass NULL for ndlp on NVME side. |
| 22224 | * |
| 22225 | * Return codes: |
| 22226 | * NULL - Error |
| 22227 | * Pointer to lpfc_io_buf - Success |
| 22228 | **/ |
| 22229 | struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, |
| 22230 | struct lpfc_nodelist *ndlp, |
| 22231 | u32 hwqid, int expedite) |
| 22232 | { |
| 22233 | struct lpfc_sli4_hdw_queue *qp; |
| 22234 | unsigned long iflag; |
| 22235 | struct lpfc_io_buf *lpfc_cmd; |
| 22236 | |
| 22237 | qp = &phba->sli4_hba.hdwq[hwqid]; |
| 22238 | lpfc_cmd = NULL; |
| 22239 | if (!qp) { |
| 22240 | lpfc_printf_log(phba, KERN_WARNING, |
| 22241 | LOG_SLI | LOG_NVME_ABTS | LOG_FCP, |
| 22242 | "5555 NULL qp for hwqid x%x\n" , hwqid); |
| 22243 | return lpfc_cmd; |
| 22244 | } |
| 22245 | |
| 22246 | if (phba->cfg_xri_rebalancing) |
| 22247 | lpfc_cmd = lpfc_get_io_buf_from_multixri_pools( |
| 22248 | phba, ndlp, hwqid, expedite); |
| 22249 | else { |
| 22250 | lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag, |
| 22251 | qp, alloc_xri_get); |
| 22252 | if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) |
| 22253 | lpfc_cmd = lpfc_io_buf(phba, ndlp, idx: hwqid); |
| 22254 | if (!lpfc_cmd) { |
| 22255 | lpfc_qp_spin_lock(&qp->io_buf_list_put_lock, |
| 22256 | qp, alloc_xri_put); |
| 22257 | list_splice(list: &qp->lpfc_io_buf_list_put, |
| 22258 | head: &qp->lpfc_io_buf_list_get); |
| 22259 | qp->get_io_bufs += qp->put_io_bufs; |
| 22260 | INIT_LIST_HEAD(list: &qp->lpfc_io_buf_list_put); |
| 22261 | qp->put_io_bufs = 0; |
| 22262 | spin_unlock(lock: &qp->io_buf_list_put_lock); |
| 22263 | if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || |
| 22264 | expedite) |
| 22265 | lpfc_cmd = lpfc_io_buf(phba, ndlp, idx: hwqid); |
| 22266 | } |
| 22267 | spin_unlock_irqrestore(lock: &qp->io_buf_list_get_lock, flags: iflag); |
| 22268 | } |
| 22269 | |
| 22270 | return lpfc_cmd; |
| 22271 | } |
| 22272 | |
| 22273 | /** |
| 22274 | * lpfc_read_object - Retrieve object data from HBA |
| 22275 | * @phba: The HBA for which this call is being executed. |
| 22276 | * @rdobject: Pathname of object data we want to read. |
| 22277 | * @datap: Pointer to where data will be copied to. |
| 22278 | * @datasz: size of data area |
| 22279 | * |
| 22280 | * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less. |
| 22281 | * The data will be truncated if datasz is not large enough. |
| 22282 | * Version 1 is not supported with Embedded mbox cmd, so we must use version 0. |
| 22283 | * Returns the actual bytes read from the object. |
| 22284 | * |
| 22285 | * This routine is hard coded to use a poll completion. Unlike other |
| 22286 | * sli4_config mailboxes, it uses lpfc_mbuf memory which is not |
| 22287 | * cleaned up in lpfc_sli4_cmd_mbox_free. If this routine is modified |
| 22288 | * to use interrupt-based completions, code is needed to fully cleanup |
| 22289 | * the memory. |
| 22290 | */ |
| 22291 | int |
| 22292 | lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap, |
| 22293 | uint32_t datasz) |
| 22294 | { |
| 22295 | struct lpfc_mbx_read_object *read_object; |
| 22296 | LPFC_MBOXQ_t *mbox; |
| 22297 | int rc, length, eof, j, byte_cnt = 0; |
| 22298 | uint32_t shdr_status, shdr_add_status; |
| 22299 | union lpfc_sli4_cfg_shdr *shdr; |
| 22300 | struct lpfc_dmabuf *pcmd; |
| 22301 | u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0}; |
| 22302 | |
| 22303 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
| 22304 | if (!mbox) |
| 22305 | return -ENOMEM; |
| 22306 | length = (sizeof(struct lpfc_mbx_read_object) - |
| 22307 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
| 22308 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
| 22309 | LPFC_MBOX_OPCODE_READ_OBJECT, |
| 22310 | length, LPFC_SLI4_MBX_EMBED); |
| 22311 | read_object = &mbox->u.mqe.un.read_object; |
| 22312 | shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr; |
| 22313 | |
| 22314 | bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0); |
| 22315 | bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz); |
| 22316 | read_object->u.request.rd_object_offset = 0; |
| 22317 | read_object->u.request.rd_object_cnt = 1; |
| 22318 | |
| 22319 | memset((void *)read_object->u.request.rd_object_name, 0, |
| 22320 | LPFC_OBJ_NAME_SZ); |
| 22321 | scnprintf(buf: (char *)rd_object_name, size: sizeof(rd_object_name), fmt: rdobject); |
| 22322 | for (j = 0; j < strlen(rdobject); j++) |
| 22323 | read_object->u.request.rd_object_name[j] = |
| 22324 | cpu_to_le32(rd_object_name[j]); |
| 22325 | |
| 22326 | pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); |
| 22327 | if (pcmd) |
| 22328 | pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); |
| 22329 | if (!pcmd || !pcmd->virt) { |
| 22330 | kfree(objp: pcmd); |
| 22331 | mempool_free(element: mbox, pool: phba->mbox_mem_pool); |
| 22332 | return -ENOMEM; |
| 22333 | } |
| 22334 | memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE); |
| 22335 | read_object->u.request.rd_object_hbuf[0].pa_lo = |
| 22336 | putPaddrLow(pcmd->phys); |
| 22337 | read_object->u.request.rd_object_hbuf[0].pa_hi = |
| 22338 | putPaddrHigh(pcmd->phys); |
| 22339 | read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE; |
| 22340 | |
| 22341 | mbox->vport = phba->pport; |
| 22342 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
| 22343 | mbox->ctx_ndlp = NULL; |
| 22344 | |
| 22345 | rc = lpfc_sli_issue_mbox(phba, pmbox: mbox, MBX_POLL); |
| 22346 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
| 22347 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
| 22348 | |
| 22349 | if (shdr_status == STATUS_FAILED && |
| 22350 | shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) { |
| 22351 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, |
| 22352 | "4674 No port cfg file in FW.\n" ); |
| 22353 | byte_cnt = -ENOENT; |
| 22354 | } else if (shdr_status || shdr_add_status || rc) { |
| 22355 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT, |
| 22356 | "2625 READ_OBJECT mailbox failed with " |
| 22357 | "status x%x add_status x%x, mbx status x%x\n" , |
| 22358 | shdr_status, shdr_add_status, rc); |
| 22359 | byte_cnt = -ENXIO; |
| 22360 | } else { |
| 22361 | /* Success */ |
| 22362 | length = read_object->u.response.rd_object_actual_rlen; |
| 22363 | eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response); |
| 22364 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT, |
| 22365 | "2626 READ_OBJECT Success len %d:%d, EOF %d\n" , |
| 22366 | length, datasz, eof); |
| 22367 | |
| 22368 | /* Detect the port config file exists but is empty */ |
| 22369 | if (!length && eof) { |
| 22370 | byte_cnt = 0; |
| 22371 | goto exit; |
| 22372 | } |
| 22373 | |
| 22374 | byte_cnt = length; |
| 22375 | lpfc_sli_pcimem_bcopy(srcp: pcmd->virt, destp: datap, cnt: byte_cnt); |
| 22376 | } |
| 22377 | |
| 22378 | exit: |
| 22379 | /* This is an embedded SLI4 mailbox with an external buffer allocated. |
| 22380 | * Free the pcmd and then cleanup with the correct routine. |
| 22381 | */ |
| 22382 | lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); |
| 22383 | kfree(objp: pcmd); |
| 22384 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
| 22385 | return byte_cnt; |
| 22386 | } |
| 22387 | |
| 22388 | /** |
| 22389 | * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool |
| 22390 | * @phba: The HBA for which this call is being executed. |
| 22391 | * @lpfc_buf: IO buf structure to append the SGL chunk |
| 22392 | * |
| 22393 | * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool, |
| 22394 | * and will allocate an SGL chunk if the pool is empty. |
| 22395 | * |
| 22396 | * Return codes: |
| 22397 | * NULL - Error |
| 22398 | * Pointer to sli4_hybrid_sgl - Success |
| 22399 | **/ |
| 22400 | struct sli4_hybrid_sgl * |
| 22401 | lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) |
| 22402 | { |
| 22403 | struct sli4_hybrid_sgl *list_entry = NULL; |
| 22404 | struct sli4_hybrid_sgl *tmp = NULL; |
| 22405 | struct sli4_hybrid_sgl *allocated_sgl = NULL; |
| 22406 | struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; |
| 22407 | struct list_head *buf_list = &hdwq->sgl_list; |
| 22408 | unsigned long iflags; |
| 22409 | |
| 22410 | spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
| 22411 | |
| 22412 | if (likely(!list_empty(buf_list))) { |
| 22413 | /* break off 1 chunk from the sgl_list */ |
| 22414 | list_for_each_entry_safe(list_entry, tmp, |
| 22415 | buf_list, list_node) { |
| 22416 | list_move_tail(list: &list_entry->list_node, |
| 22417 | head: &lpfc_buf->dma_sgl_xtra_list); |
| 22418 | break; |
| 22419 | } |
| 22420 | } else { |
| 22421 | /* allocate more */ |
| 22422 | spin_unlock_irqrestore(lock: &hdwq->hdwq_lock, flags: iflags); |
| 22423 | tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, |
| 22424 | cpu_to_node(hdwq->io_wq->chann)); |
| 22425 | if (!tmp) { |
| 22426 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 22427 | "8353 error kmalloc memory for HDWQ " |
| 22428 | "%d %s\n" , |
| 22429 | lpfc_buf->hdwq_no, __func__); |
| 22430 | return NULL; |
| 22431 | } |
| 22432 | |
| 22433 | tmp->dma_sgl = dma_pool_alloc(pool: phba->lpfc_sg_dma_buf_pool, |
| 22434 | GFP_ATOMIC, handle: &tmp->dma_phys_sgl); |
| 22435 | if (!tmp->dma_sgl) { |
| 22436 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 22437 | "8354 error pool_alloc memory for HDWQ " |
| 22438 | "%d %s\n" , |
| 22439 | lpfc_buf->hdwq_no, __func__); |
| 22440 | kfree(objp: tmp); |
| 22441 | return NULL; |
| 22442 | } |
| 22443 | |
| 22444 | spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
| 22445 | list_add_tail(new: &tmp->list_node, head: &lpfc_buf->dma_sgl_xtra_list); |
| 22446 | } |
| 22447 | |
| 22448 | allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list, |
| 22449 | struct sli4_hybrid_sgl, |
| 22450 | list_node); |
| 22451 | |
| 22452 | spin_unlock_irqrestore(lock: &hdwq->hdwq_lock, flags: iflags); |
| 22453 | |
| 22454 | return allocated_sgl; |
| 22455 | } |
| 22456 | |
| 22457 | /** |
| 22458 | * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool |
| 22459 | * @phba: The HBA for which this call is being executed. |
| 22460 | * @lpfc_buf: IO buf structure with the SGL chunk |
| 22461 | * |
| 22462 | * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool. |
| 22463 | * |
| 22464 | * Return codes: |
| 22465 | * 0 - Success |
| 22466 | * -EINVAL - Error |
| 22467 | **/ |
| 22468 | int |
| 22469 | lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) |
| 22470 | { |
| 22471 | int rc = 0; |
| 22472 | struct sli4_hybrid_sgl *list_entry = NULL; |
| 22473 | struct sli4_hybrid_sgl *tmp = NULL; |
| 22474 | struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; |
| 22475 | struct list_head *buf_list = &hdwq->sgl_list; |
| 22476 | unsigned long iflags; |
| 22477 | |
| 22478 | spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
| 22479 | |
| 22480 | if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) { |
| 22481 | list_for_each_entry_safe(list_entry, tmp, |
| 22482 | &lpfc_buf->dma_sgl_xtra_list, |
| 22483 | list_node) { |
| 22484 | list_move_tail(list: &list_entry->list_node, |
| 22485 | head: buf_list); |
| 22486 | } |
| 22487 | } else { |
| 22488 | rc = -EINVAL; |
| 22489 | } |
| 22490 | |
| 22491 | spin_unlock_irqrestore(lock: &hdwq->hdwq_lock, flags: iflags); |
| 22492 | return rc; |
| 22493 | } |
| 22494 | |
| 22495 | /** |
| 22496 | * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool |
| 22497 | * @phba: phba object |
| 22498 | * @hdwq: hdwq to cleanup sgl buff resources on |
| 22499 | * |
| 22500 | * This routine frees all SGL chunks of hdwq SGL chunk pool. |
| 22501 | * |
| 22502 | * Return codes: |
| 22503 | * None |
| 22504 | **/ |
| 22505 | void |
| 22506 | lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba, |
| 22507 | struct lpfc_sli4_hdw_queue *hdwq) |
| 22508 | { |
| 22509 | struct list_head *buf_list = &hdwq->sgl_list; |
| 22510 | struct sli4_hybrid_sgl *list_entry = NULL; |
| 22511 | struct sli4_hybrid_sgl *tmp = NULL; |
| 22512 | unsigned long iflags; |
| 22513 | |
| 22514 | spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
| 22515 | |
| 22516 | /* Free sgl pool */ |
| 22517 | list_for_each_entry_safe(list_entry, tmp, |
| 22518 | buf_list, list_node) { |
| 22519 | list_del(entry: &list_entry->list_node); |
| 22520 | dma_pool_free(pool: phba->lpfc_sg_dma_buf_pool, |
| 22521 | vaddr: list_entry->dma_sgl, |
| 22522 | addr: list_entry->dma_phys_sgl); |
| 22523 | kfree(objp: list_entry); |
| 22524 | } |
| 22525 | |
| 22526 | spin_unlock_irqrestore(lock: &hdwq->hdwq_lock, flags: iflags); |
| 22527 | } |
| 22528 | |
| 22529 | /** |
| 22530 | * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq |
| 22531 | * @phba: The HBA for which this call is being executed. |
| 22532 | * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer |
| 22533 | * |
| 22534 | * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool, |
| 22535 | * and will allocate an CMD/RSP buffer if the pool is empty. |
| 22536 | * |
| 22537 | * Return codes: |
| 22538 | * NULL - Error |
| 22539 | * Pointer to fcp_cmd_rsp_buf - Success |
| 22540 | **/ |
| 22541 | struct fcp_cmd_rsp_buf * |
| 22542 | lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, |
| 22543 | struct lpfc_io_buf *lpfc_buf) |
| 22544 | { |
| 22545 | struct fcp_cmd_rsp_buf *list_entry = NULL; |
| 22546 | struct fcp_cmd_rsp_buf *tmp = NULL; |
| 22547 | struct fcp_cmd_rsp_buf *allocated_buf = NULL; |
| 22548 | struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; |
| 22549 | struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; |
| 22550 | unsigned long iflags; |
| 22551 | |
| 22552 | spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
| 22553 | |
| 22554 | if (likely(!list_empty(buf_list))) { |
| 22555 | /* break off 1 chunk from the list */ |
| 22556 | list_for_each_entry_safe(list_entry, tmp, |
| 22557 | buf_list, |
| 22558 | list_node) { |
| 22559 | list_move_tail(list: &list_entry->list_node, |
| 22560 | head: &lpfc_buf->dma_cmd_rsp_list); |
| 22561 | break; |
| 22562 | } |
| 22563 | } else { |
| 22564 | /* allocate more */ |
| 22565 | spin_unlock_irqrestore(lock: &hdwq->hdwq_lock, flags: iflags); |
| 22566 | tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, |
| 22567 | cpu_to_node(hdwq->io_wq->chann)); |
| 22568 | if (!tmp) { |
| 22569 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 22570 | "8355 error kmalloc memory for HDWQ " |
| 22571 | "%d %s\n" , |
| 22572 | lpfc_buf->hdwq_no, __func__); |
| 22573 | return NULL; |
| 22574 | } |
| 22575 | |
| 22576 | tmp->fcp_cmnd = dma_pool_zalloc(pool: phba->lpfc_cmd_rsp_buf_pool, |
| 22577 | GFP_ATOMIC, |
| 22578 | handle: &tmp->fcp_cmd_rsp_dma_handle); |
| 22579 | |
| 22580 | if (!tmp->fcp_cmnd) { |
| 22581 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
| 22582 | "8356 error pool_alloc memory for HDWQ " |
| 22583 | "%d %s\n" , |
| 22584 | lpfc_buf->hdwq_no, __func__); |
| 22585 | kfree(objp: tmp); |
| 22586 | return NULL; |
| 22587 | } |
| 22588 | |
| 22589 | tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd + |
| 22590 | sizeof(struct fcp_cmnd32)); |
| 22591 | |
| 22592 | spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
| 22593 | list_add_tail(new: &tmp->list_node, head: &lpfc_buf->dma_cmd_rsp_list); |
| 22594 | } |
| 22595 | |
| 22596 | allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list, |
| 22597 | struct fcp_cmd_rsp_buf, |
| 22598 | list_node); |
| 22599 | |
| 22600 | spin_unlock_irqrestore(lock: &hdwq->hdwq_lock, flags: iflags); |
| 22601 | |
| 22602 | return allocated_buf; |
| 22603 | } |
| 22604 | |
| 22605 | /** |
| 22606 | * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool |
| 22607 | * @phba: The HBA for which this call is being executed. |
| 22608 | * @lpfc_buf: IO buf structure with the CMD/RSP buf |
| 22609 | * |
| 22610 | * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool. |
| 22611 | * |
| 22612 | * Return codes: |
| 22613 | * 0 - Success |
| 22614 | * -EINVAL - Error |
| 22615 | **/ |
| 22616 | int |
| 22617 | lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, |
| 22618 | struct lpfc_io_buf *lpfc_buf) |
| 22619 | { |
| 22620 | int rc = 0; |
| 22621 | struct fcp_cmd_rsp_buf *list_entry = NULL; |
| 22622 | struct fcp_cmd_rsp_buf *tmp = NULL; |
| 22623 | struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; |
| 22624 | struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; |
| 22625 | unsigned long iflags; |
| 22626 | |
| 22627 | spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
| 22628 | |
| 22629 | if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) { |
| 22630 | list_for_each_entry_safe(list_entry, tmp, |
| 22631 | &lpfc_buf->dma_cmd_rsp_list, |
| 22632 | list_node) { |
| 22633 | list_move_tail(list: &list_entry->list_node, |
| 22634 | head: buf_list); |
| 22635 | } |
| 22636 | } else { |
| 22637 | rc = -EINVAL; |
| 22638 | } |
| 22639 | |
| 22640 | spin_unlock_irqrestore(lock: &hdwq->hdwq_lock, flags: iflags); |
| 22641 | return rc; |
| 22642 | } |
| 22643 | |
| 22644 | /** |
| 22645 | * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool |
| 22646 | * @phba: phba object |
| 22647 | * @hdwq: hdwq to cleanup cmd rsp buff resources on |
| 22648 | * |
| 22649 | * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool. |
| 22650 | * |
| 22651 | * Return codes: |
| 22652 | * None |
| 22653 | **/ |
| 22654 | void |
| 22655 | lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, |
| 22656 | struct lpfc_sli4_hdw_queue *hdwq) |
| 22657 | { |
| 22658 | struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; |
| 22659 | struct fcp_cmd_rsp_buf *list_entry = NULL; |
| 22660 | struct fcp_cmd_rsp_buf *tmp = NULL; |
| 22661 | unsigned long iflags; |
| 22662 | |
| 22663 | spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
| 22664 | |
| 22665 | /* Free cmd_rsp buf pool */ |
| 22666 | list_for_each_entry_safe(list_entry, tmp, |
| 22667 | buf_list, |
| 22668 | list_node) { |
| 22669 | list_del(entry: &list_entry->list_node); |
| 22670 | dma_pool_free(pool: phba->lpfc_cmd_rsp_buf_pool, |
| 22671 | vaddr: list_entry->fcp_cmnd, |
| 22672 | addr: list_entry->fcp_cmd_rsp_dma_handle); |
| 22673 | kfree(objp: list_entry); |
| 22674 | } |
| 22675 | |
| 22676 | spin_unlock_irqrestore(lock: &hdwq->hdwq_lock, flags: iflags); |
| 22677 | } |
| 22678 | |
| 22679 | /** |
| 22680 | * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted |
| 22681 | * @phba: phba object |
| 22682 | * @job: job entry of the command to be posted. |
| 22683 | * |
| 22684 | * Fill the common fields of the wqe for each of the command. |
| 22685 | * |
| 22686 | * Return codes: |
| 22687 | * None |
| 22688 | **/ |
| 22689 | void |
| 22690 | lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job) |
| 22691 | { |
| 22692 | u8 cmnd; |
| 22693 | u32 *pcmd; |
| 22694 | u32 if_type = 0; |
| 22695 | u32 abort_tag; |
| 22696 | bool fip; |
| 22697 | struct lpfc_nodelist *ndlp = NULL; |
| 22698 | union lpfc_wqe128 *wqe = &job->wqe; |
| 22699 | u8 command_type = ELS_COMMAND_NON_FIP; |
| 22700 | |
| 22701 | fip = test_bit(HBA_FIP_SUPPORT, &phba->hba_flag); |
| 22702 | /* The fcp commands will set command type */ |
| 22703 | if (job->cmd_flag & LPFC_IO_FCP) |
| 22704 | command_type = FCP_COMMAND; |
| 22705 | else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK)) |
| 22706 | command_type = ELS_COMMAND_FIP; |
| 22707 | else |
| 22708 | command_type = ELS_COMMAND_NON_FIP; |
| 22709 | |
| 22710 | abort_tag = job->iotag; |
| 22711 | cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com); |
| 22712 | |
| 22713 | switch (cmnd) { |
| 22714 | case CMD_ELS_REQUEST64_WQE: |
| 22715 | ndlp = job->ndlp; |
| 22716 | |
| 22717 | if_type = bf_get(lpfc_sli_intf_if_type, |
| 22718 | &phba->sli4_hba.sli_intf); |
| 22719 | if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { |
| 22720 | pcmd = (u32 *)job->cmd_dmabuf->virt; |
| 22721 | if (pcmd && (*pcmd == ELS_CMD_FLOGI || |
| 22722 | *pcmd == ELS_CMD_SCR || |
| 22723 | *pcmd == ELS_CMD_RDF || |
| 22724 | *pcmd == ELS_CMD_EDC || |
| 22725 | *pcmd == ELS_CMD_RSCN_XMT || |
| 22726 | *pcmd == ELS_CMD_FDISC || |
| 22727 | *pcmd == ELS_CMD_LOGO || |
| 22728 | *pcmd == ELS_CMD_QFPA || |
| 22729 | *pcmd == ELS_CMD_UVEM || |
| 22730 | *pcmd == ELS_CMD_PLOGI)) { |
| 22731 | bf_set(els_req64_sp, &wqe->els_req, 1); |
| 22732 | bf_set(els_req64_sid, &wqe->els_req, |
| 22733 | job->vport->fc_myDID); |
| 22734 | |
| 22735 | if ((*pcmd == ELS_CMD_FLOGI) && |
| 22736 | !(phba->fc_topology == |
| 22737 | LPFC_TOPOLOGY_LOOP)) |
| 22738 | bf_set(els_req64_sid, &wqe->els_req, 0); |
| 22739 | |
| 22740 | bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); |
| 22741 | bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, |
| 22742 | phba->vpi_ids[job->vport->vpi]); |
| 22743 | } else if (pcmd) { |
| 22744 | bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); |
| 22745 | bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, |
| 22746 | phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); |
| 22747 | } |
| 22748 | } |
| 22749 | |
| 22750 | bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, |
| 22751 | phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); |
| 22752 | |
| 22753 | bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); |
| 22754 | bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); |
| 22755 | bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); |
| 22756 | bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); |
| 22757 | bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); |
| 22758 | break; |
| 22759 | case CMD_XMIT_ELS_RSP64_WQE: |
| 22760 | ndlp = job->ndlp; |
| 22761 | |
| 22762 | /* word4 */ |
| 22763 | wqe->xmit_els_rsp.word4 = 0; |
| 22764 | |
| 22765 | if_type = bf_get(lpfc_sli_intf_if_type, |
| 22766 | &phba->sli4_hba.sli_intf); |
| 22767 | if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { |
| 22768 | if (test_bit(FC_PT2PT, &job->vport->fc_flag)) { |
| 22769 | bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); |
| 22770 | bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, |
| 22771 | job->vport->fc_myDID); |
| 22772 | if (job->vport->fc_myDID == Fabric_DID) { |
| 22773 | bf_set(wqe_els_did, |
| 22774 | &wqe->xmit_els_rsp.wqe_dest, 0); |
| 22775 | } |
| 22776 | } |
| 22777 | } |
| 22778 | |
| 22779 | bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); |
| 22780 | bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); |
| 22781 | bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); |
| 22782 | bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, |
| 22783 | LPFC_WQE_LENLOC_WORD3); |
| 22784 | bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); |
| 22785 | |
| 22786 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
| 22787 | bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); |
| 22788 | bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, |
| 22789 | job->vport->fc_myDID); |
| 22790 | bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); |
| 22791 | } |
| 22792 | |
| 22793 | if (phba->sli_rev == LPFC_SLI_REV4) { |
| 22794 | bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, |
| 22795 | phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); |
| 22796 | |
| 22797 | if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com)) |
| 22798 | bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, |
| 22799 | phba->vpi_ids[job->vport->vpi]); |
| 22800 | } |
| 22801 | command_type = OTHER_COMMAND; |
| 22802 | break; |
| 22803 | case CMD_GEN_REQUEST64_WQE: |
| 22804 | /* Word 10 */ |
| 22805 | bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); |
| 22806 | bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); |
| 22807 | bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); |
| 22808 | bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); |
| 22809 | bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); |
| 22810 | command_type = OTHER_COMMAND; |
| 22811 | break; |
| 22812 | case CMD_XMIT_SEQUENCE64_WQE: |
| 22813 | if (phba->link_flag & LS_LOOPBACK_MODE) |
| 22814 | bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); |
| 22815 | |
| 22816 | wqe->xmit_sequence.rsvd3 = 0; |
| 22817 | bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); |
| 22818 | bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); |
| 22819 | bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, |
| 22820 | LPFC_WQE_IOD_WRITE); |
| 22821 | bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, |
| 22822 | LPFC_WQE_LENLOC_WORD12); |
| 22823 | bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); |
| 22824 | command_type = OTHER_COMMAND; |
| 22825 | break; |
| 22826 | case CMD_XMIT_BLS_RSP64_WQE: |
| 22827 | bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); |
| 22828 | bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); |
| 22829 | bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); |
| 22830 | bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, |
| 22831 | phba->vpi_ids[phba->pport->vpi]); |
| 22832 | bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); |
| 22833 | bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, |
| 22834 | LPFC_WQE_LENLOC_NONE); |
| 22835 | /* Overwrite the pre-set comnd type with OTHER_COMMAND */ |
| 22836 | command_type = OTHER_COMMAND; |
| 22837 | break; |
| 22838 | case CMD_FCP_ICMND64_WQE: /* task mgmt commands */ |
| 22839 | case CMD_ABORT_XRI_WQE: /* abort iotag */ |
| 22840 | case CMD_SEND_FRAME: /* mds loopback */ |
| 22841 | /* cases already formatted for sli4 wqe - no chgs necessary */ |
| 22842 | return; |
| 22843 | default: |
| 22844 | dump_stack(); |
| 22845 | lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
| 22846 | "6207 Invalid command 0x%x\n" , |
| 22847 | cmnd); |
| 22848 | break; |
| 22849 | } |
| 22850 | |
| 22851 | wqe->generic.wqe_com.abort_tag = abort_tag; |
| 22852 | bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag); |
| 22853 | bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); |
| 22854 | bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); |
| 22855 | } |
| 22856 | |