blob: 023b9d42ad9a229ebca4876de4336e25d611f59d [file] [log] [blame]
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"
BFA_TRC_FILE(LDRV, BSG);
/*
 * Bring the IOC out of the disabled state.  If the IOC is not currently
 * disabled this is a no-op that reports success.  Otherwise the enable is
 * kicked off under the driver lock and the caller blocks (outside the
 * lock) until the firmware signals completion via bfad->enable_comp.
 */
int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *gen_cmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		/* Already enabled (or enabling) - nothing to do */
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		gen_cmd->status = BFA_STATUS_OK;
		return 0;
	}

	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	gen_cmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	/* Sleep until the enable completes; must not hold the spinlock here */
	wait_for_completion(&bfad->enable_comp);
	return 0;
}
/*
 * Disable the IOC.  Succeeds immediately if the IOC is already disabled.
 * Only one disable may be in flight at a time: a concurrent request is
 * rejected with -EBUSY via the disable_active flag.  The caller sleeps on
 * bfad->disable_comp (outside the lock) until the disable completes.
 */
int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* Already disabled - report success without touching the hardware */
	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;
		return 0;
	}

	/* Another disable is still in progress */
	if (bfad->disable_active) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EBUSY;
	}

	/*
	 * Mark the disable in progress and initialize the completion
	 * before starting the operation, both under the lock, so the
	 * completion cannot be signalled before it is ready.
	 */
	bfad->disable_active = BFA_TRUE;
	init_completion(&bfad->disable_comp);
	bfa_iocfc_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_for_completion(&bfad->disable_comp);
	/* NOTE(review): cleared without the lock; assumes waiters serialize
	 * through disable_active itself - confirm against callers. */
	bfad->disable_active = BFA_FALSE;
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Collect IOC identification info (WWNs, MAC addresses, serial number,
 * names and hardware paths) for the caller.
 *
 * Fix: the original hwpath-trimming loops tested
 * adapter_hwpath[i] BEFORE checking i < BFA_STRING_32, so a string with
 * fewer than two ':' separators could read - and then write the NUL -
 * past the end of the buffer.  The bound is now checked first and the
 * terminator index is clamped to BFA_STRING_32 - 1.
 */
static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s *im_port;
	struct bfa_port_attr_s pattr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	iocmd->bfad_num = bfad->inst_no;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path: truncate pci_name after the second ':' */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	/* advance to the first ':' (bound checked before dereference) */
	for (i = 0; i < BFA_STRING_32 - 1 &&
	     iocmd->adapter_hwpath[i] != ':'; i++)
		;
	/* advance to the second ':' - only if the first one was found */
	if (iocmd->adapter_hwpath[i] == ':')
		for (i++; i < BFA_STRING_32 - 1 &&
		     iocmd->adapter_hwpath[i] != ':'; i++)
			;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Return IOC attributes, augmented with driver-side information (driver
 * name/version, firmware and option-ROM versions, cached PCI attributes).
 *
 * Fix: strncpy() with a count equal to the destination size does not
 * guarantee NUL termination; copy at most BFA_VERSION_LEN - 1 bytes and
 * terminate explicitly.
 */
static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN - 1);
	iocmd->ioc_attr.driver_attr.driver_ver[BFA_VERSION_LEN - 1] = '\0';
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
	       sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
	       sizeof(struct bfa_ioc_pci_attr_s));
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
/* Copy the accumulated IOC statistics into the caller's buffer. */
int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_stats_s *stats_cmd =
			(struct bfa_bsg_ioc_stats_s *)cmd;

	bfa_ioc_get_stats(&bfad->bfa, &stats_cmd->ioc_stats);
	stats_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Read firmware statistics into the variable-length area that follows the
 * fixed bsg header.  The payload length is validated against the size of
 * struct bfa_fw_stats_s before the buffer is touched.
 *
 * Fix: removed a dead "goto out" that jumped to the immediately
 * following label; the error status is still traced.
 */
int
bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
			   unsigned int payload_len)
{
	struct bfa_bsg_ioc_fwstats_s *iocmd =
			(struct bfa_bsg_ioc_fwstats_s *)cmd;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_ioc_fwstats_s),
			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	/* stats land immediately after the fixed-size command header */
	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, iocmd->status);
out:
	bfa_trc(bfad, 0x6666);
	return 0;
}
/*
 * Clear either the driver-maintained IOC statistics or the firmware
 * statistics, selected by v_cmd.  Unrecognized sub-commands leave the
 * command status untouched, as before.
 */
int
bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *gen_cmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long irq_flags;

	switch (v_cmd) {
	case IOCMD_IOC_RESET_STATS:
		bfa_ioc_clear_stats(&bfad->bfa);
		gen_cmd->status = BFA_STATUS_OK;
		break;
	case IOCMD_IOC_RESET_FWSTATS:
		/* firmware stats clear talks to hardware - take the lock */
		spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
		gen_cmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		break;
	}
	return 0;
}
/*
 * Set the adapter name or the port name (selected by v_cmd) from the
 * caller-supplied string.  Always reports success.
 */
int
bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_ioc_name_s *name_cmd =
			(struct bfa_bsg_ioc_name_s *) cmd;

	switch (v_cmd) {
	case IOCMD_IOC_SET_ADAPTER_NAME:
		strcpy(bfad->adapter_name, name_cmd->name);
		break;
	case IOCMD_IOC_SET_PORT_NAME:
		strcpy(bfad->port_name, name_cmd->name);
		break;
	}
	name_cmd->status = BFA_STATUS_OK;
	return 0;
}
/* Fetch the IOC firmware-configuration attributes for the caller. */
int
bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_attr_s *attr_cmd =
			(struct bfa_bsg_iocfc_attr_s *)cmd;

	attr_cmd->status = BFA_STATUS_OK;
	bfa_iocfc_get_attr(&bfad->bfa, &attr_cmd->iocfc_attr);
	return 0;
}
/* Invalidate the firmware signature; status comes from the IOC layer. */
int
bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *gen_cmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	gen_cmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
/* Apply caller-supplied interrupt-coalescing attributes to the IOC. */
int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_intr_s *intr_cmd =
			(struct bfa_bsg_iocfc_intr_s *)cmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	intr_cmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &intr_cmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
int
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK) {
bfa_trc(bfad, iocmd->status);
return 0;
}
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
return 0;
}
int
bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK) {
bfa_trc(bfad, iocmd->status);
return 0;
}
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
return 0;
}
static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
struct bfa_lport_attr_s port_attr;
unsigned long flags;
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
iocmd->attr.pid = port_attr.pid;
else
iocmd->attr.pid = 0;
iocmd->attr.port_type = port_attr.port_type;
iocmd->attr.loopback = port_attr.loopback;
iocmd->attr.authfail = port_attr.authfail;
strncpy(iocmd->attr.port_symname.symname,
port_attr.port_cfg.sym_name.symname,
sizeof(port_attr.port_cfg.sym_name.symname));
iocmd->status = BFA_STATUS_OK;
return 0;
}
int
bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
unsigned int payload_len)
{
struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
struct bfad_hal_comp fcomp;
void *iocmd_bufptr;
unsigned long flags;
if (bfad_chk_iocmd_sz(payload_len,
sizeof(struct bfa_bsg_port_stats_s),
sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
iocmd->status = BFA_STATUS_VERSION_FAIL;
return 0;
}
iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
iocmd_bufptr, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK) {
bfa_trc(bfad, iocmd->status);
goto out;
}
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK) {
bfa_trc(bfad, iocmd->status);
return 0;
}
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
return 0;
}
/*
 * Apply one port-configuration sub-command (topology, speed, hard ALPA
 * set/clear) under the driver lock.  Unknown sub-commands leave the
 * status untouched, as before.
 */
int
bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
{
	struct bfa_bsg_port_cfg_s *cfg_cmd =
			(struct bfa_bsg_port_cfg_s *)iocmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	switch (v_cmd) {
	case IOCMD_PORT_CFG_TOPO:
		cfg_cmd->status = bfa_fcport_cfg_topology(&bfad->bfa,
							  cfg_cmd->param);
		break;
	case IOCMD_PORT_CFG_SPEED:
		cfg_cmd->status = bfa_fcport_cfg_speed(&bfad->bfa,
						       cfg_cmd->param);
		break;
	case IOCMD_PORT_CFG_ALPA:
		cfg_cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa,
							  cfg_cmd->param);
		break;
	case IOCMD_PORT_CLR_ALPA:
		cfg_cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
		break;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
/* Configure the port's maximum frame size from the caller's request. */
int
bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_maxfrsize_s *frsize_cmd =
			(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	frsize_cmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa,
						      frsize_cmd->maxfrsize);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
/*
 * Enable or disable buffer-to-buffer credit recovery (BBCR).
 * Returns -EINVAL for any sub-command other than enable/disable.
 */
int
bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_bbcr_enable_s *bbcr_cmd =
			(struct bfa_bsg_bbcr_enable_s *)pcmd;
	unsigned long irq_flags;
	int status;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	switch (cmd) {
	case IOCMD_PORT_BBCR_ENABLE:
		status = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE,
					     bbcr_cmd->bb_scn);
		break;
	case IOCMD_PORT_BBCR_DISABLE:
		status = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
		break;
	default:
		/* not a BBCR sub-command */
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	bbcr_cmd->status = status;
	return 0;
}
/* Fetch the current BBCR attributes of the port. */
int
bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_bbcr_attr_s *bbcr_cmd =
			(struct bfa_bsg_bbcr_attr_s *) pcmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	bbcr_cmd->status = bfa_fcport_get_bbcr_attr(&bfad->bfa,
						    &bbcr_cmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
/*
 * Look up a logical port by (vf_id, pwwn) and return its attributes.
 * Reports BFA_STATUS_UNKNOWN_LWWN if no such port exists.
 */
static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_lport_attr_s *attr_cmd =
			(struct bfa_bsg_lport_attr_s *)cmd;
	struct bfa_fcs_lport_s *lport;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	lport = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				    attr_cmd->vf_id, attr_cmd->pwwn);
	if (lport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		attr_cmd->status = BFA_STATUS_UNKNOWN_LWWN;
		return 0;
	}

	bfa_fcs_lport_get_attr(lport, &attr_cmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	attr_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Look up a logical port by (vf_id, pwwn) and return its FCS statistics.
 * Reports BFA_STATUS_UNKNOWN_LWWN if no such port exists.
 */
int
bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_lport_stats_s *stats_cmd =
			(struct bfa_bsg_lport_stats_s *)cmd;
	struct bfa_fcs_lport_s *lport;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	lport = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				    stats_cmd->vf_id, stats_cmd->pwwn);
	if (lport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		stats_cmd->status = BFA_STATUS_UNKNOWN_LWWN;
		return 0;
	}

	bfa_fcs_lport_get_stats(lport, &stats_cmd->port_stats);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	stats_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Clear the FCS statistics of the logical port identified by
 * (vf_id, vpwwn) and the I/O statistics of every active itnim that
 * belongs to that port.  Reports BFA_STATUS_UNKNOWN_LWWN if the port
 * does not exist.  The whole operation runs under bfad_lock.
 */
int
bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_clear_stats(fcs_port);
	/* clear IO stats from all active itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		/* skip itnims that are not logged in through this lport */
		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
			continue;
		bfa_itnim_clear_stats(itnim);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
/*
 * Look up a logical port by (vf_id, pwwn) and accumulate the FCP I/O
 * statistics tagged with that port's lp_tag.
 */
int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_lport_iostats_s *io_cmd =
			(struct bfa_bsg_lport_iostats_s *)cmd;
	struct bfa_fcs_lport_s *lport;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	lport = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				    io_cmd->vf_id, io_cmd->pwwn);
	if (lport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		io_cmd->status = BFA_STATUS_UNKNOWN_LWWN;
		return 0;
	}

	bfa_fcpim_port_iostats(&bfad->bfa, &io_cmd->iostats, lport->lp_tag);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	io_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Return up to iocmd->nrports rport qualifiers for the logical port
 * identified by (vf_id, pwwn).  The qualifiers are written into the
 * variable-length area that follows the fixed bsg header; on return
 * iocmd->nrports holds the number actually filled in.
 */
int
bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_lport_get_rports_s *iocmd =
			(struct bfa_bsg_lport_get_rports_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;
	void *iocmd_bufptr;

	if (iocmd->nrports == 0)
		return -EINVAL;

	/* NOTE(review): nrports is caller-controlled; the multiplication
	 * below could wrap for very large values - confirm that
	 * bfad_chk_iocmd_sz() (or the bsg layer) bounds payload_len first. */
	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_lport_get_rports_s),
			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
			!= BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	/* rport qualifiers land right after the fixed command header */
	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_lport_get_rports_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc(bfad, 0);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_rport_quals(fcs_port,
			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
			&iocmd->nrports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
/*
 * Return the attributes of a remote port.  The rport is located on the
 * lport (vf_id, pwwn): by (rpwwn, pid) qualifier when a pid is supplied,
 * otherwise by rpwwn alone.
 */
int
bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_attr_s *attr_cmd =
			(struct bfa_bsg_rport_attr_s *)cmd;
	struct bfa_fcs_lport_s *lport;
	struct bfa_fcs_rport_s *rport;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	lport = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				    attr_cmd->vf_id, attr_cmd->pwwn);
	if (lport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		attr_cmd->status = BFA_STATUS_UNKNOWN_LWWN;
		return 0;
	}

	/* a non-zero pid disambiguates rports sharing the same rpwwn */
	rport = attr_cmd->pid ?
		bfa_fcs_lport_get_rport_by_qualifier(lport, attr_cmd->rpwwn,
						     attr_cmd->pid) :
		bfa_fcs_rport_lookup(lport, attr_cmd->rpwwn);
	if (rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		attr_cmd->status = BFA_STATUS_UNKNOWN_RWWN;
		return 0;
	}

	bfa_fcs_rport_get_attr(rport, &attr_cmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	attr_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Translate a remote port (vf_id, pwwn, rpwwn) into its SCSI address:
 * host number, bus (always 0), target id, and lun (always 0).
 * Fails with UNKNOWN_LWWN / UNKNOWN_RWWN when the lport, itnim, or its
 * driver-side mapping cannot be found.
 */
static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *fcs_itnim;
	struct bfad_itnim_s *drv_itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	/* the driver-side itnim or its im_port may not be attached yet */
	drv_itnim = fcs_itnim->itnim_drv;
	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* bus and lun are fixed for this HBA's SCSI addressing */
	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_rport_stats_s *iocmd =
(struct bfa_bsg_rport_stats_s *)cmd;
struct bfa_fcs_lport_s *fcs_port;
struct bfa_fcs_rport_s *fcs_rport;
unsigned long flags;
spin_lock_irqsave(&bfad->bfad_lock, flags);
fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
iocmd->vf_id, iocmd->pwwn);
if (fcs_port == NULL) {
bfa_trc(bfad, 0);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
goto out;
}
fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
if (fcs_rport == NULL) {
bfa_trc(bfad, 0);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
goto out;
}
memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
sizeof(struct bfa_rport_stats_s));
if (bfa_fcs_rport_get_halrport(fcs_rport)) {
memcpy((void *)&iocmd->stats.hal_stats,
(void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
sizeof(struct bfa_rport_hal_stats_s));
}
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_OK;
out:
return 0;
}
/*
 * Zero the FCS statistics of a remote port, and the hardware statistics
 * of its HAL rport when one is attached.
 */
int
bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *reset_cmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *lport;
	struct bfa_fcs_rport_s *fcs_rport;
	struct bfa_rport_s *hal_rport;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	lport = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				    reset_cmd->vf_id, reset_cmd->pwwn);
	if (lport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		reset_cmd->status = BFA_STATUS_UNKNOWN_LWWN;
		return 0;
	}

	fcs_rport = bfa_fcs_rport_lookup(lport, reset_cmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		reset_cmd->status = BFA_STATUS_UNKNOWN_RWWN;
		return 0;
	}

	memset((char *)&fcs_rport->stats, 0,
	       sizeof(struct bfa_rport_stats_s));
	/* also clear hardware stats if a HAL rport is attached */
	hal_rport = bfa_fcs_rport_get_halrport(fcs_rport);
	if (hal_rport)
		memset(&hal_rport->stats, 0, sizeof(hal_rport->stats));
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	reset_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Record an administratively assigned speed for a remote port.  The
 * speed is pushed to firmware only when RPSC did not already report one
 * and a HAL rport is attached.
 */
int
bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_set_speed_s *speed_cmd =
			(struct bfa_bsg_rport_set_speed_s *)cmd;
	struct bfa_fcs_lport_s *lport;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	lport = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				    speed_cmd->vf_id, speed_cmd->pwwn);
	if (lport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		speed_cmd->status = BFA_STATUS_UNKNOWN_LWWN;
		return 0;
	}

	fcs_rport = bfa_fcs_rport_lookup(lport, speed_cmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		speed_cmd->status = BFA_STATUS_UNKNOWN_RWWN;
		return 0;
	}

	fcs_rport->rpf.assigned_speed = speed_cmd->speed;
	/* Set this speed in f/w only if the RPSC speed is not available */
	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN &&
	    fcs_rport->bfa_rport)
		bfa_rport_speed(fcs_rport->bfa_rport, speed_cmd->speed);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	speed_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Look up a virtual port by (vf_id, vpwwn) and return its attributes.
 * Reports BFA_STATUS_UNKNOWN_VWWN if no such vport exists.
 */
int
bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vport_attr_s *attr_cmd =
			(struct bfa_bsg_vport_attr_s *)cmd;
	struct bfa_fcs_vport_s *vport;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				     attr_cmd->vf_id, attr_cmd->vpwwn);
	if (vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		attr_cmd->status = BFA_STATUS_UNKNOWN_VWWN;
		return 0;
	}

	bfa_fcs_vport_get_attr(vport, &attr_cmd->vport_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	attr_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Return a virtual port's statistics: the vport-level counters plus the
 * lport counters of its underlying logical port.
 */
int
bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vport_stats_s *stats_cmd =
			(struct bfa_bsg_vport_stats_s *)cmd;
	struct bfa_fcs_vport_s *vport;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				     stats_cmd->vf_id, stats_cmd->vpwwn);
	if (vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		stats_cmd->status = BFA_STATUS_UNKNOWN_VWWN;
		return 0;
	}

	memcpy((void *)&stats_cmd->vport_stats,
	       (void *)&vport->vport_stats,
	       sizeof(struct bfa_vport_stats_s));
	/* overlay the embedded lport counters from the underlying lport */
	memcpy((void *)&stats_cmd->vport_stats.port_stats,
	       (void *)&vport->lport.stats,
	       sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	stats_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Zero both the vport-level and the underlying lport-level statistics
 * of the virtual port identified by (vf_id, vpwwn).
 */
int
bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_reset_stats_s *reset_cmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	struct bfa_fcs_vport_s *vport;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				     reset_cmd->vf_id, reset_cmd->vpwwn);
	if (vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		reset_cmd->status = BFA_STATUS_UNKNOWN_VWWN;
		return 0;
	}

	memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
	memset(&vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	reset_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Return the WWNs of up to iocmd->nports logical ports on the fabric
 * (virtual fabric vf_id).  The WWNs are written into the variable-length
 * area that follows the fixed bsg header; iocmd->nports is updated to
 * the number actually returned.
 */
static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t	*fcs_vf;
	uint32_t	nports = iocmd->nports;
	unsigned long	flags;
	void	*iocmd_bufptr;

	if (nports == 0) {
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	/* NOTE(review): sizeof on a VLA type sized by the caller-controlled
	 * iocmd->nports - confirm bfad_chk_iocmd_sz() guards against
	 * oversized/overflowing requests. */
	if (bfad_chk_iocmd_sz(payload_len,
		sizeof(struct bfa_bsg_fabric_get_lports_s),
		sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	/* WWN array lands right after the fixed command header */
	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
/* Program the QoS bandwidth allocation from the caller's request. */
int
bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_qos_bw_s *bw_cmd = (struct bfa_bsg_qos_bw_s *)pcmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	bw_cmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &bw_cmd->qos_bw);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
/*
 * Enable or disable target rate limiting.  Rejected with
 * BFA_STATUS_TOPOLOGY_LOOP when both the configured and the active
 * topology are loop.  A default rate-limit speed is installed if none
 * was configured before.
 */
int
bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *gen_cmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP &&
	    fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
		/* rate limiting is not supported in loop topology */
		gen_cmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	} else {
		if (cmd == IOCMD_RATELIM_ENABLE)
			fcport->cfg.ratelimit = BFA_TRUE;
		else if (cmd == IOCMD_RATELIM_DISABLE)
			fcport->cfg.ratelimit = BFA_FALSE;

		/* fall back to 1G if no rate-limit speed configured yet */
		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;

		gen_cmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
/*
 * Set the default target-rate-limit speed.  AUTO and speeds above the
 * port's supported maximum are rejected; loop topology is rejected too.
 */
int
bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_trl_speed_s *speed_cmd =
			(struct bfa_bsg_trl_speed_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);

	/* Auto and speeds greater than the supported speed, are invalid */
	if (speed_cmd->speed == BFA_PORT_SPEED_AUTO ||
	    speed_cmd->speed > fcport->speed_sup) {
		speed_cmd->status = BFA_STATUS_UNSUPP_SPEED;
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		return 0;
	}

	if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP &&
	    fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
		/* rate limiting is not supported in loop topology */
		speed_cmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	} else {
		fcport->cfg.trl_def_speed = speed_cmd->speed;
		speed_cmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
/* Set the FCP-IM path timeout value from the caller's request. */
int
bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_s *fcpim_cmd = (struct bfa_bsg_fcpim_s *)cmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	bfa_fcpim_path_tov_set(&bfad->bfa, fcpim_cmd->param);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	fcpim_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Accumulate module-level FCP-IM statistics by summing the per-itnim
 * I/O counters of every active itnim.
 */
int
bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstats_s *stats_cmd =
			(struct bfa_bsg_fcpim_modstats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	/* start from zero, then fold in each itnim's counters */
	memset((void *)&stats_cmd->modstats, 0,
	       sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(&stats_cmd->modstats, &(itnim->stats));
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	stats_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Clear module-level FCP-IM statistics: the counters of every active
 * itnim plus the accumulated stats of already-deleted itnims.
 */
int
bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstatsclr_s *clr_cmd =
			(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	/* also reset the counters kept for itnims that have gone away */
	memset(&fcpim->del_itn_stats, 0,
	       sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	clr_cmd->status = BFA_STATUS_OK;
	return 0;
}
/* Return the statistics accumulated for itnims that have been deleted. */
int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_del_itn_stats_s *stats_cmd =
			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	memcpy((void *)&stats_cmd->modstats,
	       (void *)&fcpim->del_itn_stats,
	       sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	stats_cmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Return the attributes of the itnim identified by (vf_id, lpwwn,
 * rpwwn); status comes from the FCS layer when the lport exists.
 */
static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *attr_cmd =
			(struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s *lport;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	lport = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				    attr_cmd->vf_id, attr_cmd->lpwwn);
	if (!lport)
		attr_cmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		attr_cmd->status = bfa_fcs_itnim_attr_get(lport,
					attr_cmd->rpwwn, &attr_cmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
/*
 * Return the HAL I/O statistics of an itnim.  If the itnim has no HAL
 * counterpart the stats buffer is left untouched but status is still OK,
 * matching the original behavior.
 */
static int
bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_iostats_s *io_cmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *lport;
	struct bfa_fcs_itnim_s *fcs_itnim;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	lport = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				    io_cmd->vf_id, io_cmd->lpwwn);
	if (!lport) {
		io_cmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
		goto unlock;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(lport, io_cmd->rpwwn);
	if (fcs_itnim == NULL) {
		io_cmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto unlock;
	}

	io_cmd->status = BFA_STATUS_OK;
	/* copy hardware stats only when a HAL itnim is attached */
	if (bfa_fcs_itnim_get_halitn(fcs_itnim))
		memcpy((void *)&io_cmd->iostats, (void *)
		       &(bfa_fcs_itnim_get_halitn(fcs_itnim)->stats),
		       sizeof(struct bfa_itnim_iostats_s));
unlock:
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
/*
 * Clear both the FCS-level and (when present) the HAL-level statistics
 * of the itnim identified by (vf_id, pwwn, rpwwn).
 *
 * Fix: bfa_itnim_clear_stats() was called with the result of
 * bfa_fcs_itnim_get_halitn() unconditionally, while the sibling
 * bfad_iocmd_itnim_get_iostats() NULL-checks that same accessor; guard
 * it here too to avoid a NULL dereference for an itnim with no HAL
 * counterpart.
 */
static int
bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
			/* HAL itnim may not be attached yet */
			if (bfa_fcs_itnim_get_halitn(itnim))
				bfa_itnim_clear_stats(
					bfa_fcs_itnim_get_halitn(itnim));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
/*
 * Return the FCS-level statistics of the itnim identified by
 * (vf_id, lpwwn, rpwwn).
 */
static int
bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_itnstats_s *stats_cmd =
			(struct bfa_bsg_itnim_itnstats_s *)cmd;
	struct bfa_fcs_lport_s *lport;
	struct bfa_fcs_itnim_s *fcs_itnim;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	lport = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				    stats_cmd->vf_id, stats_cmd->lpwwn);
	if (!lport) {
		stats_cmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
		goto unlock;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(lport, stats_cmd->rpwwn);
	if (fcs_itnim == NULL) {
		stats_cmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto unlock;
	}

	stats_cmd->status = BFA_STATUS_OK;
	bfa_fcs_itnim_stats_get(lport, stats_cmd->rpwwn,
				&stats_cmd->itnstats);
unlock:
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
/* Enable the FC port; status comes straight from the fcport layer. */
int
bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *gen_cmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	gen_cmd->status = bfa_fcport_enable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
/* Disable the FC port; status comes straight from the fcport layer. */
int
bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *gen_cmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	gen_cmd->status = bfa_fcport_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
&iocmd->pcifn_cfg,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
&iocmd->pcifn_id, iocmd->port,
iocmd->pcifn_class, iocmd->bw_min,
iocmd->bw_max, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
iocmd->pcifn_id,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
iocmd->pcifn_id, iocmd->bw_min,
iocmd->bw_max, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfa_trc(bfad, iocmd->status);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
bfa_trc(bfad, iocmd->status);
out:
return 0;
}
int
bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_adapter_cfg_mode_s *iocmd =
(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags = 0;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
iocmd->cfg.mode, iocmd->cfg.max_pf,
iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_port_cfg_mode_s *iocmd =
(struct bfa_bsg_port_cfg_mode_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags = 0;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
iocmd->instance, iocmd->cfg.mode,
iocmd->cfg.max_pf, iocmd->cfg.max_vf,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
bfad_hcb_comp, &fcomp);
else
iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
/*
 * Query the FAA (Fabric Assigned Address) attributes of the adapter.
 *
 * @bfad: driver instance
 * @cmd:  bsg payload, cast to struct bfa_bsg_faa_attr_s
 *
 * Always returns 0; the BFA status is carried in iocmd->status.
 * The previous pre-assignment of BFA_STATUS_OK before calling
 * bfa_faa_query() was a dead store (unconditionally overwritten by
 * the call's return value) and has been removed.
 */
int
bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
				      bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	/* Request accepted: block until the firmware completion fires. */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
/*
 * Read the CEE attributes into the buffer that trails the bsg header.
 * bfad_mutex serializes concurrent CEE requests; it is held across
 * the asynchronous wait and released on every path.
 */
int
bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_cee_attr_s *iocmd =
			(struct bfa_bsg_cee_attr_s *)cmd;
	struct bfad_hal_comp cee_comp;
	void *attr_buf;
	unsigned long irq_flags;

	/* The payload must hold the header plus a full attr structure. */
	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_attr_s),
			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	attr_buf = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, attr_buf,
					 bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&cee_comp.comp);
		mutex_unlock(&bfad_mutex);
	} else {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
	}
	return 0;
}
/*
 * Read the CEE statistics into the buffer that trails the bsg header.
 * Same locking/serialization pattern as bfad_iocmd_cee_attr().
 */
int
bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_cee_stats_s *iocmd =
			(struct bfa_bsg_cee_stats_s *)cmd;
	struct bfad_hal_comp cee_comp;
	void *stats_buf;
	unsigned long irq_flags;

	/* The payload must hold the header plus a full stats structure. */
	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_stats_s),
			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	stats_buf = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, stats_buf,
					  bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	if (iocmd->status == BFA_STATUS_OK) {
		wait_for_completion(&cee_comp.comp);
		mutex_unlock(&bfad_mutex);
	} else {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
	}
	return 0;
}
/* Reset the CEE statistics counters; traces on failure. */
int
bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *req = (struct bfa_bsg_gen_s *)cmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	req->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	if (req->status != BFA_STATUS_OK)
		bfa_trc(bfad, 0x5555);
	return 0;
}
int
bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfa_trc(bfad, iocmd->status);
if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfa_trc(bfad, iocmd->status);
if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_flash_attr_s *iocmd =
(struct bfa_bsg_flash_attr_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
iocmd->instance, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
unsigned int payload_len)
{
struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
void *iocmd_bufptr;
struct bfad_hal_comp fcomp;
unsigned long flags;
if (bfad_chk_iocmd_sz(payload_len,
sizeof(struct bfa_bsg_flash_s),
iocmd->bufsz) != BFA_STATUS_OK) {
iocmd->status = BFA_STATUS_VERSION_FAIL;
return 0;
}
iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
iocmd->type, iocmd->instance, iocmd_bufptr,
iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
unsigned int payload_len)
{
struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
struct bfad_hal_comp fcomp;
void *iocmd_bufptr;
unsigned long flags;
if (bfad_chk_iocmd_sz(payload_len,
sizeof(struct bfa_bsg_flash_s),
iocmd->bufsz) != BFA_STATUS_OK) {
iocmd->status = BFA_STATUS_VERSION_FAIL;
return 0;
}
iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_get_temp_s *iocmd =
(struct bfa_bsg_diag_get_temp_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
&iocmd->result, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfa_trc(bfad, iocmd->status);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_memtest_s *iocmd =
(struct bfa_bsg_diag_memtest_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
&iocmd->memtest, iocmd->pat,
&iocmd->result, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfa_trc(bfad, iocmd->status);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_loopback_s *iocmd =
(struct bfa_bsg_diag_loopback_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
iocmd->speed, iocmd->lpcnt, iocmd->pat,
&iocmd->result, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfa_trc(bfad, iocmd->status);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_fwping_s *iocmd =
(struct bfa_bsg_diag_fwping_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
iocmd->pattern, &iocmd->result,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfa_trc(bfad, iocmd->status);
if (iocmd->status != BFA_STATUS_OK)
goto out;
bfa_trc(bfad, 0x77771);
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
iocmd->queue, &iocmd->result,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_sfp_show_s *iocmd =
(struct bfa_bsg_sfp_show_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfa_trc(bfad, iocmd->status);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
bfa_trc(bfad, iocmd->status);
out:
return 0;
}
/* Run the LED diagnostic test; synchronous, status via iocmd->status. */
int
bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_led_s *req = (struct bfa_bsg_diag_led_s *)cmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	req->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
				       &req->ledtest);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	return 0;
}
/* Start/stop port (and optionally end-to-end link) beaconing. */
int
bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_beacon_s *req =
			(struct bfa_bsg_diag_beacon_s *)cmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	req->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
					   req->beacon, req->link_e2e_beacon,
					   req->second);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	return 0;
}
/* Report whether a diagnostic loopback test is currently running. */
int
bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_lb_stat_s *req =
			(struct bfa_bsg_diag_lb_stat_s *)cmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	req->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	bfa_trc(bfad, req->status);

	return 0;
}
int
bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
{
struct bfa_bsg_dport_enable_s *iocmd =
(struct bfa_bsg_dport_enable_s *)pcmd;
unsigned long flags;
struct bfad_hal_comp fcomp;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
iocmd->pat, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
bfa_trc(bfad, iocmd->status);
else {
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
}
return 0;
}
int
bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
unsigned long flags;
struct bfad_hal_comp fcomp;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
bfa_trc(bfad, iocmd->status);
else {
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
}
return 0;
}
int
bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
{
struct bfa_bsg_dport_enable_s *iocmd =
(struct bfa_bsg_dport_enable_s *)pcmd;
unsigned long flags;
struct bfad_hal_comp fcomp;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
iocmd->pat, bfad_hcb_comp,
&fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK) {
bfa_trc(bfad, iocmd->status);
} else {
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
}
return 0;
}
/* Report the current D-port test state/result; synchronous. */
int
bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_diag_dport_show_s *req =
			(struct bfa_bsg_diag_dport_show_s *)pcmd;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	req->status = bfa_dport_show(&bfad->bfa, &req->result);
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	return 0;
}
int
bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_phy_attr_s *iocmd =
(struct bfa_bsg_phy_attr_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
&iocmd->attr, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_phy_stats_s *iocmd =
(struct bfa_bsg_phy_stats_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
&iocmd->stats, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
/*
 * Read PHY firmware into the buffer that trails the bsg header.
 *
 * @bfad:        driver instance
 * @cmd:         bsg payload, cast to struct bfa_bsg_phy_s
 * @payload_len: total bsg payload length, validated against bufsz
 *
 * Always returns 0; the BFA status is carried in iocmd->status.
 * The redundant "if (iocmd->status != BFA_STATUS_OK) goto out;"
 * that sat immediately before the out: label (a no-op branch) has
 * been removed.
 */
int
bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	struct bfad_hal_comp fcomp;
	void *iocmd_bufptr;
	unsigned long flags;

	/* The payload must hold the header plus bufsz bytes of data. */
	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
				     iocmd->instance, iocmd_bufptr,
				     iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	/* Request accepted: block until the PHY read completes. */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
/*
 * Report vHBA attributes (WWNs, port-log state, IO-profile state and
 * path timeout) snapshotted under the driver lock.
 */
int
bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vhba_attr_s *req =
			(struct bfa_bsg_vhba_attr_s *)cmd;
	struct bfa_vhba_attr_s *out = &req->attr;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	out->pwwn = bfad->bfa.ioc.attr->pwwn;
	out->nwwn = bfad->bfa.ioc.attr->nwwn;
	out->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
	out->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
	out->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
	req->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	return 0;
}
int
bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
void *iocmd_bufptr;
struct bfad_hal_comp fcomp;
unsigned long flags;
if (bfad_chk_iocmd_sz(payload_len,
sizeof(struct bfa_bsg_phy_s),
iocmd->bufsz) != BFA_STATUS_OK) {
iocmd->status = BFA_STATUS_VERSION_FAIL;
return 0;
}
iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
iocmd->instance, iocmd_bufptr, iocmd->bufsz,
0, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
/*
 * Copy the driver's port-log buffer into the area that trails the
 * bsg header. Rejects buffers smaller than a full port log.
 */
int
bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_debug_s *req = (struct bfa_bsg_debug_s *)cmd;
	void *dst;

	if (req->bufsz < sizeof(struct bfa_plog_s)) {
		bfa_trc(bfad, sizeof(struct bfa_plog_s));
		req->status = BFA_STATUS_EINVAL;
		return 0;
	}

	dst = (char *)req + sizeof(struct bfa_bsg_debug_s);
	memcpy(dst, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
	req->status = BFA_STATUS_OK;
	return 0;
}
#define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U /* 16K chunks for FW dump */
/*
 * Read one chunk of the saved firmware core into the buffer that
 * trails the bsg header. The caller iterates, advancing iocmd->offset
 * by the number of bytes returned in iocmd->bufsz on each call.
 *
 * Always returns 0; the BFA status is carried in iocmd->status.
 */
int
bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
	void *iocmd_bufptr;
	unsigned long flags;
	u32 offset;

	/* Payload must hold the header plus one full chunk. */
	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}
	/*
	 * Reject undersized or misaligned requests: the fwcore reader
	 * works in u16 units at u32-aligned offsets.
	 */
	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
			!IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
			!IS_ALIGNED(iocmd->offset, sizeof(u32))) {
		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}
	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* Round-trip through a local: the call updates offset in place. */
	offset = iocmd->offset;
	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
				&offset, &iocmd->bufsz);
	iocmd->offset = offset;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
out:
	return 0;
}
/*
 * Debug control multiplexer: clear the one-shot fwsave flag, clear
 * the port log, or start/stop the driver trace, selected by v_cmd.
 * Always reports BFA_STATUS_OK.
 */
int
bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *req = (struct bfa_bsg_gen_s *)cmd;
	unsigned long irq_flags;

	switch (v_cmd) {
	case IOCMD_DEBUG_FW_STATE_CLR:
		spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
		spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
		break;
	case IOCMD_DEBUG_PORTLOG_CLR:
		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
		break;
	case IOCMD_DEBUG_START_DTRC:
		bfa_trc_init(bfad->trcmod);
		break;
	case IOCMD_DEBUG_STOP_DTRC:
		bfa_trc_stop(bfad->trcmod);
		break;
	default:
		break;
	}

	req->status = BFA_STATUS_OK;
	return 0;
}
/* Turn the port-log trace buffer on or off. */
int
bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_portlogctl_s *req = (struct bfa_bsg_portlogctl_s *)cmd;

	bfad->plog_buf.plog_enabled = (req->ctl == BFA_TRUE) ? 1 : 0;
	req->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Turn FCP-IM IO profiling on or off, selected by v_cmd.
 *
 * NOTE(review): do_gettimeofday() is a deprecated kernel API
 * (newer kernels use ktime_get_real_seconds()) — kept as-is here to
 * match this kernel baseline; confirm before porting forward.
 * NOTE(review): iocmd->status is only assigned for PROFILE_ON/OFF;
 * for any other v_cmd it is left as whatever the caller passed in —
 * verify callers never reach here with another value.
 */
int
bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_profile_s *iocmd =
			(struct bfa_bsg_fcpim_profile_s *)cmd;
	struct timeval tv;
	unsigned long flags;

	/* Profiling is timestamped with the current wall-clock seconds. */
	do_gettimeofday(&tv);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
/*
 * Fetch the IO profile of the ITN identified by vf_id/lpwwn/rpwwn.
 * Always returns 0; the BFA status is reported via iocmd->status.
 */
static int
bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_ioprofile_s *iocmd =
			(struct bfa_bsg_itnim_ioprofile_s *)cmd;
	struct bfa_fcs_lport_s *port;
	struct bfa_fcs_itnim_s *itn;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				   iocmd->vf_id, iocmd->lpwwn);
	if (port == NULL) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto unlock;
	}
	itn = bfa_fcs_itnim_lookup(port, iocmd->rpwwn);
	if (itn == NULL) {
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto unlock;
	}
	iocmd->status = bfa_itnim_get_ioprofile(
				bfa_fcs_itnim_get_halitn(itn),
				&iocmd->ioprofile);
unlock:
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);
	return 0;
}
int
bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcport_stats_s *iocmd =
(struct bfa_bsg_fcport_stats_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
struct bfa_cb_pending_q_s cb_qe;
init_completion(&fcomp.comp);
bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
&fcomp, &iocmd->stats);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK) {
bfa_trc(bfad, iocmd->status);
goto out;
}
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
struct bfa_cb_pending_q_s cb_qe;
init_completion(&fcomp.comp);
bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK) {
bfa_trc(bfad, iocmd->status);
goto out;
}
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
/*
 * Report the pre-boot (BIOS/pre-OS) boot configuration cached in the
 * IOCFC configuration response.
 */
int
bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_preboot_s *req = (struct bfa_bsg_preboot_s *)cmd;
	struct bfi_iocfc_cfgrsp_s *rsp = bfad->bfa.iocfc.cfgrsp;
	struct bfa_boot_pbc_s *out = &req->cfg;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	out->enable = rsp->pbc_cfg.boot_enabled;
	out->nbluns = rsp->pbc_cfg.nbluns;
	out->speed = rsp->pbc_cfg.port_speed;
	memcpy(out->pblun, rsp->pbc_cfg.blun, sizeof(out->pblun));
	req->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	return 0;
}
int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
BFA_FLASH_PART_PXECFG,
bfad->bfa.ioc.port_id, &iocmd->cfg,
sizeof(struct bfa_ethboot_cfg_s), 0,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
struct bfad_hal_comp fcomp;
unsigned long flags;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
BFA_FLASH_PART_PXECFG,
bfad->bfa.ioc.port_id, &iocmd->cfg,
sizeof(struct bfa_ethboot_cfg_s), 0,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (iocmd->status != BFA_STATUS_OK)
goto out;
wait_for_completion(&fcomp.comp);
iocmd->status = fcomp.status;
out:
return 0;
}
/*
 * Enable or disable trunking, selected by v_cmd. The port is bounced
 * (disable, reconfigure, re-enable) to apply the change.
 *
 * NOTE(review): unlike the sibling handlers, the D-port case returns
 * BFA_STATUS_DPORT_ERR directly as the handler's return code instead
 * of setting iocmd->status and returning 0 — confirm the bsg
 * dispatcher's treatment of a non-zero rc before changing this.
 */
int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* Trunking cannot be reconfigured while in D-port (diag) mode. */
	if (bfa_fcport_is_dport(&bfad->bfa)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_DPORT_ERR;
	}
	/* Trunking is not supported on loop topologies. */
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (v_cmd == IOCMD_TRUNK_ENABLE) {
			trunk->attr.state = BFA_TRUNK_OFFLINE;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_TRUE;
		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
			trunk->attr.state = BFA_TRUNK_DISABLED;
			bfa_fcport_disable(&bfad->bfa);
			fcport->cfg.trunked = BFA_FALSE;
		}
		/* Bring the port back up unless it is held disabled. */
		if (!bfa_fcport_is_disabled(&bfad->bfa))
			bfa_fcport_enable(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
/*
 * Report the trunk attributes. Not supported on loop topologies,
 * which are rejected with BFA_STATUS_TOPOLOGY_LOOP.
 */
int
bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) {
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	} else {
		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
		       sizeof(struct bfa_trunk_attr_s));
		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	return 0;
}
/*
 * Enable or disable QoS on an FC-mode IOC, selected by v_cmd.
 * Disabling also restores the default per-priority bandwidth split.
 *
 * NOTE(review): iocmd->status is never assigned when the IOC is not
 * FC-type, nor on the enable/disable path itself — the caller's
 * original buffer value is echoed back. Confirm whether callers rely
 * on this before adding a BFA_STATUS_OK assignment.
 */
int
bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
		/* QoS is not supported on loop topologies. */
		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
			(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
		else {
			if (v_cmd == IOCMD_QOS_ENABLE)
				fcport->cfg.qos_enabled = BFA_TRUE;
			else if (v_cmd == IOCMD_QOS_DISABLE) {
				fcport->cfg.qos_enabled = BFA_FALSE;
				/* Reset bandwidth split to defaults. */
				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
			}
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
/*
 * Report the QoS attributes (state, BB credit, configured and
 * operational bandwidth split). Rejected with TOPOLOGY_LOOP when
 * both the configured and active topologies are loop.
 */
int
bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long irq_flags;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) {
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	} else {
		iocmd->attr.state = fcport->qos_attr.state;
		/* total_bb_cr is kept big-endian by the firmware. */
		iocmd->attr.total_bb_cr =
			be32_to_cpu(fcport->qos_attr.total_bb_cr);
		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
		iocmd->status = BFA_STATUS_OK;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	return 0;
}
/*
 * Report the per-virtual-channel QoS attributes. The firmware keeps
 * the counters big-endian; the per-VC fields are plain bytes.
 */
int
bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_vc_attr_s *iocmd =
			(struct bfa_bsg_qos_vc_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_qos_vc_attr_s *src = &fcport->qos_vc_attr;
	unsigned long irq_flags;
	u32 vc;

	spin_lock_irqsave(&bfad->bfad_lock, irq_flags);
	iocmd->attr.total_vc_count = be16_to_cpu(src->total_vc_count);
	iocmd->attr.shared_credit = be16_to_cpu(src->shared_credit);
	iocmd->attr.elp_opmode_flags =
			be32_to_cpu(src->elp_opmode_flags);

	/* Copy the per-VC credit/priority info. */
	for (vc = 0; vc < iocmd->attr.total_vc_count; vc++) {
		iocmd->attr.vc_info[vc].vc_credit =
				src->vc_info[vc].vc_credit;
		iocmd->attr.vc_info[vc].borrow_credit =
				src->vc_info[vc].borrow_credit;
		iocmd->attr.vc_info[vc].priority =
				src->vc_info[vc].priority;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, irq_flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Fetch fcport statistics for the QoS bsg request.  Issues an async
 * request under the driver lock, then sleeps on a completion that the
 * stats callback (bfad_hcb_comp) signals when firmware replies.
 */
int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_cb_pending_q_s cb_qe;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	/* prepare the completion before the request can possibly fire */
	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}

	/* sleep outside the spinlock until the callback completes us */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}
/*
 * Clear fcport statistics for the QoS bsg request.  Same async pattern
 * as bfad_iocmd_qos_get_stats: queue the clear under the driver lock,
 * then wait for the firmware completion outside the lock.
 */
int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_cb_pending_q_s cb_qe;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	/* no payload buffer: the clear request only reports status */
	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else
		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}

	/* sleep outside the spinlock until the callback completes us */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}
/*
 * Return the fabric statistics for the virtual fabric identified by
 * iocmd->vf_id, or BFA_STATUS_UNKNOWN_VFID if no such VF exists.
 */
int
bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_stats_s *iocmd =
			(struct bfa_bsg_vf_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fabric;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fabric = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (!fabric) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		return 0;
	}
	/* snapshot the stats while still holding the driver lock */
	memcpy(&iocmd->stats, &fabric->stats,
	       sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Zero the fabric statistics for the virtual fabric identified by
 * iocmd->vf_id, or report BFA_STATUS_UNKNOWN_VFID if no such VF exists.
 */
int
bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_vf_reset_stats_s *iocmd =
			(struct bfa_bsg_vf_reset_stats_s *)cmd;
	struct bfa_fcs_fabric_s *fabric;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fabric = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (!fabric) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		return 0;
	}
	/* reset in place under the driver lock */
	memset(&fabric->stats, 0, sizeof(struct bfa_vf_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
/*
 * Reset the LUN SCAN mode flags on every scsi device of the base port
 * and of each vport, according to whether LUN masking is being enabled
 * (lunmask_cfg != 0) or disabled.
 */
static void
bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
{
	struct bfad_vport_s *vport;

	/* base (physical) port first */
	bfad_reset_sdev_bflags(bfad->pport.im_port, lunmask_cfg);

	/* then every virtual port hanging off this adapter */
	list_for_each_entry(vport, &bfad->vport_list, list_entry)
		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
}
/*
 * Enable, disable or clear the FCP-IM LUN mask, dispatching on the bsg
 * sub-command.  Enabling/disabling also flips the scsi LUN scan mode on
 * all ports when the mask update succeeds.
 */
int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	switch (v_cmd) {
	case IOCMD_FCPIM_LUNMASK_ENABLE:
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
		/* Set the LUN Scanning mode to be Sequential scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
		break;
	case IOCMD_FCPIM_LUNMASK_DISABLE:
		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
		if (iocmd->status == BFA_STATUS_OK)
			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
		break;
	case IOCMD_FCPIM_LUNMASK_CLEAR:
		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
		break;
	default:
		/* unknown sub-command: leave iocmd->status untouched,
		 * matching the original else-if chain */
		break;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
int
bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
unsigned long flags;