/*
* Universal Flash Storage Host controller driver Core
*
* This code is based on drivers/scsi/ufs/ufshcd.c
* Copyright (C) 2011-2013 Samsung India Software Operations
* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
* Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* See the COPYING file in the top-level directory or visit
* <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This program is provided "AS IS" and "WITH ALL FAULTS" and
* without warranty of any kind. You are solely responsible for
* determining the appropriateness of using and distributing
* the program and assume all risks associated with your exercise
* of rights with respect to the program, including but not limited
* to infringement of third party rights, the risks and costs of
* program errors, damage to or loss of data, programs or equipment,
* and unavailability or interruption of operations. Under no
* circumstances will the contributor of this Program be liable for
* any damages of any kind arising from your use or distribution of
* this program.
*
* The Linux Foundation chooses to take subject only to the GPLv2
* license terms, and distributes only under these terms.
*/
#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
#define UFSHCD_REQ_SENSE_SIZE 18
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT 30 /* msecs */
/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3
/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3
/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3
/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5
/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
if (_on) \
_ret = ufshcd_enable_vreg(_dev, _vreg); \
else \
_ret = ufshcd_disable_vreg(_dev, _vreg); \
_ret; \
})
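/*
 * Illustrative use of the statement-expression macro above: it evaluates
 * to the return value of whichever helper it dispatches to, e.g.
 * (hypothetical caller, "vreg" being any struct ufs_vreg the caller holds):
 * ret = ufshcd_toggle_vreg(hba->dev, vreg, true);
 */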
#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
UFSHCD_CMD_PER_LUN = 32,
UFSHCD_CAN_QUEUE = 32,
};
/* UFSHCD states */
enum {
UFSHCD_STATE_RESET,
UFSHCD_STATE_ERROR,
UFSHCD_STATE_OPERATIONAL,
UFSHCD_STATE_EH_SCHEDULED,
};
/* UFSHCD error handling flags */
enum {
UFSHCD_EH_IN_PROGRESS = (1 << 0),
};
/* UFSHCD UIC layer error flags */
enum {
UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};
#define ufshcd_set_eh_in_progress(h) \
((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
#define ufshcd_set_ufs_dev_active(h) \
((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
return ufs_pm_lvl_states[lvl].dev_state;
}
static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
return ufs_pm_lvl_states[lvl].link_state;
}
static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
enum uic_link_state link_state)
{
enum ufs_pm_level lvl;
for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
(ufs_pm_lvl_states[lvl].link_state == link_state))
return lvl;
}
/* if no match is found, return level 0 */
return UFS_PM_LVL_0;
}
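/*
 * For reference, the table above maps each power management level to a
 * (device power mode, UIC link state) pair:
 * lvl 0: ACTIVE/ACTIVE, lvl 1: ACTIVE/HIBERN8, lvl 2: SLEEP/ACTIVE,
 * lvl 3: SLEEP/HIBERN8, lvl 4: POWERDOWN/HIBERN8, lvl 5: POWERDOWN/OFF.
 */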
static struct ufs_dev_fix ufs_fixups[] = {
/* UFS cards deviations table */
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_NO_FASTAUTO),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
END_FIX
};
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
return tag >= 0 && tag < hba->nutrs;
}
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
int ret = 0;
if (!hba->is_irq_enabled) {
ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
hba);
if (ret)
dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
__func__, ret);
hba->is_irq_enabled = true;
}
return ret;
}
static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
if (hba->is_irq_enabled) {
free_irq(hba->irq, hba);
hba->is_irq_enabled = false;
}
}
/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
if (!val)
return;
if (*val < 0x20 || *val > 0x7e)
*val = ' ';
}
static void ufshcd_add_command_trace(struct ufs_hba *hba,
unsigned int tag, const char *str)
{
sector_t lba = -1;
u8 opcode = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp;
int transfer_len = -1;
if (!trace_ufshcd_command_enabled())
return;
lrbp = &hba->lrb[tag];
if (lrbp->cmd) { /* data phase exists */
opcode = (u8)(*lrbp->cmd->cmnd);
if ((opcode == READ_10) || (opcode == WRITE_10)) {
/*
* Currently we only fully trace read(10) and write(10)
* commands
*/
if (lrbp->cmd->request && lrbp->cmd->request->bio)
lba =
lrbp->cmd->request->bio->bi_iter.bi_sector;
transfer_len = be32_to_cpu(
lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
}
}
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
trace_ufshcd_command(dev_name(hba->dev), str, tag,
doorbell, transfer_len, intr, lba, opcode);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
if (list_empty(head))
return;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
clki->max_freq)
dev_err(hba->dev, "clk: %s, rate: %u\n",
clki->name, clki->curr_freq);
}
}
static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
struct ufs_uic_err_reg_hist *err_hist, char *err_name)
{
int i;
for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
if (err_hist->reg[p] == 0)
continue;
dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
}
}
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
/*
* hex_dump reads its data without the readl macro. This might
* cause inconsistency issues on some platforms, as the printed
* values may come from a cache rather than the most recent value.
* To know whether you are looking at an un-cached version, verify
* that the IORESOURCE_MEM flag is set when xxx_get_resource() is invoked
* in the platform/PCI probe function.
*/
ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
hba->ufs_version, hba->capabilities);
dev_err(hba->dev,
"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
dev_err(hba->dev,
"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
hba->ufs_stats.hibern8_exit_cnt);
ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
ufshcd_print_clk_freqs(hba);
if (hba->vops && hba->vops->dbg_register_dump)
hba->vops->dbg_register_dump(hba);
}
static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
struct ufshcd_lrb *lrbp;
int prdt_length;
int tag;
for_each_set_bit(tag, &bitmap, hba->nutrs) {
lrbp = &hba->lrb[tag];
dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
tag, ktime_to_us(lrbp->issue_time_stamp));
dev_err(hba->dev,
"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
tag, (u64)lrbp->utrd_dma_addr);
ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
sizeof(struct utp_transfer_req_desc));
dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
(u64)lrbp->ucd_req_dma_addr);
ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
sizeof(struct utp_upiu_req));
dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
(u64)lrbp->ucd_rsp_dma_addr);
ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
sizeof(struct utp_upiu_rsp));
prdt_length = le16_to_cpu(
lrbp->utr_descriptor_ptr->prd_table_length);
dev_err(hba->dev,
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
tag, prdt_length,
(u64)lrbp->ucd_prdt_dma_addr);
if (pr_prdt)
ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
sizeof(struct ufshcd_sg_entry) * prdt_length);
}
}
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
struct utp_task_req_desc *tmrdp;
int tag;
for_each_set_bit(tag, &bitmap, hba->nutmrs) {
tmrdp = &hba->utmrdl_base_addr[tag];
dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
sizeof(struct request_desc_header));
dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
tag);
ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
sizeof(struct utp_upiu_req));
dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
tag);
ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
sizeof(struct utp_task_req_desc));
}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
hba->curr_dev_pwr_mode, hba->uic_link_state);
dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
hba->pm_op_in_progress, hba->is_sys_suspended);
dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
hba->auto_bkops_enabled, hba->host->host_self_blocked);
dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
hba->capabilities, hba->caps);
dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
hba->dev_quirks);
}
/**
* ufshcd_print_pwr_info - print power params as saved in hba
* power info
* @hba: per-adapter instance
*/
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
static const char * const names[] = {
"INVALID MODE",
"FAST MODE",
"SLOW_MODE",
"INVALID MODE",
"FASTAUTO_MODE",
"SLOWAUTO_MODE",
"INVALID MODE",
};
dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
__func__,
hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
names[hba->pwr_info.pwr_rx],
names[hba->pwr_info.pwr_tx],
hba->pwr_info.hs_rate);
}
/*
* ufshcd_wait_for_register - wait for register value to change
* @hba - per-adapter interface
* @reg - mmio register offset
* @mask - mask to apply to read register value
* @val - wait condition
* @interval_us - polling interval in microsecs
* @timeout_ms - timeout in millisecs
* @can_sleep - perform sleep or just spin
*
* Returns -ETIMEDOUT on error, zero on success
*/
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
unsigned long timeout_ms, bool can_sleep)
{
int err = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
/* ignore bits that we don't intend to wait on */
val = val & mask;
while ((ufshcd_readl(hba, reg) & mask) != val) {
if (can_sleep)
usleep_range(interval_us, interval_us + 50);
else
udelay(interval_us);
if (time_after(jiffies, timeout)) {
if ((ufshcd_readl(hba, reg) & mask) != val)
err = -ETIMEDOUT;
break;
}
}
return err;
}
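/*
 * Illustrative call (hypothetical values): poll until the doorbell bit for
 * "tag" clears, checking every 100 us and giving up after 1000 ms:
 * err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *                                1 << tag, 0, 100, 1000, true);
 */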
/**
* ufshcd_get_intr_mask - Get the interrupt bit mask
* @hba - Pointer to adapter instance
*
* Returns interrupt bit mask per version
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
u32 intr_mask = 0;
switch (hba->ufs_version) {
case UFSHCI_VERSION_10:
intr_mask = INTERRUPT_MASK_ALL_VER_10;
break;
case UFSHCI_VERSION_11:
case UFSHCI_VERSION_20:
intr_mask = INTERRUPT_MASK_ALL_VER_11;
break;
case UFSHCI_VERSION_21:
default:
intr_mask = INTERRUPT_MASK_ALL_VER_21;
break;
}
return intr_mask;
}
/**
* ufshcd_get_ufs_version - Get the UFS version supported by the HBA
* @hba - Pointer to adapter instance
*
* Returns UFSHCI version supported by the controller
*/
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
return ufshcd_vops_get_ufs_hci_version(hba);
return ufshcd_readl(hba, REG_UFS_VERSION);
}
/**
* ufshcd_is_device_present - Check if any device is connected to
* the host controller
* @hba: pointer to adapter instance
*
* Returns true if device present, false if no device detected
*/
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
DEVICE_PRESENT) ? true : false;
}
/**
* ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
* @lrb: pointer to local command reference block
*
* This function is used to get the OCS field from UTRD
* Returns the OCS field in the UTRD
*/
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
/**
* ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
* @task_req_descp: pointer to utp_task_req_desc structure
*
* This function is used to get the OCS field from UTMRD
* Returns the OCS field in the UTMRD
*/
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}
/**
* ufshcd_get_tm_free_slot - get a free slot for task management request
* @hba: per adapter instance
* @free_slot: pointer to variable with available slot value
*
* Get a free tag and lock it until ufshcd_put_tm_slot() is called.
* Returns false if a free slot is not available, else returns true with the
* tag value in @free_slot.
*/
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
int tag;
bool ret = false;
if (!free_slot)
goto out;
do {
tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
if (tag >= hba->nutmrs)
goto out;
} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
*free_slot = tag;
ret = true;
out:
return ret;
}
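/*
 * The loop above implements a lock-free slot claim: find_first_zero_bit()
 * picks a candidate and test_and_set_bit_lock() claims it atomically; if
 * another context wins the race, the search simply restarts.
 */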
static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
clear_bit_unlock(slot, &hba->tm_slots_in_use);
}
/**
* ufshcd_utrl_clear - Clear a bit in UTRLCLR register
* @hba: per adapter instance
* @pos: position of the bit to be cleared
*/
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
/**
* ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
* @hba: per adapter instance
* @tag: position of the bit to be cleared
*/
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
__clear_bit(tag, &hba->outstanding_reqs);
}
/**
* ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
* @reg: Register value of host controller status
*
* Returns 0 on success and a positive value on failure
*/
static inline int ufshcd_get_lists_status(u32 reg)
{
return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
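/*
 * UFSHCD_STATUS_READY bundles the UCRDY, UTRLRDY and UTMRLRDY bits, so the
 * expression above yields 0 only when all three ready bits are set.
 */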
/**
* ufshcd_get_uic_cmd_result - Get the UIC command result
* @hba: Pointer to adapter instance
*
* This function gets the result of UIC command completion
* Returns 0 on success, non zero value on error
*/
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
MASK_UIC_COMMAND_RESULT;
}
/**
* ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
* @hba: Pointer to adapter instance
*
* This function reads UIC command argument 3 and returns the attribute
* value returned by the UIC command
*/
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/**
* ufshcd_get_req_rsp - returns the TR response transaction type
* @ucd_rsp_ptr: pointer to response UPIU
*/
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
/**
* ufshcd_get_rsp_upiu_result - Get the result from response UPIU
* @ucd_rsp_ptr: pointer to response UPIU
*
* This function gets the response status and scsi_status from response UPIU
* Returns the response result code.
*/
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}
/*
* ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
* from response UPIU
* @ucd_rsp_ptr: pointer to response UPIU
*
* Return the data segment length.
*/
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
MASK_RSP_UPIU_DATA_SEG_LEN;
}
/**
* ufshcd_is_exception_event - Check if the device raised an exception event
* @ucd_rsp_ptr: pointer to response UPIU
*
* The function checks if the device raised an exception event indicated in
* the Device Information field of response UPIU.
*
* Returns true if exception is raised, false otherwise.
*/
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
MASK_RSP_EXCEPTION_EVENT ? true : false;
}
/**
* ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
* @hba: per adapter instance
*/
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
ufshcd_writel(hba, INT_AGGR_ENABLE |
INT_AGGR_COUNTER_AND_TIMER_RESET,
REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
* ufshcd_config_intr_aggr - Configure interrupt aggregation values.
* @hba: per adapter instance
* @cnt: Interrupt aggregation counter threshold
* @tmout: Interrupt aggregation timeout value
*/
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
INT_AGGR_COUNTER_THLD_VAL(cnt) |
INT_AGGR_TIMEOUT_VAL(tmout),
REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
* ufshcd_disable_intr_aggr - Disables interrupt aggregation.
* @hba: per adapter instance
*/
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
* ufshcd_enable_run_stop_reg - Enable run-stop registers.
* When the run-stop registers are set to 1, the host controller
* is allowed to process requests.
* @hba: per adapter instance
*/
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
REG_UTP_TASK_REQ_LIST_RUN_STOP);
ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/**
* ufshcd_hba_start - Start controller initialization sequence
* @hba: per adapter instance
*/
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
/**
* ufshcd_is_hba_active - Get controller state
* @hba: per adapter instance
*
* Returns false if controller is active, true otherwise
*/
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
? false : true;
}
static const char *ufschd_uic_link_state_to_string(
enum uic_link_state state)
{
switch (state) {
case UIC_LINK_OFF_STATE: return "OFF";
case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
default: return "UNKNOWN";
}
}
static const char *ufschd_ufs_dev_pwr_mode_to_string(
enum ufs_dev_pwr_mode state)
{
switch (state) {
case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
case UFS_SLEEP_PWR_MODE: return "SLEEP";
case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
default: return "UNKNOWN";
}
}
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
/* HCI version 1.0 and 1.1 support UniPro 1.41 */
if ((hba->ufs_version == UFSHCI_VERSION_10) ||
(hba->ufs_version == UFSHCI_VERSION_11))
return UFS_UNIPRO_VER_1_41;
else
return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
/*
* If both host and device support UniPro ver1.6 or later, PA layer
* parameters tuning happens during link startup itself.
*
* We can manually tune PA layer parameters if either host or device
* doesn't support UniPro ver 1.6 or later. But to keep manual tuning
* logic simple, we will only do manual tuning if local unipro version
* doesn't support ver1.6 or later.
*/
if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
return true;
else
return false;
}
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
ktime_t start = ktime_get();
bool clk_state_changed = false;
if (list_empty(head))
goto out;
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
if (ret)
return ret;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (scale_up && clki->max_freq) {
if (clki->curr_freq == clki->max_freq)
continue;
clk_state_changed = true;
ret = clk_set_rate(clki->clk, clki->max_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
__func__, clki->name,
clki->max_freq, ret);
break;
}
trace_ufshcd_clk_scaling(dev_name(hba->dev),
"scaled up", clki->name,
clki->curr_freq,
clki->max_freq);
clki->curr_freq = clki->max_freq;
} else if (!scale_up && clki->min_freq) {
if (clki->curr_freq == clki->min_freq)
continue;
clk_state_changed = true;
ret = clk_set_rate(clki->clk, clki->min_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
__func__, clki->name,
clki->min_freq, ret);
break;
}
trace_ufshcd_clk_scaling(dev_name(hba->dev),
"scaled down", clki->name,
clki->curr_freq,
clki->min_freq);
clki->curr_freq = clki->min_freq;
}
}
dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
clki->name, clk_get_rate(clki->clk));
}
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
out:
if (clk_state_changed)
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
}
/**
* ufshcd_is_devfreq_scaling_required - check if scaling is required or not
* @hba: per adapter instance
* @scale_up: True if scaling up and false if scaling down
*
* Returns true if scaling is required, false otherwise.
*/
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
bool scale_up)
{
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
if (list_empty(head))
return false;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (scale_up && clki->max_freq) {
if (clki->curr_freq == clki->max_freq)
continue;
return true;
} else if (!scale_up && clki->min_freq) {
if (clki->curr_freq == clki->min_freq)
continue;
return true;
}
}
}
return false;
}
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
unsigned long flags;
int ret = 0;
u32 tm_doorbell;
u32 tr_doorbell;
bool timeout = false, do_last_check = false;
ktime_t start;
ufshcd_hold(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
/*
* Wait for all the outstanding tasks/transfer requests.
* Verify by checking the doorbell registers are clear.
*/
start = ktime_get();
do {
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
ret = -EBUSY;
goto out;
}
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (!tm_doorbell && !tr_doorbell) {
timeout = false;
break;
} else if (do_last_check) {
break;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
schedule();
if (ktime_to_us(ktime_sub(ktime_get(), start)) >
wait_timeout_us) {
timeout = true;
/*
* We might have been scheduled out for a long time, so
* make sure to check whether the doorbells have cleared
* by now.
*/
do_last_check = true;
}
spin_lock_irqsave(hba->host->host_lock, flags);
} while (tm_doorbell || tr_doorbell);
if (timeout) {
dev_err(hba->dev,
"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
__func__, tm_doorbell, tr_doorbell);
ret = -EBUSY;
}
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_release(hba);
return ret;
}
/**
* ufshcd_scale_gear - scale up/down UFS gear
* @hba: per adapter instance
* @scale_up: True for scaling up gear and false for scaling down
*
* Returns 0 for success,
* Returns -EBUSY if scaling can't happen at this time
* Returns non-zero for any other errors
*/
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
#define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
int ret = 0;
struct ufs_pa_layer_attr new_pwr_info;
if (scale_up) {
memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
sizeof(struct ufs_pa_layer_attr));
} else {
memcpy(&new_pwr_info, &hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
|| hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
/* save the current power mode */
memcpy(&hba->clk_scaling.saved_pwr_info.info,
&hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
/* scale down gear */
new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
}
}
/* check if the power mode needs to be changed */
ret = ufshcd_change_power_mode(hba, &new_pwr_info);
if (ret)
dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
__func__, ret,
hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
new_pwr_info.gear_tx, new_pwr_info.gear_rx);
return ret;
}
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
#define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
int ret = 0;
/*
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
scsi_block_requests(hba->host);
down_write(&hba->clk_scaling_lock);
if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
scsi_unblock_requests(hba->host);
}
return ret;
}
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
up_write(&hba->clk_scaling_lock);
scsi_unblock_requests(hba->host);
}
/**
* ufshcd_devfreq_scale - scale up/down UFS clocks and gear
* @hba: per adapter instance
* @scale_up: True for scaling up and false for scaling down
*
* Returns 0 for success,
* Returns -EBUSY if scaling can't happen at this time
* Returns non-zero for any other errors
*/
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
/* let's not get into low power until clock scaling is completed */
ufshcd_hold(hba, false);
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
ret = ufshcd_scale_gear(hba, false);
if (ret)
goto out;
}
ret = ufshcd_scale_clks(hba, scale_up);
if (ret) {
if (!scale_up)
ufshcd_scale_gear(hba, true);
goto out;
}
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
if (ret) {
ufshcd_scale_clks(hba, false);
goto out;
}
}
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
out:
ufshcd_clock_scaling_unprepare(hba);
ufshcd_release(hba);
return ret;
}
static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.suspend_work);
unsigned long irq_flags;
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
return;
}
hba->clk_scaling.is_suspended = true;
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
__ufshcd_suspend_clkscaling(hba);
}
static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.resume_work);
unsigned long irq_flags;
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (!hba->clk_scaling.is_suspended) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
return;
}
hba->clk_scaling.is_suspended = false;
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_resume_device(hba->devfreq);
}
static int ufshcd_devfreq_target(struct device *dev,
unsigned long *freq, u32 flags)
{
int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start;
bool scale_up, sched_clk_scaling_suspend_work = false;
unsigned long irq_flags;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
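/*
 * Only the boundary frequencies are meaningful here: devfreq is expected
 * to request either 0 (scale down) or UINT_MAX (scale up), so any value
 * in between is rejected as invalid below.
 */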
if ((*freq > 0) && (*freq < UINT_MAX)) {
dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
return -EINVAL;
}
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (ufshcd_eh_in_progress(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
return 0;
}
if (!hba->clk_scaling.active_reqs)
sched_clk_scaling_suspend_work = true;
scale_up = (*freq == UINT_MAX) ? true : false;
if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
ret = 0;
goto out; /* no state change required */
}
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, scale_up);
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
out:
if (sched_clk_scaling_suspend_work)
queue_work(hba->clk_scaling.workq,
&hba->clk_scaling.suspend_work);
return ret;
}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
memset(stat, 0, sizeof(*stat));
spin_lock_irqsave(hba->host->host_lock, flags);
if (!scaling->window_start_t)
goto start_window;
if (scaling->is_busy_started)
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
stat->total_time = jiffies_to_usecs((long)jiffies -
(long)scaling->window_start_t);
stat->busy_time = scaling->tot_busy_t;
start_window:
scaling->window_start_t = jiffies;
scaling->tot_busy_t = 0;
if (hba->outstanding_reqs) {
scaling->busy_start_t = ktime_get();
scaling->is_busy_started = true;
} else {
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
return 0;
}
static struct devfreq_dev_profile ufs_devfreq_profile = {
.polling_ms = 100,
.target = ufshcd_devfreq_target,
.get_dev_status = ufshcd_devfreq_get_dev_status,
};
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
unsigned long flags;
devfreq_suspend_device(hba->devfreq);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_scaling.window_start_t = 0;
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
unsigned long flags;
bool suspend = false;
if (!ufshcd_is_clkscaling_supported(hba))
return;
spin_lock_irqsave(hba->host->host_lock, flags);
if (!hba->clk_scaling.is_suspended) {
suspend = true;
hba->clk_scaling.is_suspended = true;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (suspend)
__ufshcd_suspend_clkscaling(hba);
}
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
unsigned long flags;
bool resume = false;
if (!ufshcd_is_clkscaling_supported(hba))
return;
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_scaling.is_suspended) {
resume = true;
hba->clk_scaling.is_suspended = false;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (resume)
devfreq_resume_device(hba->devfreq);
}
static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
u32 value;
int err;
if (kstrtou32(buf, 0, &value))
return -EINVAL;
value = !!value;
if (value == hba->clk_scaling.is_allowed)
goto out;
pm_runtime_get_sync(hba->dev);
ufshcd_hold(hba, false);
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
hba->clk_scaling.is_allowed = value;
if (value) {
ufshcd_resume_clkscaling(hba);
} else {
ufshcd_suspend_clkscaling(hba);
err = ufshcd_devfreq_scale(hba, true);
if (err)
dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
__func__, err);
}
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
out:
return count;
}
static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
hba->clk_scaling.enable_attr.attr.mode = 0644;
if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
unsigned long flags;
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_gating.state == CLKS_ON) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
goto unblock_reqs;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_setup_clocks(hba, true);
/* Exit from hibern8 */
if (ufshcd_can_hibern8_during_gating(hba)) {
/* Prevent gating in this path */
hba->clk_gating.is_suspended = true;
if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba);
if (ret)
dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
__func__, ret);
else
ufshcd_set_link_active(hba);
}
hba->clk_gating.is_suspended = false;
}
unblock_reqs:
scsi_unblock_requests(hba->host);
}
/**
* ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
* Also, exit from hibern8 mode and set the link as active.
* @hba: per adapter instance
* @async: This indicates whether caller should ungate clocks asynchronously.
*/
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
int rc = 0;
unsigned long flags;
if (!ufshcd_is_clkgating_allowed(hba))
goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
if (ufshcd_eh_in_progress(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
return 0;
}
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
/*
* Wait for the ungate work to complete if in progress.
* Though the clocks may be in ON state, the link could
* still be in hibern8 state if hibern8 is allowed
* during clock gating.
* Make sure we exit hibern8 state as well, in addition to
* the clocks being ON.
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
}
break;
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
break;
}
/*
* If we are here, it means gating work is either done or
* currently running. Hence, fall through to cancel gating
* work and to enable clocks.
*/
case CLKS_OFF:
scsi_block_requests(hba->host);
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
schedule_work(&hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
* work to be done or not.
*/
case REQ_CLKS_ON:
if (async) {
rc = -EAGAIN;
hba->clk_gating.active_reqs--;
break;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
/* Make sure state is CLKS_ON before returning */
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
default:
dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
__func__, hba->clk_gating.state);
break;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.gate_work.work);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
/*
* In case you are here to cancel this work the gating state
* would be marked as REQ_CLKS_ON. In this case save time by
* skipping the gating work and exit after changing the clock
* state to CLKS_ON.
*/
if (hba->clk_gating.is_suspended ||
(hba->clk_gating.state == REQ_CLKS_ON)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
goto rel_lock;
}
if (hba->clk_gating.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|| hba->lrb_in_use || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
if (ufshcd_uic_hibern8_enter(hba)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
goto out;
}
ufshcd_set_link_hibern8(hba);
}
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
else
/* If link is active, device ref_clk can't be switched off */
__ufshcd_setup_clocks(hba, false, true);
/*
* In case you are here to cancel this work, the gating state
* would already be marked as REQ_CLKS_ON. In that case keep the
* state as REQ_CLKS_ON, which implies that the clocks are off
* and a request to turn them on is pending. This keeps the state
* machine intact and ultimately prevents the cancel work from
* running multiple times when new requests arrive before the
* current cancel work is done.
*/
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
rel_lock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
if (!ufshcd_is_clkgating_allowed(hba))
return;
hba->clk_gating.active_reqs--;
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|| hba->lrb_in_use || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done
|| ufshcd_eh_in_progress(hba))
return;
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
schedule_delayed_work(&hba->clk_gating.gate_work,
msecs_to_jiffies(hba->clk_gating.delay_ms));
}
void ufshcd_release(struct ufs_hba *hba)
{
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
__ufshcd_release(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}
static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned long flags, value;
if (kstrtoul(buf, 0, &value))
return -EINVAL;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.delay_ms = value;
spin_unlock_irqrestore(hba->host->host_lock, flags);
return count;
}
static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
}
static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned long flags;
u32 value;
if (kstrtou32(buf, 0, &value))
return -EINVAL;
value = !!value;
if (value == hba->clk_gating.is_enabled)
goto out;
if (value) {
ufshcd_release(hba);
} else {
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
hba->clk_gating.is_enabled = value;
out:
return count;
}
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
if (!ufshcd_is_clkgating_allowed(hba))
return;
hba->clk_gating.delay_ms = 150;
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
hba->clk_gating.is_enabled = true;
hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
hba->clk_gating.delay_attr.attr.mode = 0644;
if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
hba->clk_gating.enable_attr.attr.mode = 0644;
if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
if (!ufshcd_is_clkgating_allowed(hba))
return;
device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
cancel_work_sync(&hba->clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
if (!ufshcd_is_clkscaling_supported(hba))
return;
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
return;
if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
&hba->clk_scaling.resume_work);
if (!hba->clk_scaling.window_start_t) {
hba->clk_scaling.window_start_t = jiffies;
hba->clk_scaling.tot_busy_t = 0;
hba->clk_scaling.is_busy_started = false;
}
if (!hba->clk_scaling.is_busy_started) {
hba->clk_scaling.busy_start_t = ktime_get();
hba->clk_scaling.is_busy_started = true;
}
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
if (!ufshcd_is_clkscaling_supported(hba))
return;
if (!hba->outstanding_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
}
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
* @task_tag: Task tag of the command
*/
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
hba->lrb[task_tag].issue_time_stamp = ktime_get();
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
ufshcd_add_command_trace(hba, task_tag, "send");
}
/**
* ufshcd_copy_sense_data - Copy sense data in case of check condition
* @lrbp - pointer to local reference block
*/
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
int len;
if (lrbp->sense_buffer &&
ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
int len_to_copy;
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
memcpy(lrbp->sense_buffer,
lrbp->ucd_rsp_ptr->sr.sense_data,
min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
}
}
/**
* ufshcd_copy_query_response() - Copy the Query Response and the data
* descriptor
* @hba: per adapter instance
* @lrbp - pointer to local reference block
*/
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
/* Get the descriptor */
if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
GENERAL_UPIU_REQUEST_SIZE;
u16 resp_len;
u16 buf_len;
/* data segment length */
resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
MASK_QUERY_DATA_SEG_LEN;
buf_len = be16_to_cpu(
hba->dev_cmd.query.request.upiu_req.length);
if (likely(buf_len >= resp_len)) {
memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
} else {
dev_warn(hba->dev,
"%s: Response size is bigger than buffer",
__func__);
return -EINVAL;
}
}
return 0;
}
/**
* ufshcd_hba_capabilities - Read controller capabilities
* @hba: per adapter instance
*/
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
/* nutrs and nutmrs are 0 based values */
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}
/**
* ufshcd_ready_for_uic_cmd - Check if controller is ready
* to accept UIC commands
* @hba: per adapter instance
* Return true on success, else false
*/
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
return true;
else
return false;
}
/**
* ufshcd_get_upmcrs - Get the power mode change request status
* @hba: Pointer to adapter instance
*
* This function gets the UPMCRS field of HCS register
* Returns value of UPMCRS field
*/
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
* ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
* @hba: per adapter instance
* @uic_cmd: UIC command
*
* Mutex must be held.
*/
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
WARN_ON(hba->active_uic_cmd);
hba->active_uic_cmd = uic_cmd;
/* Write Args */
ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
/* Write UIC Cmd */
ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
REG_UIC_COMMAND);
}
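/*
 * Note the ordering above: the argument registers are programmed first
 * because writing REG_UIC_COMMAND is what triggers command execution.
 */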
/**
* ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
* @hba: per adapter instance
* @uic_cmd: UIC command
*
* Must be called with mutex held.
* Returns 0 only on success.
*/
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
unsigned long flags;
if (wait_for_completion_timeout(&uic_cmd->done,
msecs_to_jiffies(UIC_CMD_TIMEOUT)))
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
else
ret = -ETIMEDOUT;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
return ret;
}
/**
* __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
* @completion: initialize the completion only if this is set to true
*
* Identical to ufshcd_send_uic_cmd() except that the caller must already
* hold the mutex. Must be called with the mutex held and host_lock locked.
* Returns 0 only on success.
*/
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
bool completion)
{
if (!ufshcd_ready_for_uic_cmd(hba)) {
dev_err(hba->dev,
"Controller not ready to accept UIC commands\n");
return -EIO;
}
if (completion)
init_completion(&uic_cmd->done);
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
return 0;
}
/**
* ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
*
* Returns 0 only on success.
*/
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
unsigned long flags;
ufshcd_hold(hba, false);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
mutex_unlock(&hba->uic_cmd_mutex);
ufshcd_release(hba);
return ret;
}
/**
* ufshcd_map_sg - Map scatter-gather list to prdt
* @hba: per adapter instance
* @lrbp - pointer to local reference block
*
* Returns 0 in case of success, non-zero value in case of failure
*/
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct ufshcd_sg_entry *prd_table;
struct scatterlist *sg;
struct scsi_cmnd *cmd;
int sg_segments;
int i;
cmd = lrbp->cmd;
sg_segments = scsi_dma_map(cmd);
if (sg_segments < 0)
return sg_segments;
if (sg_segments) {
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
lrbp->utr_descriptor_ptr->prd_table_length =
cpu_to_le16((u16)(sg_segments *
sizeof(struct ufshcd_sg_entry)));
else
lrbp->utr_descriptor_ptr->prd_table_length =
cpu_to_le16((u16) (sg_segments));
prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
scsi_for_each_sg(cmd, sg, sg_segments, i) {
prd_table[i].size =
cpu_to_le32(((u32) sg_dma_len(sg))-1);
prd_table[i].base_addr =
cpu_to_le32(lower_32_bits(sg->dma_address));
prd_table[i].upper_addr =
cpu_to_le32(upper_32_bits(sg->dma_address));
prd_table[i].reserved = 0;
}
} else {
lrbp->utr_descriptor_ptr->prd_table_length = 0;
}
return 0;
}
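/*
 * Note on the quirk handling above: with UFSHCD_QUIRK_PRDT_BYTE_GRAN the
 * controller interprets prd_table_length in bytes rather than in number of
 * entries, hence the multiplication by sizeof(struct ufshcd_sg_entry).
 */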
/**
* ufshcd_enable_intr - enable interrupts
* @hba: per adapter instance
* @intrs: interrupt bits
*/
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
if (hba->ufs_version == UFSHCI_VERSION_10) {
u32 rw;
rw = set & INTERRUPT_MASK_RW_VER_10;
set = rw | ((set ^ intrs) & intrs);
} else {
set |= intrs;
}
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
* ufshcd_disable_intr - disable interrupts
* @hba: per adapter instance
* @intrs: interrupt bits
*/
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
if (hba->ufs_version == UFSHCI_VERSION_10) {
u32 rw;
rw = (set & INTERRUPT_MASK_RW_VER_10) &
~(intrs & INTERRUPT_MASK_RW_VER_10);
set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
} else {
set &= ~intrs;
}
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
* ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
* according to the request
* @lrbp: pointer to local reference block
* @upiu_flags: flags required in the header
* @cmd_dir: request's data direction
*/
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
u32 data_direction;
u32 dword_0;
if (cmd_dir == DMA_FROM_DEVICE) {
data_direction = UTP_DEVICE_TO_HOST;
*upiu_flags = UPIU_CMD_FLAGS_READ;
} else if (cmd_dir == DMA_TO_DEVICE) {
data_direction = UTP_HOST_TO_DEVICE;
*upiu_flags = UPIU_CMD_FLAGS_WRITE;
} else {
data_direction = UTP_NO_DATA_TRANSFER;
*upiu_flags = UPIU_CMD_FLAGS_NONE;
}
dword_0 = data_direction | (lrbp->command_type
<< UPIU_COMMAND_TYPE_OFFSET);
if (lrbp->intr_cmd)
dword_0 |= UTP_REQ_DESC_INT_CMD;
/* Transfer request descriptor header fields */
req_desc->header.dword_0 = cpu_to_le32(dword_0);
/* dword_1 is reserved, hence it is set to 0 */
req_desc->header.dword_1 = 0;
/*
* Assign an invalid value to the command status; the controller
* updates OCS with the actual command status on command
* completion.
*/
req_desc->header.dword_2 =
cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
/* dword_3 is reserved, hence it is set to 0 */
req_desc->header.dword_3 = 0;
req_desc->prd_table_length = 0;
}
/**
* ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
* for SCSI commands
* @lrbp - local reference block pointer
* @upiu_flags - flags
*/
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
unsigned short cdb_len;
/* command descriptor fields */
ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
UPIU_TRANSACTION_COMMAND, upiu_flags,
lrbp->lun, lrbp->task_tag);
ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
/* Total EHS length and Data segment length will be zero */
ucd_req_ptr->header.dword_2 = 0;
ucd_req_ptr->sc.exp_data_transfer_len =
cpu_to_be32(lrbp->cmd->sdb.length);
cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
* ufshcd_prepare_utp_query_req_upiu() - fills the query request UPIU
* @hba: UFS hba
* @lrbp: local reference block pointer
* @upiu_flags: flags
*/
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
struct ufs_query *query = &hba->dev_cmd.query;
u16 len = be16_to_cpu(query->request.upiu_req.length);
u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
/* Query request header */
ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
lrbp->lun, lrbp->task_tag);
ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
0, query->request.query_func, 0, 0);
/* Data segment length is only needed for WRITE_DESC */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
ucd_req_ptr->header.dword_2 =
UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
else
ucd_req_ptr->header.dword_2 = 0;
/* Copy the Query Request buffer as is */
memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
QUERY_OSF_SIZE);
/* Copy the Descriptor */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
memcpy(descp, query->descriptor, len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
/* command descriptor fields */
ucd_req_ptr->header.dword_0 =
UPIU_HEADER_DWORD(
UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
/* clear rest of the fields of basic header */
ucd_req_ptr->header.dword_1 = 0;
ucd_req_ptr->header.dword_2 = 0;
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
* ufshcd_comp_devman_upiu - composes the UFS Protocol Information Unit (UPIU)
* for device management purposes
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*/
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
u32 upiu_flags;
int ret = 0;
if (hba->ufs_version == UFSHCI_VERSION_20)
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
else
lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
ufshcd_prepare_utp_nop_upiu(lrbp);
else
ret = -EINVAL;
return ret;
}
/**
* ufshcd_comp_scsi_upiu - composes the UFS Protocol Information Unit (UPIU)
* for SCSI commands
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*/
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
u32 upiu_flags;
int ret = 0;
if (hba->ufs_version == UFSHCI_VERSION_20)
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
else
lrbp->command_type = UTP_CMD_TYPE_SCSI;
if (likely(lrbp->cmd)) {
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
lrbp->cmd->sc_data_direction);
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
} else {
ret = -EINVAL;
}
return ret;
}
/**
* ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
* @scsi_lun: scsi LUN id
*
* Returns UPIU LUN id
*/
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
if (scsi_is_wlun(scsi_lun))
return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
| UFS_UPIU_WLUN_ID;
else
return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}
/**
* ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
* @upiu_wlun_id: UPIU W-LUN id
*
* Returns SCSI W-LUN id
*/
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
/**
* ufshcd_queuecommand - main entry point for SCSI requests
* @host: SCSI host pointer
* @cmd: command from SCSI Midlayer
*
* Returns 0 for success, non-zero in case of failure
*/
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct ufshcd_lrb *lrbp;
struct ufs_hba *hba;
unsigned long flags;
int tag;
int err = 0;
hba = shost_priv(host);
tag = cmd->request->tag;
if (!ufshcd_valid_tag(hba, tag)) {
dev_err(hba->dev,
"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
__func__, tag, cmd, cmd->request);
BUG();
}
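/* if clock scaling holds the lock for writing, ask the midlayer to retry */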
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
spin_lock_irqsave(hba->host->host_lock, flags);
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
case UFSHCD_STATE_EH_SCHEDULED:
case UFSHCD_STATE_RESET:
err = SCSI_MLQUEUE_HOST_BUSY;
goto out_unlock;
case UFSHCD_STATE_ERROR:
set_host_byte(cmd, DID_ERROR);
cmd->scsi_done(cmd);
goto out_unlock;
default:
dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
__func__, hba->ufshcd_state);
set_host_byte(cmd, DID_BAD_TARGET);
cmd->scsi_done(cmd);
goto out_unlock;
}
/* if error handling is in progress, don't issue commands */
if (ufshcd_eh_in_progress(hba)) {
set_host_byte(cmd, DID_ERROR);
cmd->scsi_done(cmd);
goto out_unlock;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
hba->req_abort_count = 0;
/* acquire the tag to make sure device cmds don't use it */
if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
/*
* A device management command is in progress, so requeue this
* command. The requeued request may get a different tag instead
* of having to wait for the device management command to
* complete.
*/
err = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
err = ufshcd_hold(hba, true);
if (err) {
err = SCSI_MLQUEUE_HOST_BUSY;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
WARN_ON(hba->clk_gating.state != CLKS_ON);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
lrbp->req_abort_skip = false;
ufshcd_comp_scsi_upiu(hba, lrbp);
err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
up_read(&hba->clk_scaling_lock);
return err;
}
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
lrbp->cmd = NULL;
lrbp->sense_bufflen = 0;
lrbp->sense_buffer = NULL;
lrbp->task_tag = tag;
lrbp->lun = 0; /* device management cmd is not specific to any LUN */
lrbp->intr_cmd = true; /* No interrupt aggregation */
hba->dev_cmd.type = cmd_type;
return ufshcd_comp_devman_upiu(hba, lrbp);
}
static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
int err = 0;
unsigned long flags;
u32 mask = 1 << tag;
/* clear outstanding transaction before retry */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utrl_clear(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* wait for h/w to clear the corresponding bit in the door-bell.
* max. wait is 1 sec.
*/
err = ufshcd_wait_for_register(hba,
REG_UTP_TRANSFER_REQ_DOOR_BELL,
mask, ~mask, 1000, 1000, true);
return err;
}
static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
/* Get the UPIU response */
query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
UPIU_RSP_CODE_OFFSET;
return query_res->response;
}
/**
* ufshcd_dev_cmd_completion() - handles device management command responses
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*/
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
int resp;
int err = 0;
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
switch (resp) {
case UPIU_TRANSACTION_NOP_IN:
if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
err = -EINVAL;
dev_err(hba->dev, "%s: unexpected response %x\n",
__func__, resp);
}
break;
case UPIU_TRANSACTION_QUERY_RSP:
err = ufshcd_check_query_response(hba, lrbp);
if (!err)
err = ufshcd_copy_query_response(hba, lrbp);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
err = -EPERM;
dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
__func__);
break;
default:
err = -EINVAL;
dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
__func__, resp);
break;
}
return err;
}
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, int max_timeout)
{
int err = 0;
unsigned long time_left;
unsigned long flags;
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
msecs_to_jiffies(max_timeout));
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
err = ufshcd_get_tr_ocs(lrbp);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!time_left) {
err = -ETIMEDOUT;
dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
__func__, lrbp->task_tag);
if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
/* successfully cleared the command, retry if needed */
err = -EAGAIN;
/*
* in case of an error, after clearing the doorbell,
* we also need to clear the outstanding_request
* field in hba
*/
ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
}
return err;
}
/**
* ufshcd_get_dev_cmd_tag - Get device management command tag
* @hba: per-adapter instance
* @tag_out: pointer to variable that receives the free slot value
*
* Get a free slot and lock it until device management command
* completes.
*
* Returns false if a free slot is unavailable for locking, else
* returns true with the tag value in @tag_out.
*/
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
int tag;
bool ret = false;
unsigned long tmp;
if (!tag_out)
goto out;
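/*
* Scan from the highest tag downwards for a free slot and claim it
* atomically; retry if another context grabs it first.
*/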
do {
tmp = ~hba->lrb_in_use;
tag = find_last_bit(&tmp, hba->nutrs);
if (tag >= hba->nutrs)
goto out;
} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
*tag_out = tag;
ret = true;
out:
return ret;
}
static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
clear_bit_unlock(tag, &hba->lrb_in_use);
}
/**
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
* @cmd_type: specifies the type (NOP, Query...)
* @timeout: timeout in milliseconds
*
* NOTE: Since there is only one available tag for device management commands,
* the caller is expected to hold the hba->dev_cmd.lock mutex.
*/
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
struct ufshcd_lrb *lrbp;
int err;
int tag;
struct completion wait;
unsigned long flags;
down_read(&hba->clk_scaling_lock);
/*
* Get free slot, sleep if slots are unavailable.
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by SCSI request timeout.
*/
wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
init_completion(&wait);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
goto out_put_tag;
hba->dev_cmd.complete = &wait;
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
out_put_tag:
ufshcd_put_dev_cmd_tag(hba, tag);
wake_up(&hba->dev_cmd.tag_wq);
up_read(&hba->clk_scaling_lock);
return err;
}
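/*
* Illustrative usage sketch (not part of the driver): callers are expected
* to serialize device management commands with hba->dev_cmd.lock, so a NOP
* verification would roughly look like:
*
*	mutex_lock(&hba->dev_cmd.lock);
*	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
*	mutex_unlock(&hba->dev_cmd.lock);
*/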
/**
* ufshcd_init_query() - init the query response and request parameters
* @hba: per-adapter instance
* @request: address of the request pointer to be initialized
* @response: address of the response pointer to be initialized
* @opcode: operation to perform
* @idn: idn of the flag, attribute or descriptor to access
* @index: LU number to access
* @selector: query/flag/descriptor further identification
*/
static inline void ufshcd_init_query(struct ufs_hba *hba,
struct ufs_query_req **request, struct ufs_query_res **response,
enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
*request = &hba->dev_cmd.query.request;
*response = &hba->dev_cmd.query.response;
memset(*request, 0, sizeof(struct ufs_query_req));
memset(*response, 0, sizeof(struct ufs_query_res));
(*request)->upiu_req.opcode = opcode;
(*request)->upiu_req.idn = idn;
(*request)->upiu_req.index = index;
(*request)->upiu_req.selector = selector;
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
{
int ret;
int retries;
for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
if (ret)
dev_dbg(hba->dev,
"%s: failed with error %d, retries %d\n",
__func__, ret, retries);
else
break;
}
if (ret)
dev_err(hba->dev,
"%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
__func__, opcode, idn, ret, retries);
return ret;
}
/**
* ufshcd_query_flag() - API function for sending flag query requests
* @hba: per-adapter instance
* @opcode: flag query to perform
* @idn: flag idn to access
* @flag_res: the flag value after the query request completes
*
* Returns 0 for success, non-zero in case of failure
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err, index = 0, selector = 0;
int timeout = QUERY_REQ_TIMEOUT;
BUG_ON(!hba);
ufshcd_hold(hba, false);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
switch (opcode) {
case UPIU_QUERY_OPCODE_SET_FLAG:
case UPIU_QUERY_OPCODE_CLEAR_FLAG:
case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
break;
case UPIU_QUERY_OPCODE_READ_FLAG:
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
if (!flag_res) {
/* No dummy reads */
dev_err(hba->dev, "%s: Invalid argument for read request\n",
__func__);
err = -EINVAL;
goto out_unlock;
}
break;
default:
dev_err(hba->dev,
"%s: Expected query flag opcode but got = %d\n",
__func__, opcode);
err = -EINVAL;
goto out_unlock;
}
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
if (err) {
dev_err(hba->dev,
"%s: Sending flag query for idn %d failed, err = %d\n",
__func__, idn, err);
goto out_unlock;
}
if (flag_res)
*flag_res = (be32_to_cpu(response->upiu_res.value) &
MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
ufshcd_release(hba);
return err;
}
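/*
* Illustrative usage sketch (not part of the driver), assuming the caller
* wants to read a boolean flag such as fDeviceInit
* (QUERY_FLAG_IDN_FDEVICEINIT, an idn assumed to be defined in ufs.h):
*
*	bool flag_res;
*	int err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
*				    QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
*	if (!err && !flag_res)
*		; /* flag reads back as cleared */
*/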
/**
* ufshcd_query_attr - API function for sending attribute requests
* @hba: per-adapter instance
* @opcode: attribute opcode
* @idn: attribute idn to access
* @index: index field
* @selector: selector field
* @attr_val: the attribute value after the query request completes
*
* Returns 0 for success, non-zero in case of failure
*/
static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err;
BUG_ON(!hba);
ufshcd_hold(hba, false);
if (!attr_val) {
dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
__func__, opcode);
err = -EINVAL;
goto out;
}
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
switch (opcode) {
case UPIU_QUERY_OPCODE_WRITE_ATTR:
request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
request->upiu_req.value = cpu_to_be32(*attr_val);
break;
case UPIU_QUERY_OPCODE_READ_ATTR:
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
break;
default:
dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
__func__, opcode);
err = -EINVAL;
goto out_unlock;
}
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
__func__, opcode, idn, index, err);
goto out_unlock;
}
*attr_val = be32_to_cpu(response->upiu_res.value);
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
out:
ufshcd_release(hba);
return err;
}
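/*
* Illustrative usage sketch (not part of the driver), assuming the caller
* wants to read an attribute such as bActiveICCLevel
* (QUERY_ATTR_IDN_ACTIVE_ICC_LVL, an idn assumed to be defined in ufs.h):
*
*	u32 icc_level;
*	int err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
*			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
*/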
/**
* ufshcd_query_attr_retry() - API function for sending query
* attribute with retries
* @hba: per-adapter instance
* @opcode: attribute opcode
* @idn: attribute idn to access
* @index: index field
* @selector: selector field
* @attr_val: the attribute value after the query request
* completes
*
* Returns 0 for success, non-zero in case of failure
*/
static int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
u32 *attr_val)
{
int ret = 0;
u32 retries;
for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
ret = ufshcd_query_attr(hba, opcode, idn, index,
selector, attr_val);
if (ret)
dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
__func__, ret, retries);
else
break;
}
if (ret)
dev_err(hba->dev,
"%s: query attribute, idn %d, failed with error %d after %d retires\n",
__func__, idn, ret, QUERY_REQ_RETRIES);
return ret;
}
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
enum query_opcode opcode, enum desc_idn idn, u8 index,