Mirror of https://github.com/LuckfoxTECH/luckfox-pico.git (synced 2026-01-19 09:52:31 +01:00)
project:build.sh: Added fastboot support; custom modifications to U-Boot and kernel implemented using patches.
project:cfg:BoardConfig_IPC: Added fastboot BoardConfig files and firmware post-scripts, distinguishing the BoardConfigs for Luckfox Pico Pro and Luckfox Pico Max.
project:app: Added fastboot_client and rk_smart_door for quick-boot applications; updated the rkipc app to adapt to the latest media library.
media:samples: Added more usage examples.
media:rockit: Fixed bugs; removed support for retrieving data frames from VPSS.
media:isp: Updated the rkaiq library and related tools to support connecting to RKISP_Tuner.
sysdrv:Makefile: Added support for compiling drv_ko for Luckfox Pico Ultra W on Ubuntu; added support for a custom root filesystem.
sysdrv:tools:board: Updated Buildroot optional mirror sources, updated some software versions, and stored the U-Boot and kernel device-tree and configuration files that undergo repeated modification separately.
sysdrv:source:mcu: Used the RISC-V MCU SDK with the RT-Thread system, mainly for initializing the camera AE during quick boot.
sysdrv:source:uboot: Added support for fastboot; added a high-baud-rate DDR bin for serial firmware upgrades.
sysdrv:source:kernel: Upgraded to version 5.10.160; increased the NPU frequency for RV1106G3; added support for fastboot.

Signed-off-by: luckfox-eng29 <eng29@luckfox.com>
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -531,36 +531,54 @@ EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
 
 static inline void nvme_clear_nvme_request(struct request *req)
 {
-	if (!(req->rq_flags & RQF_DONTPREP)) {
-		nvme_req(req)->retries = 0;
-		nvme_req(req)->flags = 0;
-		req->rq_flags |= RQF_DONTPREP;
-	}
+	nvme_req(req)->retries = 0;
+	nvme_req(req)->flags = 0;
+	req->rq_flags |= RQF_DONTPREP;
 }
 
-struct request *nvme_alloc_request(struct request_queue *q,
-		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
+static inline unsigned int nvme_req_op(struct nvme_command *cmd)
 {
-	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
-	struct request *req;
+	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+}
 
-	if (qid == NVME_QID_ANY) {
-		req = blk_mq_alloc_request(q, op, flags);
-	} else {
-		req = blk_mq_alloc_request_hctx(q, op, flags,
-				qid ? qid - 1 : 0);
-	}
-	if (IS_ERR(req))
-		return req;
+static inline void nvme_init_request(struct request *req,
+		struct nvme_command *cmd)
+{
+	if (req->q->queuedata)
+		req->timeout = NVME_IO_TIMEOUT;
+	else /* no queuedata implies admin queue */
+		req->timeout = ADMIN_TIMEOUT;
 
 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
 	nvme_clear_nvme_request(req);
 	nvme_req(req)->cmd = cmd;
+}
 
+struct request *nvme_alloc_request(struct request_queue *q,
+		struct nvme_command *cmd, blk_mq_req_flags_t flags)
+{
+	struct request *req;
+
+	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+	if (!IS_ERR(req))
+		nvme_init_request(req, cmd);
 	return req;
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_request);
+
+struct request *nvme_alloc_request_qid(struct request_queue *q,
+		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
+{
+	struct request *req;
+
+	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+			qid ? qid - 1 : 0);
+	if (!IS_ERR(req))
+		nvme_init_request(req, cmd);
+	return req;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
 
 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
 {
 	struct nvme_command c;
@@ -663,7 +681,7 @@ static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
 	req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
 }
 
-static void nvme_setup_passthrough(struct request *req,
+static inline void nvme_setup_passthrough(struct request *req,
 		struct nvme_command *cmd)
 {
 	memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
@@ -834,7 +852,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
 	blk_status_t ret = BLK_STS_OK;
 
-	nvme_clear_nvme_request(req);
+	if (!(req->rq_flags & RQF_DONTPREP))
+		nvme_clear_nvme_request(req);
 
 	memset(cmd, 0, sizeof(*cmd));
 	switch (req_op(req)) {
@@ -923,11 +942,15 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 	struct request *req;
 	int ret;
 
-	req = nvme_alloc_request(q, cmd, flags, qid);
+	if (qid == NVME_QID_ANY)
+		req = nvme_alloc_request(q, cmd, flags);
+	else
+		req = nvme_alloc_request_qid(q, cmd, flags, qid);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+	if (timeout)
+		req->timeout = timeout;
 
 	if (buffer && bufflen) {
 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -1093,11 +1116,12 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	void *meta = NULL;
 	int ret;
 
-	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
+	req = nvme_alloc_request(q, cmd, 0);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+	if (timeout)
+		req->timeout = timeout;
 	nvme_req(req)->flags |= NVME_REQ_USERCMD;
 
 	if (ubuffer && bufflen) {
@@ -1167,8 +1191,8 @@ static int nvme_keep_alive(struct nvme_ctrl *ctrl)
 {
 	struct request *rq;
 
-	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
-			NVME_QID_ANY);
+	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
+			BLK_MQ_REQ_RESERVED);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -1270,6 +1294,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
 				 warn_str, cur->nidl);
 			return -1;
 		}
+		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+			return NVME_NIDT_EUI64_LEN;
 		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
 		return NVME_NIDT_EUI64_LEN;
 	case NVME_NIDT_NGUID:
@@ -1278,6 +1304,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
 				 warn_str, cur->nidl);
 			return -1;
 		}
+		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+			return NVME_NIDT_NGUID_LEN;
 		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
 		return NVME_NIDT_NGUID_LEN;
 	case NVME_NIDT_UUID:
@@ -1286,6 +1314,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
 				 warn_str, cur->nidl);
 			return -1;
 		}
+		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+			return NVME_NIDT_UUID_LEN;
 		uuid_copy(&ids->uuid, data + sizeof(*cur));
 		return NVME_NIDT_UUID_LEN;
 	case NVME_NIDT_CSI:
@@ -1381,12 +1411,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 	if ((*id)->ncap == 0) /* namespace not allocated or attached */
 		goto out_free_id;
 
-	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
-	    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
-		memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
-	if (ctrl->vs >= NVME_VS(1, 2, 0) &&
-	    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
-		memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+	if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+		dev_info(ctrl->device,
+			 "Ignoring bogus Namespace Identifiers\n");
+	} else {
+		if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+		    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+			memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+		if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+			memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+	}
 
 	return 0;
 
@@ -2012,7 +2048,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 	}
 	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
-	blk_queue_dma_alignment(q, 7);
+	blk_queue_dma_alignment(q, 3);
 	blk_queue_write_cache(q, vwc, vwc);
 }
 
@@ -2249,18 +2285,21 @@ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
 		enum pr_type type, bool abort)
 {
 	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
+
 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
 }
 
 static int nvme_pr_clear(struct block_device *bdev, u64 key)
 {
-	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
-	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
+	u32 cdw10 = 1 | (key ? 0 : 1 << 3);
+
+	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
 }
 
 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 {
-	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
+	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
+
 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
 }
 
@@ -2663,6 +2702,34 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
 		.vid = 0x14a4,
 		.fr = "22301111",
 		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
 	},
+	{
+		/*
+		 * This Kioxia CD6-V Series / HPE PE8030 device times out and
+		 * aborts I/O during any load, but more easily reproducible
+		 * with discards (fstrim).
+		 *
+		 * The device is left in a state where it is also not possible
+		 * to use "nvme set-feature" to disable APST, but booting with
+		 * nvme_core.default_ps_max_latency=0 works.
+		 */
+		.vid = 0x1e0f,
+		.mn = "KCD6XVUL6T40",
+		.quirks = NVME_QUIRK_NO_APST,
+	},
+	{
+		/*
+		 * The external Samsung X5 SSD fails initialization without a
+		 * delay before checking if it is ready and has a whole set of
+		 * other problems. To make this even more interesting, it
+		 * shares the PCI ID with internal Samsung 970 Evo Plus that
+		 * does not need or want these quirks.
+		 */
+		.vid = 0x144d,
+		.mn = "Samsung Portable SSD X5",
+		.quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+			  NVME_QUIRK_NO_DEEPEST_PS |
+			  NVME_QUIRK_IGNORE_DEV_SUBNQN,
+	}
 };
 
@@ -2801,8 +2868,8 @@ static ssize_t subsys_##field##_show(struct device *dev, \
 { \
 	struct nvme_subsystem *subsys = \
 		container_of(dev, struct nvme_subsystem, dev); \
-	return sprintf(buf, "%.*s\n", \
-			(int)sizeof(subsys->field), subsys->field); \
+	return sysfs_emit(buf, "%.*s\n", \
+			(int)sizeof(subsys->field), subsys->field); \
 } \
 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
 
@@ -2882,7 +2949,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	nvme_init_subnqn(subsys, ctrl, id);
 	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
 	memcpy(subsys->model, id->mn, sizeof(subsys->model));
-	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
 	subsys->vendor_id = le16_to_cpu(id->vid);
 	subsys->cmic = id->cmic;
 	subsys->awupf = le16_to_cpu(id->awupf);
@@ -3026,10 +3092,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	if (!ctrl->identified) {
 		int i;
 
-		ret = nvme_init_subsystem(ctrl, id);
-		if (ret)
-			goto out_free;
-
 		/*
 		 * Check for quirks.  Quirk can depend on firmware version,
 		 * so, in principle, the set of quirks present can change
@@ -3042,7 +3104,13 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 			if (quirk_matches(id, &core_quirks[i]))
 				ctrl->quirks |= core_quirks[i].quirks;
 		}
+
+		ret = nvme_init_subsystem(ctrl, id);
+		if (ret)
+			goto out_free;
 	}
+	memcpy(ctrl->subsys->firmware_rev, id->fr,
+	       sizeof(ctrl->subsys->firmware_rev));
 
 	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
 		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
@@ -3164,8 +3232,12 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		return ret;
 
 	if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
+		/*
+		 * Do not return errors unless we are in a controller reset,
+		 * the controller works perfectly fine without hwmon.
+		 */
 		ret = nvme_hwmon_init(ctrl);
-		if (ret < 0)
+		if (ret == -EINTR)
 			return ret;
 	}
 
@@ -3258,11 +3330,17 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 	case NVME_IOCTL_IO_CMD:
 		return nvme_dev_user_cmd(ctrl, argp);
 	case NVME_IOCTL_RESET:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		dev_warn(ctrl->device, "resetting controller\n");
 		return nvme_reset_ctrl_sync(ctrl);
 	case NVME_IOCTL_SUBSYS_RESET:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		return nvme_reset_subsystem(ctrl);
 	case NVME_IOCTL_RESCAN:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		nvme_queue_scan(ctrl);
 		return 0;
 	default:
@@ -3323,13 +3401,13 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 	int model_len = sizeof(subsys->model);
 
 	if (!uuid_is_null(&ids->uuid))
-		return sprintf(buf, "uuid.%pU\n", &ids->uuid);
+		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
 
 	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
-		return sprintf(buf, "eui.%16phN\n", ids->nguid);
+		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
 
 	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
-		return sprintf(buf, "eui.%8phN\n", ids->eui64);
+		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
 
 	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
 				  subsys->serial[serial_len - 1] == '\0'))
@@ -3338,7 +3416,7 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 				 subsys->model[model_len - 1] == '\0'))
 		model_len--;
 
-	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
+	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
 		serial_len, subsys->serial, model_len, subsys->model,
 		head->ns_id);
 }
@@ -3347,7 +3425,7 @@ static DEVICE_ATTR_RO(wwid);
 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
-	return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
+	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
 }
 static DEVICE_ATTR_RO(nguid);
 
@@ -3360,25 +3438,25 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
 	 * we have no UUID set
 	 */
 	if (uuid_is_null(&ids->uuid)) {
-		printk_ratelimited(KERN_WARNING
-				   "No UUID available providing old NGUID\n");
-		return sprintf(buf, "%pU\n", ids->nguid);
+		dev_warn_ratelimited(dev,
+			"No UUID available providing old NGUID\n");
+		return sysfs_emit(buf, "%pU\n", ids->nguid);
 	}
-	return sprintf(buf, "%pU\n", &ids->uuid);
+	return sysfs_emit(buf, "%pU\n", &ids->uuid);
 }
 static DEVICE_ATTR_RO(uuid);
 
 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
-	return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
+	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
 }
 static DEVICE_ATTR_RO(eui);
 
 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
-	return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
+	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
 }
 static DEVICE_ATTR_RO(nsid);
 
@@ -3443,7 +3521,7 @@ static ssize_t field##_show(struct device *dev, \
 		struct device_attribute *attr, char *buf) \
 { \
 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
-	return sprintf(buf, "%.*s\n", \
+	return sysfs_emit(buf, "%.*s\n", \
 		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
 } \
 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
@@ -3457,7 +3535,7 @@ static ssize_t field##_show(struct device *dev, \
 		struct device_attribute *attr, char *buf) \
 { \
 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
-	return sprintf(buf, "%d\n", ctrl->field); \
+	return sysfs_emit(buf, "%d\n", ctrl->field); \
 } \
 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
@@ -3505,9 +3583,9 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
 
 	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
 	    state_name[ctrl->state])
-		return sprintf(buf, "%s\n", state_name[ctrl->state]);
+		return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
 
-	return sprintf(buf, "unknown state\n");
+	return sysfs_emit(buf, "unknown state\n");
 }
 
 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
@@ -3559,9 +3637,9 @@ static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
 	struct nvmf_ctrl_options *opts = ctrl->opts;
 
 	if (ctrl->opts->max_reconnects == -1)
-		return sprintf(buf, "off\n");
-	return sprintf(buf, "%d\n",
-			opts->max_reconnects * opts->reconnect_delay);
+		return sysfs_emit(buf, "off\n");
+	return sysfs_emit(buf, "%d\n",
+			  opts->max_reconnects * opts->reconnect_delay);
 }
 
 static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
@@ -3591,8 +3669,8 @@ static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 
 	if (ctrl->opts->reconnect_delay == -1)
-		return sprintf(buf, "off\n");
-	return sprintf(buf, "%d\n", ctrl->opts->reconnect_delay);
+		return sysfs_emit(buf, "off\n");
+	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
 }
 
 static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
@@ -4396,6 +4474,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 	nvme_stop_keep_alive(ctrl);
 	flush_work(&ctrl->async_event_work);
 	cancel_work_sync(&ctrl->fw_act_work);
+	if (ctrl->ops->stop_ctrl)
+		ctrl->ops->stop_ctrl(ctrl);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
 
@@ -4408,12 +4488,14 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 	if (ctrl->queue_count > 1) {
 		nvme_queue_scan(ctrl);
 		nvme_start_queues(ctrl);
+		nvme_mpath_update(ctrl);
 	}
 }
 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
 
 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
 {
+	nvme_hwmon_exit(ctrl);
 	nvme_fault_inject_fini(&ctrl->fault_inject);
 	dev_pm_qos_hide_latency_tolerance(ctrl->device);
 	cdev_device_del(&ctrl->cdev, ctrl->device);
 
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -12,7 +12,7 @@
 
 struct nvme_hwmon_data {
 	struct nvme_ctrl *ctrl;
-	struct nvme_smart_log log;
+	struct nvme_smart_log *log;
 	struct mutex read_lock;
 };
 
@@ -60,14 +60,14 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
 static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
 {
 	return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
-			   NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
+			   NVME_CSI_NVM, data->log, sizeof(*data->log), 0);
 }
 
 static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
 			   u32 attr, int channel, long *val)
 {
 	struct nvme_hwmon_data *data = dev_get_drvdata(dev);
-	struct nvme_smart_log *log = &data->log;
+	struct nvme_smart_log *log = data->log;
 	int temp;
 	int err;
 
@@ -163,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
 	case hwmon_temp_max:
 	case hwmon_temp_min:
 		if ((!channel && data->ctrl->wctemp) ||
-		    (channel && data->log.temp_sensor[channel - 1])) {
+		    (channel && data->log->temp_sensor[channel - 1])) {
 			if (data->ctrl->quirks &
 			    NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
 				return 0444;
@@ -176,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
 		break;
 	case hwmon_temp_input:
 	case hwmon_temp_label:
-		if (!channel || data->log.temp_sensor[channel - 1])
+		if (!channel || data->log->temp_sensor[channel - 1])
 			return 0444;
 		break;
 	default:
@@ -223,33 +223,57 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {
 
 int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
-	struct device *dev = ctrl->dev;
+	struct device *dev = ctrl->device;
 	struct nvme_hwmon_data *data;
 	struct device *hwmon;
 	int err;
 
-	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
-		return 0;
+		return -ENOMEM;
+
+	data->log = kzalloc(sizeof(*data->log), GFP_KERNEL);
+	if (!data->log) {
+		err = -ENOMEM;
+		goto err_free_data;
+	}
 
 	data->ctrl = ctrl;
 	mutex_init(&data->read_lock);
 
 	err = nvme_hwmon_get_smart_log(data);
 	if (err) {
-		dev_warn(ctrl->device,
-			 "Failed to read smart log (error %d)\n", err);
-		devm_kfree(dev, data);
-		return err;
+		dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+		goto err_free_log;
 	}
 
-	hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
-						     &nvme_hwmon_chip_info,
-						     NULL);
+	hwmon = hwmon_device_register_with_info(dev, "nvme",
+						data, &nvme_hwmon_chip_info,
+						NULL);
 	if (IS_ERR(hwmon)) {
 		dev_warn(dev, "Failed to instantiate hwmon device\n");
-		devm_kfree(dev, data);
+		err = PTR_ERR(hwmon);
+		goto err_free_log;
 	}
-	return 0;
+
+	ctrl->hwmon_device = hwmon;
+	return 0;
+
+err_free_log:
+	kfree(data->log);
+err_free_data:
+	kfree(data);
+	return err;
 }
+
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+	if (ctrl->hwmon_device) {
+		struct nvme_hwmon_data *data =
+			dev_get_drvdata(ctrl->hwmon_device);
+
+		hwmon_device_unregister(ctrl->hwmon_device);
+		ctrl->hwmon_device = NULL;
+		kfree(data->log);
+		kfree(data);
+	}
+}
 
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -653,7 +653,7 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q,
 
 	nvme_nvm_rqtocmd(rqd, ns, cmd);
 
-	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
+	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0);
 	if (IS_ERR(rq))
 		return rq;
 
@@ -767,14 +767,14 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
 	DECLARE_COMPLETION_ONSTACK(wait);
 	int ret = 0;
 
-	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
-			NVME_QID_ANY);
+	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0);
 	if (IS_ERR(rq)) {
 		ret = -ENOMEM;
 		goto err_cmd;
 	}
 
-	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+	if (timeout)
+		rq->timeout = timeout;
 
 	if (ppa_buf && ppa_len) {
 		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
 
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -484,8 +484,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
 	ns->ana_grpid = le32_to_cpu(desc->grpid);
 	ns->ana_state = desc->state;
 	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
-
-	if (nvme_state_is_live(ns->ana_state))
+	/*
+	 * nvme_mpath_set_live() will trigger I/O to the multipath path device
+	 * and in turn to this path device. However we cannot accept this I/O
+	 * if the controller is not live. This may deadlock if called from
+	 * nvme_mpath_init_identify() and the ctrl will never complete
+	 * initialization, preventing I/O from completing. For this case we
+	 * will reprocess the ANA log page in nvme_mpath_update() once the
+	 * controller is ready.
+	 */
+	if (nvme_state_is_live(ns->ana_state) &&
+	    ns->ctrl->state == NVME_CTRL_LIVE)
 		nvme_mpath_set_live(ns);
 }
 
@@ -572,6 +581,18 @@ static void nvme_ana_work(struct work_struct *work)
 	nvme_read_ana_log(ctrl);
 }
 
+void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+	u32 nr_change_groups = 0;
+
+	if (!ctrl->ana_log_buf)
+		return;
+
+	mutex_lock(&ctrl->ana_lock);
+	nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
+	mutex_unlock(&ctrl->ana_lock);
+}
+
 static void nvme_anatt_timeout(struct timer_list *t)
 {
 	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
@@ -603,8 +624,8 @@ static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
 	struct nvme_subsystem *subsys =
 		container_of(dev, struct nvme_subsystem, dev);
 
-	return sprintf(buf, "%s\n",
-			nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
+	return sysfs_emit(buf, "%s\n",
+			  nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
 }
 
 static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
@@ -629,7 +650,7 @@ SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
 static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
-	return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
+	return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
 }
 DEVICE_ATTR_RO(ana_grpid);
 
@@ -638,7 +659,7 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
 {
 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 
-	return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
+	return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
 }
 DEVICE_ATTR_RO(ana_state);
 
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -150,6 +150,11 @@ enum nvme_quirks {
 	 * encoding the generation sequence number.
 	 */
 	NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
+
+	/*
+	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+	 */
+	NVME_QUIRK_BOGUS_NID = (1 << 18),
 };
 
 /*
@@ -252,6 +257,9 @@ struct nvme_ctrl {
 	struct rw_semaphore namespaces_rwsem;
 	struct device ctrl_device;
 	struct device *device;	/* char device */
+#ifdef CONFIG_NVME_HWMON
+	struct device *hwmon_device;
+#endif
 	struct cdev cdev;
 	struct work_struct reset_work;
 	struct work_struct delete_work;
@@ -473,6 +481,7 @@ struct nvme_ctrl_ops {
 	void (*free_ctrl)(struct nvme_ctrl *ctrl);
 	void (*submit_async_event)(struct nvme_ctrl *ctrl);
 	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
+	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 };
 
@@ -535,11 +544,23 @@ static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
 static inline void nvme_should_fail(struct request *req) {}
 #endif
 
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
+
 static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
 {
+	int ret;
+
 	if (!ctrl->subsystem)
 		return -ENOTTY;
-	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+	if (!nvme_wait_reset(ctrl))
+		return -EBUSY;
+
+	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+	if (ret)
+		return ret;
+
+	return nvme_try_sched_reset(ctrl);
 }
 
 /*
@@ -626,7 +647,6 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		enum nvme_ctrl_state new_state);
-bool nvme_wait_reset(struct nvme_ctrl *ctrl);
 int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
 int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -657,6 +677,8 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
+		struct nvme_command *cmd, blk_mq_req_flags_t flags);
+struct request *nvme_alloc_request_qid(struct request_queue *q,
 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
 void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
@@ -677,7 +699,6 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
-int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 
 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
@@ -707,6 +728,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
 int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_mpath_update(struct nvme_ctrl *ctrl);
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -793,6 +815,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
 "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
 	return 0;
 }
+static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+}
 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
 }
@@ -864,11 +889,16 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 
 #ifdef CONFIG_NVME_HWMON
 int nvme_hwmon_init(struct nvme_ctrl *ctrl);
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
 #else
 static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
 	return 0;
 }
+
+static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+}
 #endif
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -224,6 +224,7 @@ struct nvme_queue {
  */
 struct nvme_iod {
 	struct nvme_request req;
+	struct nvme_command cmd;
 	struct nvme_queue *nvmeq;
 	bool use_sgl;
 	int aborted;
@@ -816,6 +817,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
 	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
 	if (bv->bv_len > first_prp_len)
 		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
+	else
+		cmnd->dptr.prp2 = 0;
 	return BLK_STS_OK;
 }
 
@@ -917,7 +920,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *req = bd->rq;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_command cmnd;
+	struct nvme_command *cmnd = &iod->cmd;
 	blk_status_t ret;
 
 	iod->aborted = 0;
@@ -931,24 +934,24 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
 		return BLK_STS_IOERR;
 
-	ret = nvme_setup_cmd(ns, req, &cmnd);
+	ret = nvme_setup_cmd(ns, req, cmnd);
 	if (ret)
 		return ret;
 
 	if (blk_rq_nr_phys_segments(req)) {
-		ret = nvme_map_data(dev, req, &cmnd);
+		ret = nvme_map_data(dev, req, cmnd);
 		if (ret)
 			goto out_free_cmd;
 	}
 
 	if (blk_integrity_rq(req)) {
-		ret = nvme_map_metadata(dev, req, &cmnd);
+		ret = nvme_map_metadata(dev, req, cmnd);
 		if (ret)
 			goto out_unmap_data;
 	}
 
 	blk_mq_start_request(req);
-	nvme_submit_cmd(nvmeq, &cmnd, bd->last);
+	nvme_submit_cmd(nvmeq, cmnd, bd->last);
 	return BLK_STS_OK;
 out_unmap_data:
 	nvme_unmap_data(dev, req);
@@ -1350,13 +1353,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		 req->tag, nvmeq->qid);
 
 	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
-			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+			BLK_MQ_REQ_NOWAIT);
 	if (IS_ERR(abort_req)) {
 		atomic_inc(&dev->ctrl.abort_limit);
 		return BLK_EH_RESET_TIMER;
 	}
 
-	abort_req->timeout = ADMIN_TIMEOUT;
 	abort_req->end_io_data = NULL;
 	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
 
@@ -1666,6 +1668,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 		dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
 		if (IS_ERR(dev->ctrl.admin_q)) {
 			blk_mq_free_tag_set(&dev->admin_tagset);
+			dev->ctrl.admin_q = NULL;
 			return -ENOMEM;
 		}
 		if (!blk_get_queue(dev->ctrl.admin_q)) {
@@ -2278,11 +2281,10 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	cmd.delete_queue.opcode = opcode;
 	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
 
-	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	req->timeout = ADMIN_TIMEOUT;
 	req->end_io_data = nvmeq;
 
 	init_completion(&nvmeq->delete_done);
@@ -2624,6 +2626,8 @@ static void nvme_reset_work(struct work_struct *work)
 	if (result)
 		goto out_unlock;
 
+	dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
+
 	/*
 	 * Limit the max command size to prevent iod->sg allocations going
 	 * over a single page.
@@ -2636,7 +2640,6 @@ static void nvme_reset_work(struct work_struct *work)
 	 * Don't limit the IOMMU merged segment size.
 	 */
 	dma_set_max_seg_size(dev->dev, 0xffffffff);
-	dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
 
 	mutex_unlock(&dev->shutdown_lock);
 
@@ -3212,7 +3215,10 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS |
-				NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+				NVME_QUIRK_DISABLE_WRITE_ZEROES |
+				NVME_QUIRK_BOGUS_NID, },
+	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion unidentified */
 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
 	{ PCI_DEVICE(0x1bb1, 0x0100),	/* Seagate Nytro Flash Storage */
@@ -3231,7 +3237,8 @@ static const struct pci_device_id nvme_id_table[] = {
 				NVME_QUIRK_DISABLE_WRITE_ZEROES|
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
-		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+				NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1b4b, 0x1092),	/* Lexar 256 GB SSD */
 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
@@ -3242,10 +3249,15 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x1d1d, 0x2601),	/* CNEX Granby */
 		.driver_data = NVME_QUIRK_LIGHTNVM, },
 	{ PCI_DEVICE(0x10ec, 0x5762),	/* ADATA SX6000LNP */
-		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+				NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+	{ PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
+		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
+	{ PCI_DEVICE(0x1344, 0x6001),   /* Micron Nitro NVMe */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1c5c, 0x1504),   /* SK Hynix PC400 */
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x15b7, 0x2001),   /* Sandisk Skyhawk */
@@ -3262,7 +3274,6 @@ static const struct pci_device_id nvme_id_table[] = {
 				NVME_QUIRK_128_BYTES_SQES |
 				NVME_QUIRK_SHARED_TAGS |
 				NVME_QUIRK_SKIP_CID_GEN },
-
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }
 };
 
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1057,6 +1057,14 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 	}
 }
 
+static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
+{
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+	cancel_work_sync(&ctrl->err_work);
+	cancel_delayed_work_sync(&ctrl->reconnect_work);
+}
+
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -2236,9 +2244,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&ctrl->err_work);
-	cancel_delayed_work_sync(&ctrl->reconnect_work);
-
 	nvme_rdma_teardown_io_queues(ctrl, shutdown);
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
 	if (shutdown)
@@ -2288,6 +2293,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 	.submit_async_event	= nvme_rdma_submit_async_event,
 	.delete_ctrl		= nvme_rdma_delete_ctrl,
 	.get_address		= nvmf_get_address,
+	.stop_ctrl		= nvme_rdma_stop_ctrl,
 };
 
 /*
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -118,7 +118,6 @@ struct nvme_tcp_queue {
 	struct mutex		send_mutex;
 	struct llist_head	req_list;
 	struct list_head	send_list;
-	bool			more_requests;
 
 	/* recv state */
 	void			*pdu;
@@ -314,7 +313,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
 {
 	return !list_empty(&queue->send_list) ||
-		!llist_empty(&queue->req_list) || queue->more_requests;
+		!llist_empty(&queue->req_list);
 }
 
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
@@ -333,9 +332,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 	 */
 	if (queue->io_cpu == raw_smp_processor_id() &&
 	    sync && empty && mutex_trylock(&queue->send_mutex)) {
-		queue->more_requests = !last;
 		nvme_tcp_send_all(queue);
-		queue->more_requests = false;
 		mutex_unlock(&queue->send_mutex);
 	}
 
@@ -1149,8 +1146,7 @@ done:
 	} else if (ret < 0) {
 		dev_err(queue->ctrl->ctrl.device,
 			"failed to send request %d\n", ret);
-		if (ret != -EPIPE && ret != -ECONNRESET)
-			nvme_tcp_fail_request(queue->request);
+		nvme_tcp_fail_request(queue->request);
 		nvme_tcp_done_send_req(queue);
 	}
 	return ret;
@@ -1197,7 +1193,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
 		else if (unlikely(result < 0))
 			return;
 
-		if (!pending)
+		if (!pending || !queue->rd_enabled)
 			return;
 
 	} while (!time_after(jiffies, deadline)); /* quota is exhausted */
@@ -2136,9 +2132,6 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 
 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
-	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
-
 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
 	blk_mq_quiesce_queue(ctrl->admin_q);
 	if (shutdown)
@@ -2178,6 +2171,12 @@ out_fail:
 	nvme_tcp_reconnect_or_remove(ctrl);
 }
 
+static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
+{
+	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+}
+
 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
@@ -2500,6 +2499,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
 	.submit_async_event	= nvme_tcp_submit_async_event,
 	.delete_ctrl		= nvme_tcp_delete_ctrl,
 	.get_address		= nvmf_get_address,
+	.stop_ctrl		= nvme_tcp_stop_ctrl,
 };
 
 static bool
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -98,7 +98,7 @@ TRACE_EVENT(nvme_complete_rq,
 	    TP_fast_assign(
 		__entry->ctrl_id = nvme_req(req)->ctrl->instance;
 		__entry->qid = nvme_req_qid(req);
-		__entry->cid = req->tag;
+		__entry->cid = nvme_req(req)->cmd->common.command_id;
 		__entry->result = le64_to_cpu(nvme_req(req)->result.u64);
 		__entry->retries = nvme_req(req)->retries;
 		__entry->flags = nvme_req(req)->flags;