
Merge branch 'nvme-5.3' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Christoph:

"Lots of fixes all over the place, and two very minor features that
 were in the nvme tree by the end of the merge window, but hadn't made
 it out to Jens yet."

* 'nvme-5.3' of git://git.infradead.org/nvme:
  nvme: fix regression upon hot device removal and insertion
  nvme-fc: fix module unloads while lports still pending
  nvme-tcp: don't use sendpage for SLAB pages
  nvme-tcp: set the STABLE_WRITES flag when data digests are enabled
  nvmet: print a hint while rejecting NSID 0 or 0xffffffff
  nvme-multipath: do not select namespaces which are about to be removed
  nvme-multipath: also check for a disabled path if there is a single sibling
  nvme-multipath: factor out a nvme_path_is_disabled helper
  nvme: set physical block size and optimal I/O size
  nvme: add I/O characteristics fields
  nvmet: export I/O characteristics attributes in Identify
  nvme-trace: add delete completion and submission queue to admin cmds tracer
  nvme-trace: fix spelling mistake "spcecific" -> "specific"
  nvme-pci: limit max_hw_sectors based on the DMA max mapping size
  nvme-pci: check for NULL return from pci_alloc_p2pmem()
  nvme-pci: don't create a read hctx mapping without read queues
  nvme-pci: don't fall back to a 32-bit DMA mask
  nvme-pci: make nvme_dev_pm_ops static
  nvme-fcloop: resolve warnings on RCU usage and sleep warnings
  nvme-fcloop: fix inconsistent lock state warnings
Jens Axboe 2019-07-11 08:12:31 -06:00
commit b740306607
14 changed files with 237 additions and 51 deletions


@@ -11,6 +11,7 @@
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -1626,6 +1627,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
{
sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
unsigned short bs = 1 << ns->lba_shift;
u32 atomic_bs, phys_bs, io_opt;
if (ns->lba_shift > PAGE_SHIFT) {
/* unsupported block size, set capacity to 0 later */
@@ -1634,9 +1636,37 @@ static void nvme_update_disk_info(struct gendisk *disk,
blk_mq_freeze_queue(disk->queue);
blk_integrity_unregister(disk);
if (id->nabo == 0) {
/*
* Bit 1 indicates whether NAWUPF is defined for this namespace
* and whether it should be used instead of AWUPF. If NAWUPF ==
* 0 then AWUPF must be used instead.
*/
if (id->nsfeat & (1 << 1) && id->nawupf)
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else
atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
} else {
atomic_bs = bs;
}
phys_bs = bs;
io_opt = bs;
if (id->nsfeat & (1 << 4)) {
/* NPWG = Namespace Preferred Write Granularity */
phys_bs *= 1 + le16_to_cpu(id->npwg);
/* NOWS = Namespace Optimal Write Size */
io_opt *= 1 + le16_to_cpu(id->nows);
}
blk_queue_logical_block_size(disk->queue, bs);
blk_queue_physical_block_size(disk->queue, bs);
blk_queue_io_min(disk->queue, bs);
/*
* Linux filesystems assume writing a single physical block is
* an atomic operation. Hence limit the physical block size to the
* value of the Atomic Write Unit Power Fail parameter.
*/
blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
blk_queue_io_min(disk->queue, phys_bs);
blk_queue_io_opt(disk->queue, io_opt);
if (ns->ms && !ns->ext &&
(ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
@@ -2386,8 +2416,8 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
lockdep_assert_held(&nvme_subsystems_lock);
list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
if (ctrl->state == NVME_CTRL_DELETING ||
ctrl->state == NVME_CTRL_DEAD)
if (tmp->state == NVME_CTRL_DELETING ||
tmp->state == NVME_CTRL_DEAD)
continue;
if (tmp->cntlid == ctrl->cntlid) {
@@ -2433,6 +2463,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
subsys->awupf = le16_to_cpu(id->awupf);
#ifdef CONFIG_NVME_MULTIPATH
subsys->iopolicy = NVME_IOPOLICY_NUMA;
#endif
@@ -3274,6 +3305,10 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
goto out_free_ns;
}
if (ctrl->opts->data_digest)
ns->queue->backing_dev_info->capabilities
|= BDI_CAP_STABLE_WRITES;
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
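For reference, the block-size math in the nvme_update_disk_info() hunk above can be checked in isolation. The following standalone C program is only an illustration: the NAWUPF/NPWG/NOWS values are made up, and it simply mirrors the driver's arithmetic (the Identify fields are 0's based counts of LBAs, and the exported physical block size is clamped to the atomic write unit).

#include <stdio.h>
#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int lba_shift = 9;		/* 512-byte LBAs */
	uint32_t bs = 1u << lba_shift;

	/* Hypothetical Identify Namespace values, all 0's based LBA counts. */
	uint16_t nawupf = 7;			/* atomic write unit: 8 LBAs */
	uint16_t npwg = 7;			/* preferred write granularity: 8 LBAs */
	uint16_t nows = 63;			/* optimal write size: 64 LBAs */

	uint32_t atomic_bs = (1 + nawupf) * bs;	/* 4096 bytes */
	uint32_t phys_bs = (1 + npwg) * bs;	/* 4096 bytes */
	uint32_t io_opt = (1 + nows) * bs;	/* 32768 bytes */

	/*
	 * Same clamp as the patch: never advertise a physical block larger
	 * than what the device can write atomically across a power failure.
	 */
	printf("logical=%u physical=%u io_min=%u io_opt=%u\n",
	       bs, min_u32(phys_bs, atomic_bs), phys_bs, io_opt);
	return 0;
}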


@@ -204,6 +204,9 @@ static DEFINE_IDA(nvme_fc_ctrl_cnt);
static struct workqueue_struct *nvme_fc_wq;
static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);
/*
* These items are short-term. They will eventually be moved into
* a generic FC class. See comments in module init.
@@ -229,6 +232,8 @@ nvme_fc_free_lport(struct kref *ref)
/* remove from transport list */
spin_lock_irqsave(&nvme_fc_lock, flags);
list_del(&lport->port_list);
if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
@@ -3456,11 +3461,51 @@ out_destroy_wq:
return ret;
}
static void
nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
{
struct nvme_fc_ctrl *ctrl;
spin_lock(&rport->lock);
list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport unloading: deleting ctrl\n",
ctrl->cnum);
nvme_delete_ctrl(&ctrl->ctrl);
}
spin_unlock(&rport->lock);
}
static void
nvme_fc_cleanup_for_unload(void)
{
struct nvme_fc_lport *lport;
struct nvme_fc_rport *rport;
list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
list_for_each_entry(rport, &lport->endp_list, endp_list) {
nvme_fc_delete_controllers(rport);
}
}
}
static void __exit nvme_fc_exit_module(void)
{
/* sanity check - all lports should be removed */
if (!list_empty(&nvme_fc_lport_list))
pr_warn("%s: localport list not empty\n", __func__);
unsigned long flags;
bool need_cleanup = false;
spin_lock_irqsave(&nvme_fc_lock, flags);
nvme_fc_waiting_to_unload = true;
if (!list_empty(&nvme_fc_lport_list)) {
need_cleanup = true;
nvme_fc_cleanup_for_unload();
}
spin_unlock_irqrestore(&nvme_fc_lock, flags);
if (need_cleanup) {
pr_info("%s: waiting for ctlr deletes\n", __func__);
wait_for_completion(&nvme_fc_unload_proceed);
pr_info("%s: ctrl deletes complete\n", __func__);
}
nvmf_unregister_transport(&nvme_fc_transport);
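The unload fix above boils down to a small pattern: module exit raises a flag under the lock, asks any remaining ports and controllers to go away, and the final list removal fires a completion that exit is waiting on. A condensed, hypothetical module sketch of just that pattern (invented names, no error handling, teardown triggering omitted):

#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>

static LIST_HEAD(demo_port_list);
static DEFINE_SPINLOCK(demo_lock);
static bool demo_waiting_to_unload;
static DECLARE_COMPLETION(demo_unload_proceed);

struct demo_port {
	struct list_head port_list;
};

/* Called from whatever tears a port down (e.g. a kref release). */
static void demo_free_port(struct demo_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_del(&port->port_list);
	/* The last port out wakes up a pending module unload. */
	if (demo_waiting_to_unload && list_empty(&demo_port_list))
		complete(&demo_unload_proceed);
	spin_unlock_irqrestore(&demo_lock, flags);
	kfree(port);
}

static void __exit demo_exit(void)
{
	unsigned long flags;
	bool need_wait = false;

	spin_lock_irqsave(&demo_lock, flags);
	demo_waiting_to_unload = true;
	if (!list_empty(&demo_port_list))
		need_wait = true;	/* the real code also kicks off controller deletion here */
	spin_unlock_irqrestore(&demo_lock, flags);

	if (need_wait)
		wait_for_completion(&demo_unload_proceed);
}
module_exit(demo_exit);
MODULE_LICENSE("GPL");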


@@ -123,14 +123,20 @@ void nvme_mpath_clear_current_path(struct nvme_ns *ns)
}
}
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
return ns->ctrl->state != NVME_CTRL_LIVE ||
test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
test_bit(NVME_NS_REMOVING, &ns->flags);
}
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
struct nvme_ns *found = NULL, *fallback = NULL, *ns;
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (ns->ctrl->state != NVME_CTRL_LIVE ||
test_bit(NVME_NS_ANA_PENDING, &ns->flags))
if (nvme_path_is_disabled(ns))
continue;
if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
@@ -178,14 +184,16 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
{
struct nvme_ns *ns, *found, *fallback = NULL;
if (list_is_singular(&head->list))
if (list_is_singular(&head->list)) {
if (nvme_path_is_disabled(old))
return NULL;
return old;
}
for (ns = nvme_next_ns(head, old);
ns != old;
ns = nvme_next_ns(head, ns)) {
if (ns->ctrl->state != NVME_CTRL_LIVE ||
test_bit(NVME_NS_ANA_PENDING, &ns->flags))
if (nvme_path_is_disabled(ns))
continue;
if (ns->ana_state == NVME_ANA_OPTIMIZED) {
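As a rough userspace illustration of the selection rule these hunks implement (ANA state handling left out on purpose, names invented): round-robin over the sibling paths while skipping disabled ones, and refuse even a lone sibling if it is disabled rather than returning it unconditionally.

#include <stdbool.h>
#include <stdio.h>

struct path {
	const char *name;
	bool disabled;	/* ctrl not live, ANA pending, or being removed */
};

static const struct path *pick_next(const struct path *paths, int n, int old)
{
	if (n == 1)
		return paths[old].disabled ? NULL : &paths[old];

	/* Walk the ring starting after "old"; "old" itself is checked last. */
	for (int i = 1; i <= n; i++) {
		const struct path *p = &paths[(old + i) % n];

		if (!p->disabled)
			return p;
	}
	return NULL;
}

int main(void)
{
	const struct path paths[] = {
		{ "nvme0c0n1", false },
		{ "nvme0c1n1", true },		/* e.g. controller resetting */
		{ "nvme0c2n1", false },
	};
	const struct path *p = pick_next(paths, 3, 0);

	printf("next path: %s\n", p ? p->name : "none");
	return 0;
}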


@@ -283,6 +283,7 @@ struct nvme_subsystem {
char firmware_rev[8];
u8 cmic;
u16 vendor_id;
u16 awupf; /* 0's based awupf value. */
struct ida ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
enum nvme_iopolicy iopolicy;


@@ -1439,11 +1439,15 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
nvmeq->sq_cmds);
if (nvmeq->sq_dma_addr) {
set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
return 0;
if (nvmeq->sq_cmds) {
nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
nvmeq->sq_cmds);
if (nvmeq->sq_dma_addr) {
set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
return 0;
}
pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
}
}
@@ -2250,7 +2254,9 @@ static int nvme_dev_add(struct nvme_dev *dev)
if (!dev->ctrl.tagset) {
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
dev->tagset.nr_maps = 2; /* default + read */
dev->tagset.nr_maps = 1; /* default */
if (dev->io_queues[HCTX_TYPE_READ])
dev->tagset.nr_maps++;
if (dev->io_queues[HCTX_TYPE_POLL])
dev->tagset.nr_maps++;
dev->tagset.timeout = NVME_IO_TIMEOUT;
@@ -2289,8 +2295,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
pci_set_master(pdev);
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
goto disable;
if (readl(dev->bar + NVME_REG_CSTS) == -1) {
@@ -2498,7 +2503,8 @@ static void nvme_reset_work(struct work_struct *work)
* Limit the max command size to prevent iod->sg allocations going
* over a single page.
*/
dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
dev->ctrl.max_hw_sectors = min_t(u32,
NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9);
dev->ctrl.max_segments = NVME_MAX_SEGS;
/*
@@ -2923,7 +2929,7 @@ static int nvme_simple_resume(struct device *dev)
return 0;
}
const struct dev_pm_ops nvme_dev_pm_ops = {
static const struct dev_pm_ops nvme_dev_pm_ops = {
.suspend = nvme_suspend,
.resume = nvme_resume,
.freeze = nvme_simple_suspend,
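A quick standalone worked example of the new max_hw_sectors clamp (the DMA limit below is hypothetical): dma_max_mapping_size() reports a byte limit, shifting right by 9 converts it to 512-byte sectors, and the result is capped at the driver's own 4 MiB ceiling.

#include <stdio.h>
#include <stdint.h>

#define NVME_MAX_KB_SZ	4096u	/* same value the driver uses: 4 MiB per command */

int main(void)
{
	/* Pretend swiotlb limits a single mapping to 256 KiB. */
	uint64_t dma_max_mapping_bytes = 256 * 1024;

	uint32_t driver_cap = NVME_MAX_KB_SZ << 1;			/* KiB -> 512-byte sectors */
	uint32_t dma_cap = (uint32_t)(dma_max_mapping_bytes >> 9);	/* bytes -> 512-byte sectors */
	uint32_t max_hw_sectors = driver_cap < dma_cap ? driver_cap : dma_cap;

	printf("max_hw_sectors = min(%u, %u) = %u (%u KiB per I/O)\n",
	       driver_cap, dma_cap, max_hw_sectors, max_hw_sectors / 2);
	return 0;
}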


@@ -860,7 +860,14 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
else
flags |= MSG_MORE;
ret = kernel_sendpage(queue->sock, page, offset, len, flags);
/* can't zcopy slab pages */
if (unlikely(PageSlab(page))) {
ret = sock_no_sendpage(queue->sock, page, offset, len,
flags);
} else {
ret = kernel_sendpage(queue->sock, page, offset, len,
flags);
}
if (ret <= 0)
return ret;
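The sendpage change above follows a general rule: kernel_sendpage() hands the page to the network stack, which takes its own page reference and may keep the page queued after the call returns, while slab-allocated buffers do not provide the per-page refcounting that relies on, so they have to be copied through sock_no_sendpage() instead. A condensed kernel-style sketch of that decision, with a hypothetical helper name:

#include <linux/net.h>
#include <linux/mm.h>

static int demo_send_page(struct socket *sock, struct page *page,
			  int offset, size_t len, int flags)
{
	/* Slab memory may be reused by the allocator while the network
	 * stack still references the page, so fall back to a copying send. */
	if (PageSlab(page))
		return sock_no_sendpage(sock, page, offset, len, flags);
	return kernel_sendpage(sock, page, offset, len, flags);
}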


@@ -7,6 +7,17 @@
#include <asm/unaligned.h>
#include "trace.h"
static const char *nvme_trace_delete_sq(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
u16 sqid = get_unaligned_le16(cdw10);
trace_seq_printf(p, "sqid=%u", sqid);
trace_seq_putc(p, 0);
return ret;
}
static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -23,6 +34,17 @@ static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10)
return ret;
}
static const char *nvme_trace_delete_cq(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
u16 cqid = get_unaligned_le16(cdw10);
trace_seq_printf(p, "cqid=%u", cqid);
trace_seq_putc(p, 0);
return ret;
}
static const char *nvme_trace_create_cq(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -107,8 +129,12 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
u8 opcode, u8 *cdw10)
{
switch (opcode) {
case nvme_admin_delete_sq:
return nvme_trace_delete_sq(p, cdw10);
case nvme_admin_create_sq:
return nvme_trace_create_sq(p, cdw10);
case nvme_admin_delete_cq:
return nvme_trace_delete_cq(p, cdw10);
case nvme_admin_create_cq:
return nvme_trace_create_cq(p, cdw10);
case nvme_admin_identify:
@@ -178,7 +204,7 @@ static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "spcecific=%*ph", 24, spc);
trace_seq_printf(p, "specific=%*ph", 24, spc);
trace_seq_putc(p, 0);
return ret;
}
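The two new decoders simply pull the 16-bit queue identifier out of the low bytes of CDW10, which is stored little-endian in the submission queue entry. A standalone illustration of that decoding with a made-up command word:

#include <stdio.h>
#include <stdint.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	uint8_t cdw10[4] = { 0x03, 0x00, 0x00, 0x00 };	/* delete queue 3 */

	printf("sqid=%u\n", get_le16(cdw10));
	return 0;
}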


@@ -442,6 +442,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
break;
}
if (ns->bdev)
nvmet_bdev_set_limits(ns->bdev, id);
/*
* We just provide a single LBA format that matches what the
* underlying device reports.


@@ -588,8 +588,10 @@ static struct config_group *nvmet_ns_make(struct config_group *group,
goto out;
ret = -EINVAL;
if (nsid == 0 || nsid == NVME_NSID_ALL)
if (nsid == 0 || nsid == NVME_NSID_ALL) {
pr_err("invalid nsid %#x", nsid);
goto out;
}
ret = -ENOMEM;
ns = nvmet_ns_alloc(subsys, nsid);


@@ -434,7 +434,7 @@ fcloop_fcp_recv_work(struct work_struct *work)
int ret = 0;
bool aborted = false;
spin_lock(&tfcp_req->reqlock);
spin_lock_irq(&tfcp_req->reqlock);
switch (tfcp_req->inistate) {
case INI_IO_START:
tfcp_req->inistate = INI_IO_ACTIVE;
@@ -443,11 +443,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
aborted = true;
break;
default:
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
WARN_ON(1);
return;
}
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
if (unlikely(aborted))
ret = -ECANCELED;
@@ -469,7 +469,7 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
struct nvmefc_fcp_req *fcpreq;
bool completed = false;
spin_lock(&tfcp_req->reqlock);
spin_lock_irq(&tfcp_req->reqlock);
fcpreq = tfcp_req->fcpreq;
switch (tfcp_req->inistate) {
case INI_IO_ABORTED:
@@ -478,11 +478,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
completed = true;
break;
default:
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
WARN_ON(1);
return;
}
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
if (unlikely(completed)) {
/* remove reference taken in original abort downcall */
@@ -494,9 +494,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
&tfcp_req->tgt_fcp_req);
spin_lock(&tfcp_req->reqlock);
spin_lock_irq(&tfcp_req->reqlock);
tfcp_req->fcpreq = NULL;
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
/* call_host_done releases reference for abort downcall */
@@ -513,10 +513,10 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
container_of(work, struct fcloop_fcpreq, tio_done_work);
struct nvmefc_fcp_req *fcpreq;
spin_lock(&tfcp_req->reqlock);
spin_lock_irq(&tfcp_req->reqlock);
fcpreq = tfcp_req->fcpreq;
tfcp_req->inistate = INI_IO_COMPLETED;
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
@@ -535,7 +535,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
if (!rport->targetport)
return -ECONNREFUSED;
tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
if (!tfcp_req)
return -ENOMEM;
@@ -621,12 +621,12 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
int fcp_err = 0, active, aborted;
u8 op = tgt_fcpreq->op;
spin_lock(&tfcp_req->reqlock);
spin_lock_irq(&tfcp_req->reqlock);
fcpreq = tfcp_req->fcpreq;
active = tfcp_req->active;
aborted = tfcp_req->aborted;
tfcp_req->active = true;
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
if (unlikely(active))
/* illegal - call while i/o active */
@@ -634,9 +634,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
if (unlikely(aborted)) {
/* target transport has aborted i/o prior */
spin_lock(&tfcp_req->reqlock);
spin_lock_irq(&tfcp_req->reqlock);
tfcp_req->active = false;
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
tgt_fcpreq->transferred_length = 0;
tgt_fcpreq->fcp_error = -ECANCELED;
tgt_fcpreq->done(tgt_fcpreq);
@@ -693,9 +693,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
break;
}
spin_lock(&tfcp_req->reqlock);
spin_lock_irq(&tfcp_req->reqlock);
tfcp_req->active = false;
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
tgt_fcpreq->transferred_length = xfrlen;
tgt_fcpreq->fcp_error = fcp_err;
@@ -715,9 +715,9 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
* (one doing io, other doing abort) and only kills ops posted
* after the abort request
*/
spin_lock(&tfcp_req->reqlock);
spin_lock_irq(&tfcp_req->reqlock);
tfcp_req->aborted = true;
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
tfcp_req->status = NVME_SC_INTERNAL;
@@ -765,7 +765,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
return;
/* break initiator/target relationship for io */
spin_lock(&tfcp_req->reqlock);
spin_lock_irq(&tfcp_req->reqlock);
switch (tfcp_req->inistate) {
case INI_IO_START:
case INI_IO_ACTIVE:
@@ -775,11 +775,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
abortio = false;
break;
default:
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
WARN_ON(1);
return;
}
spin_unlock(&tfcp_req->reqlock);
spin_unlock_irq(&tfcp_req->reqlock);
if (abortio)
/* leave the reference while the work item is scheduled */
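The fcloop switch to the _irq lock variants addresses what lockdep's "inconsistent lock state" report points at: once a lock can be taken from an interrupt-driven completion path, every process-context holder must keep interrupts off, otherwise an interrupt arriving on the same CPU can try to re-acquire the lock and deadlock. The kzalloc(GFP_ATOMIC) change is the companion rule that code running in a context that may not sleep must not use a sleeping allocation. A generic sketch of the locking pattern, with invented names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static bool demo_active;

/* Runs in process context (e.g. a work item), interrupts otherwise enabled. */
static void demo_start_io(void)
{
	spin_lock_irq(&demo_lock);	/* not plain spin_lock() */
	demo_active = true;
	spin_unlock_irq(&demo_lock);
}

/* Runs from an interrupt-driven completion path. */
static void demo_complete_io(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_active = false;
	spin_unlock_irqrestore(&demo_lock, flags);
}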


@@ -8,6 +8,45 @@
#include <linux/module.h>
#include "nvmet.h"
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
/* Number of physical blocks per logical block. */
const u32 ppl = ql->physical_block_size / ql->logical_block_size;
/* Physical blocks per logical block, 0's based. */
const __le16 ppl0b = to0based(ppl);
/*
* For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
* NAWUPF, and NACWU are defined for this namespace and should be
* used by the host for this namespace instead of the AWUN, AWUPF,
* and ACWU fields in the Identify Controller data structure. If
* any of these fields are zero that means that the corresponding
* field from the identify controller data structure should be used.
*/
id->nsfeat |= 1 << 1;
id->nawun = ppl0b;
id->nawupf = ppl0b;
id->nacwu = ppl0b;
/*
* Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
* NOWS are defined for this namespace and should be used by
* the host for I/O optimization.
*/
id->nsfeat |= 1 << 4;
/* NPWG = Namespace Preferred Write Granularity. 0's based */
id->npwg = ppl0b;
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
/* NPDG = Namespace Preferred Deallocate Alignment */
id->npda = id->npdg;
/* NOWS = Namespace Optimal Write Size */
id->nows = to0based(ql->io_opt / ql->logical_block_size);
}
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
int ret;
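To make the derivation above concrete, here is a standalone worked example with a made-up block device geometry (512-byte logical blocks, 4 KiB physical blocks, 4 KiB discard granularity, 64 KiB optimal I/O). The local to0based() mirrors the helper the patch adds to nvmet.h further down.

#include <stdio.h>
#include <stdint.h>

static uint16_t to0based(uint32_t a)
{
	if (a < 1)
		a = 1;
	if (a > 65536)
		a = 65536;
	return (uint16_t)(a - 1);
}

int main(void)
{
	uint32_t logical_block_size = 512;
	uint32_t physical_block_size = 4096;
	uint32_t discard_granularity = 4096;
	uint32_t io_opt = 65536;

	/* Number of logical blocks per physical block. */
	uint32_t ppl = physical_block_size / logical_block_size;	/* 8 */

	printf("nawun/nawupf/nacwu/npwg/npwa = %u (%u LBAs)\n", to0based(ppl), ppl);
	printf("npdg/npda = %u\n", to0based(discard_granularity / logical_block_size));
	printf("nows = %u\n", to0based(io_opt / logical_block_size));
	return 0;
}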


@@ -365,6 +365,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
@@ -492,4 +493,11 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req)
}
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
#endif /* _NVMET_H */
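A quick standalone check of the clamping the new to0based() helper performs: inputs below one unit are reported as one unit, and anything above 64K saturates at the largest representable 0's based value.

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

static uint16_t to0based(uint32_t a)	/* same semantics as the kernel helper */
{
	if (a < 1)
		a = 1;
	if (a > 65536)
		a = 65536;
	return (uint16_t)(a - 1);
}

int main(void)
{
	assert(to0based(0) == 0);		/* too small: treated as one unit */
	assert(to0based(8) == 7);		/* normal case */
	assert(to0based(1u << 20) == 65535);	/* too large: saturates */
	printf("to0based clamping ok\n");
	return 0;
}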


@@ -146,7 +146,7 @@ static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "spcecific=%*ph", 24, spc);
trace_seq_printf(p, "specific=%*ph", 24, spc);
trace_seq_putc(p, 0);
return ret;
}


@@ -315,7 +315,7 @@ struct nvme_id_ns {
__u8 nmic;
__u8 rescap;
__u8 fpi;
__u8 rsvd33;
__u8 dlfeat;
__le16 nawun;
__le16 nawupf;
__le16 nacwu;
@@ -324,11 +324,17 @@ struct nvme_id_ns {
__le16 nabspf;
__le16 noiob;
__u8 nvmcap[16];
__u8 rsvd64[28];
__le16 npwg;
__le16 npwa;
__le16 npdg;
__le16 npda;
__le16 nows;
__u8 rsvd74[18];
__le32 anagrpid;
__u8 rsvd96[3];
__u8 nsattr;
__u8 rsvd100[4];
__le16 nvmsetid;
__le16 endgid;
__u8 nguid[16];
__u8 eui64[8];
struct nvme_lbaf lbaf[16];
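Since the new fields are carved out of the old rsvd64[28] hole, nothing after them may move. A standalone layout check (a hypothetical struct that models just the affected region, not the patch itself) makes that explicit: npwg must start at byte 64 and anagrpid must stay at byte 92, matching the Identify Namespace data structure.

#include <stddef.h>
#include <stdint.h>

struct demo_id_ns_region {
	uint8_t  head[64];			/* nsze .. nvmcap: bytes 0..63 */
	uint16_t npwg, npwa, npdg, npda, nows;	/* new fields: bytes 64..73 */
	uint8_t  rsvd74[18];			/* bytes 74..91 */
	uint32_t anagrpid;			/* byte 92, unchanged by the patch */
};

_Static_assert(offsetof(struct demo_id_ns_region, npwg) == 64,
	       "new I/O characteristics fields must start at byte 64");
_Static_assert(offsetof(struct demo_id_ns_region, anagrpid) == 92,
	       "anagrpid must stay at byte 92");

int main(void)
{
	return 0;	/* compiling is the test */
}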