[web-scrape residue: repository star/fork counters]

lightnvm: use internal allocation for chunk log page

The lightnvm subsystem provides helpers to retrieve chunk metadata,
where the target needs to provide a buffer to store the metadata. An
implicit assumption is that this buffer is contiguous and can be used to
retrieve the data from the device. If the device exposes too many
chunks, then kmalloc might fail, thus failing instance creation.

This patch removes this assumption by implementing an internal buffer in
the lightnvm subsystem to retrieve chunk metadata. Targets can then
use virtual memory allocations. Since this is a target API change, adapt
pblk accordingly.

Signed-off-by: Javier González <javier@cnexlabs.com>
Reviewed-by: Hans Holmberg <hans.holmberg@cnexlabs.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
hifive-unleashed-5.1
Javier González 2018-10-09 13:12:01 +02:00 committed by Jens Axboe
parent 7325b4bbe5
commit 090ee26fd5
3 changed files with 20 additions and 11 deletions

View File

@ -120,7 +120,7 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
/*
* Get information for all chunks from the device.
*
* The caller is responsible for freeing the returned structure
* The caller is responsible for freeing (vmalloc) the returned structure
*/
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
{
@ -134,7 +134,7 @@ struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
ppa.ppa = 0;
len = geo->all_chunks * sizeof(*meta);
meta = kzalloc(len, GFP_KERNEL);
meta = vzalloc(len);
if (!meta)
return ERR_PTR(-ENOMEM);

View File

@ -1039,7 +1039,7 @@ static int pblk_lines_init(struct pblk *pblk)
pblk_set_provision(pblk, nr_free_chks);
kfree(chunk_meta);
vfree(chunk_meta);
return 0;
fail_free_lines:

View File

@ -573,7 +573,7 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
struct nvm_geo *geo = &ndev->geo;
struct nvme_ns *ns = ndev->q->queuedata;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_chk_meta *dev_meta = (struct nvme_nvm_chk_meta *)meta;
struct nvme_nvm_chk_meta *dev_meta, *dev_meta_off;
struct ppa_addr ppa;
size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
size_t log_pos, offset, len;
@ -585,6 +585,10 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
*/
max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);
dev_meta = kmalloc(max_len, GFP_KERNEL);
if (!dev_meta)
return -ENOMEM;
/* Normalize lba address space to obtain log offset */
ppa.ppa = slba;
ppa = dev_to_generic_addr(ndev, ppa);
@ -598,6 +602,9 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
while (left) {
len = min_t(unsigned int, left, max_len);
memset(dev_meta, 0, max_len);
dev_meta_off = dev_meta;
ret = nvme_get_log(ctrl, ns->head->ns_id,
NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len,
offset);
@ -607,21 +614,23 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
}
for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
meta->state = dev_meta->state;
meta->type = dev_meta->type;
meta->wi = dev_meta->wi;
meta->slba = le64_to_cpu(dev_meta->slba);
meta->cnlb = le64_to_cpu(dev_meta->cnlb);
meta->wp = le64_to_cpu(dev_meta->wp);
meta->state = dev_meta_off->state;
meta->type = dev_meta_off->type;
meta->wi = dev_meta_off->wi;
meta->slba = le64_to_cpu(dev_meta_off->slba);
meta->cnlb = le64_to_cpu(dev_meta_off->cnlb);
meta->wp = le64_to_cpu(dev_meta_off->wp);
meta++;
dev_meta++;
dev_meta_off++;
}
offset += len;
left -= len;
}
kfree(dev_meta);
return ret;
}