for-linus-20180608
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAlsa4sQQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpqPNEADbZby01Q6i+dTZYIosz5+/gq8gSkCcpQ/T
krK/f2MlwD7Rdog1BnGNNP5XOqK8pKIGdARL1FQKpViii6xGIoOc2F4VK+vO44yR
LI+BeeOM6rWNOAoBO4CqeZz/Fv5IYi7KURWogYhZMqrxBqT2OeD9MMowm5NulBix
YZ2ttFWiTJScJttJCDPE6cu9EjHDeK63Nr7+UU80k3atU4eUpUp1mRFGmtaYWulq
l3KaENCwm00WCVqM4i/gVWr2AkgTZqAAyeCx7IrPsrQrCMEhxEpMnU52e2kXSxhM
Qx6FLNEOjzARuBDurtfJE74usQcW2xDLzT8fh2UStnPpt6S/JX6f9GMBVk0G7I8B
8COF4DF+bzdbhhz2SiZaTFOmDML5H1iQ8t6lTTms0Bnq29mE3E4QFom8lO+2BxN3
g6PFhvYaOkhTVtV5BPXpXs9xZBLHrv5G/JopXsZh0RF1kpiova+nfA1K2uJPFpJ0
NcHuMZKmIG3uBqY3fj5Ul+zuVhZ/1v8B69zWoSWafLrk+VRdcEAniuY2E6SsQFP5
gV4GNja85S53DnlIVwEUXPYMiY6opiwP53yMNMvkB/FdzaQB5Ehdif2fhZu64QmE
TtqbHtAuV0VZ3z4GrJ3XNbV6Np4wMOhYls4lTkZsnqNNO2sw/eoTYcmwxLDEYOQw
uQ9rhZh4IQ==
=N3BP
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20180608' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes for this merge window, where some of them should go in
  sooner rather than later, hence a new pull this week.

  This pull request contains:

   - Set of NVMe fixes, mostly follow up cleanups/fixes to the queue
     changes, but also teardown/removal and misc changes (Christoph/
     Dan/Johannes/Sagi/Steve).

   - Two lightnvm fixes for issues that showed up in this window
     (Colin/Wei).

   - Failfast/driver flags inheritance for flush requests (Hannes).

   - The md device put sanitization and fix (Kent).

   - dm bio_set inheritance fix (me).

   - nbd discard granularity fix (Josef).

   - nbd consistency in command printing (Kevin).

   - Loop recursion validation fix (Ted).

   - Partition overlap check (Wang)"

[ .. and now my build is warning-free again thanks to the md fix - Linus ]

* tag 'for-linus-20180608' of git://git.kernel.dk/linux-block: (22 commits)
  nvme: cleanup double shift issue
  nvme-pci: make CMB SQ mod-param read-only
  nvme-pci: unquiesce dead controller queues
  nvme-pci: remove HMB teardown on reset
  nvme-pci: queue creation fixes
  nvme-pci: remove unnecessary completion doorbell check
  nvme-pci: remove unnecessary nested locking
  nvmet: filter newlines from user input
  nvme-rdma: correctly check for target keyed sgl support
  nvme: don't hold nvmf_transports_rwsem for more than transport lookups
  nvmet: return all zeroed buffer when we can't find an active namespace
  md: Unify mddev destruction paths
  dm: use bioset_init_from_src() to copy bio_set
  block: add bioset_init_from_src() helper
  block: always set partition number to '0' in blk_partition_remap()
  block: pass failfast and driver-specific flags to flush requests
  nbd: set discard_alignment to the granularity
  nbd: Consistently use request pointer in debug messages.
  block: add verifier for cmdline partition
  lightnvm: pblk: fix resource leak of invalid_bitmap
  ...
commit a3818841bd
diff --git a/block/bio.c b/block/bio.c
@@ -1997,6 +1997,24 @@ bad:
 }
 EXPORT_SYMBOL(bioset_init);
 
+/*
+ * Initialize and setup a new bio_set, based on the settings from
+ * another bio_set.
+ */
+int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
+{
+	int flags;
+
+	flags = 0;
+	if (src->bvec_pool.min_nr)
+		flags |= BIOSET_NEED_BVECS;
+	if (src->rescue_workqueue)
+		flags |= BIOSET_NEED_RESCUER;
+
+	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
+}
+EXPORT_SYMBOL(bioset_init_from_src);
+
 #ifdef CONFIG_BLK_CGROUP
 
 /**
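
The helper copies the settings that matter for allocation behavior: the bio pool size, the front pad, and whether the source set needed a bvec pool or a rescuer workqueue. A minimal sketch of a caller (hypothetical driver code, not part of this commit):

struct bio_set clone_bs;
struct bio *bio;
int err;

/* Stamp out a bio_set with the same sizing and flags as src_bs. */
err = bioset_init_from_src(&clone_bs, &src_bs);
if (err)
	return err;

bio = bio_alloc_bioset(GFP_KERNEL, 1, &clone_bs);
/* ... use the bio ... */

bioset_exit(&clone_bs);	/* safe after a successful bioset_init_from_src() */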
diff --git a/block/blk-core.c b/block/blk-core.c
@@ -2220,10 +2220,10 @@ static inline int blk_partition_remap(struct bio *bio)
 		if (bio_check_eod(bio, part_nr_sects_read(p)))
 			goto out;
 		bio->bi_iter.bi_sector += p->start_sect;
-		bio->bi_partno = 0;
 		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
 				      bio->bi_iter.bi_sector - p->start_sect);
 	}
+	bio->bi_partno = 0;
 	ret = 0;
 out:
 	rcu_read_unlock();
diff --git a/block/blk-flush.c b/block/blk-flush.c
@@ -94,7 +94,7 @@ enum {
 };
 
 static bool blk_kick_flush(struct request_queue *q,
-			   struct blk_flush_queue *fq);
+			   struct blk_flush_queue *fq, unsigned int flags);
 
 static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 {
@@ -212,7 +212,7 @@ static bool blk_flush_complete_seq(struct request *rq,
 		BUG();
 	}
 
-	kicked = blk_kick_flush(q, fq);
+	kicked = blk_kick_flush(q, fq, rq->cmd_flags);
 	return kicked | queued;
 }
 
@@ -281,6 +281,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
  * @fq: flush queue
+ * @flags: cmd_flags of the original request
  *
  * Flush related states of @q have changed, consider issuing flush request.
  * Please read the comment at the top of this file for more info.
@@ -291,7 +292,8 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
  * RETURNS:
  * %true if flush was issued, %false otherwise.
  */
-static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
+static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
+			   unsigned int flags)
 {
 	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
 	struct request *first_rq =
@@ -346,6 +348,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	}
 
 	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
+	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
 	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
 	flush_rq->rq_disk = first_rq->rq_disk;
 	flush_rq->end_io = flush_end_io;
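
The effect is that a flush request generated on behalf of a failfast or driver-flagged write no longer goes out with bare flags. A hedged sketch of the submitter side (hypothetical caller; only the flag composition is the point):

struct bio *bio = bio_alloc(GFP_NOIO, 0);

bio_set_dev(bio, bdev);
/* A preflush+FUA write that should fail fast on transport errors. */
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
	      REQ_FAILFAST_TRANSPORT;
submit_bio(bio);

/*
 * blk_kick_flush() now ORs (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK)
 * from the originating request into flush_rq->cmd_flags, so the PREFLUSH
 * sent to the device carries the same failfast policy as the data write.
 */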
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
@@ -58,6 +58,62 @@ static int __init cmdline_parts_setup(char *s)
 }
 __setup("blkdevparts=", cmdline_parts_setup);
 
+static bool has_overlaps(sector_t from, sector_t size,
+			 sector_t from2, sector_t size2)
+{
+	sector_t end = from + size;
+	sector_t end2 = from2 + size2;
+
+	if (from >= from2 && from < end2)
+		return true;
+
+	if (end > from2 && end <= end2)
+		return true;
+
+	if (from2 >= from && from2 < end)
+		return true;
+
+	if (end2 > from && end2 <= end)
+		return true;
+
+	return false;
+}
+
+static inline void overlaps_warns_header(void)
+{
+	pr_warn("Overlapping partitions are used in command line partitions.");
+	pr_warn("Don't use filesystems on overlapping partitions:");
+}
+
+static void cmdline_parts_verifier(int slot, struct parsed_partitions *state)
+{
+	int i;
+	bool header = true;
+
+	for (; slot < state->limit && state->parts[slot].has_info; slot++) {
+		for (i = slot+1; i < state->limit && state->parts[i].has_info;
+		     i++) {
+			if (has_overlaps(state->parts[slot].from,
+					 state->parts[slot].size,
+					 state->parts[i].from,
+					 state->parts[i].size)) {
+				if (header) {
+					header = false;
+					overlaps_warns_header();
+				}
+				pr_warn("%s[%llu,%llu] overlaps with "
+					"%s[%llu,%llu].",
+					state->parts[slot].info.volname,
+					(u64)state->parts[slot].from << 9,
+					(u64)state->parts[slot].size << 9,
+					state->parts[i].info.volname,
+					(u64)state->parts[i].from << 9,
+					(u64)state->parts[i].size << 9);
+			}
+		}
+	}
+}
+
 /*
  * Purpose: allocate cmdline partitions.
  * Returns:
@@ -93,6 +149,7 @@ int cmdline_partition(struct parsed_partitions *state)
 	disk_size = get_capacity(state->bdev->bd_disk) << 9;
 
 	cmdline_parts_set(parts, disk_size, 1, add_part, (void *)state);
+	cmdline_parts_verifier(1, state);
 
 	strlcat(state->pp_buf, "\n", PAGE_SIZE);
 
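
The predicate treats each partition as the half-open sector range [from, from + size). A small userspace harness (hypothetical, mirroring the kernel logic) shows it flagging the kind of blkdevparts= layout the verifier warns about:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;	/* stand-in for the kernel type */

static bool has_overlaps(sector_t from, sector_t size,
			 sector_t from2, sector_t size2)
{
	sector_t end = from + size, end2 = from2 + size2;

	return (from >= from2 && from < end2) ||
	       (end > from2 && end <= end2) ||
	       (from2 >= from && from2 < end) ||
	       (end2 > from && end2 <= end);
}

int main(void)
{
	/* 64M@0(boot),64M@32M(root): root starts inside boot -> overlap */
	printf("%d\n", has_overlaps(0, 131072, 65536, 131072));	/* 1 */
	/* 64M@0,64M@64M: exactly adjacent ranges do not overlap */
	printf("%d\n", has_overlaps(0, 131072, 131072, 131072));	/* 0 */
	return 0;
}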
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
@@ -647,6 +647,36 @@ static void loop_reread_partitions(struct loop_device *lo,
 			__func__, lo->lo_number, lo->lo_file_name, rc);
 }
 
+static inline int is_loop_device(struct file *file)
+{
+	struct inode *i = file->f_mapping->host;
+
+	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
+}
+
+static int loop_validate_file(struct file *file, struct block_device *bdev)
+{
+	struct inode *inode = file->f_mapping->host;
+	struct file *f = file;
+
+	/* Avoid recursion */
+	while (is_loop_device(f)) {
+		struct loop_device *l;
+
+		if (f->f_mapping->host->i_bdev == bdev)
+			return -EBADF;
+
+		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
+		if (l->lo_state == Lo_unbound) {
+			return -EINVAL;
+		}
+		f = l->lo_backing_file;
+	}
+	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
+		return -EINVAL;
+	return 0;
+}
+
 /*
  * loop_change_fd switched the backing store of a loopback device to
  * a new file. This is useful for operating system installers to free up
@@ -676,14 +706,15 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 	if (!file)
 		goto out;
 
+	error = loop_validate_file(file, bdev);
+	if (error)
+		goto out_putf;
+
 	inode = file->f_mapping->host;
 	old_file = lo->lo_backing_file;
 
 	error = -EINVAL;
 
-	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
-		goto out_putf;
-
 	/* size of the new backing store needs to be the same */
 	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
 		goto out_putf;
@@ -709,13 +740,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 	return error;
 }
 
-static inline int is_loop_device(struct file *file)
-{
-	struct inode *i = file->f_mapping->host;
-
-	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
-}
-
 /* loop sysfs attributes */
 
 static ssize_t loop_attr_show(struct device *dev, char *page,
@@ -881,7 +905,7 @@ static int loop_prepare_queue(struct loop_device *lo)
 static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 		       struct block_device *bdev, unsigned int arg)
 {
-	struct file *file, *f;
+	struct file *file;
 	struct inode *inode;
 	struct address_space *mapping;
 	int lo_flags = 0;
@@ -900,29 +924,13 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	if (lo->lo_state != Lo_unbound)
 		goto out_putf;
 
-	/* Avoid recursion */
-	f = file;
-	while (is_loop_device(f)) {
-		struct loop_device *l;
-
-		if (f->f_mapping->host->i_bdev == bdev)
-			goto out_putf;
-
-		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
-		if (l->lo_state == Lo_unbound) {
-			error = -EINVAL;
-			goto out_putf;
-		}
-		f = l->lo_backing_file;
-	}
+	error = loop_validate_file(file, bdev);
+	if (error)
+		goto out_putf;
 
 	mapping = file->f_mapping;
 	inode = mapping->host;
 
-	error = -EINVAL;
-	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
-		goto out_putf;
-
 	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
 	    !file->f_op->write_iter)
 		lo_flags |= LO_FLAGS_READ_ONLY;
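
Userspace sees the shared validation as an errno from the loop ioctls; LOOP_CHANGE_FD in particular used to skip the recursion check entirely. A sketch of the rejected case (binding a loop device to itself; illustrative only):

#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);

	if (fd < 0)
		return 1;
	/*
	 * Backing /dev/loop0 with itself now fails in loop_validate_file()
	 * with -EBADF instead of creating an I/O recursion.
	 */
	if (ioctl(fd, LOOP_SET_FD, fd) < 0)
		perror("LOOP_SET_FD");	/* expected: Bad file descriptor */
	return 0;
}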
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
@@ -246,6 +246,7 @@ static void nbd_size_update(struct nbd_device *nbd)
 
 	if (config->flags & NBD_FLAG_SEND_TRIM) {
 		nbd->disk->queue->limits.discard_granularity = config->blksize;
+		nbd->disk->queue->limits.discard_alignment = config->blksize;
 		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
 	}
 	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
@@ -275,7 +276,7 @@ static void nbd_complete_rq(struct request *req)
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 
-	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
+	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
 		cmd->status ? "failed" : "done");
 
 	blk_mq_end_request(req, cmd->status);
@@ -482,7 +483,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	memcpy(request.handle, &tag, sizeof(tag));
 
 	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
-		cmd, nbdcmd_to_ascii(type),
+		req, nbdcmd_to_ascii(type),
 		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
 	result = sock_xmit(nbd, index, 1, &from,
 			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
@@ -518,7 +519,7 @@ send_pages:
 			int flags = is_last ? 0 : MSG_MORE;
 
 			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
-				cmd, bvec.bv_len);
+				req, bvec.bv_len);
 			iov_iter_bvec(&from, ITER_BVEC | WRITE,
 				      &bvec, 1, bvec.bv_len);
 			if (skip) {
@@ -610,7 +611,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 		return cmd;
 	}
 
-	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
+	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
 	if (rq_data_dir(req) != WRITE) {
 		struct req_iterator iter;
 		struct bio_vec bvec;
@@ -637,7 +638,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 				return ERR_PTR(-EIO);
 			}
 			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
-				cmd, bvec.bv_len);
+				req, bvec.bv_len);
 		}
 	} else {
 		/* See the comment in nbd_queue_rq. */
@@ -1062,6 +1063,7 @@ static void nbd_config_put(struct nbd_device *nbd)
 
 		nbd->tag_set.timeout = 0;
 		nbd->disk->queue->limits.discard_granularity = 0;
+		nbd->disk->queue->limits.discard_alignment = 0;
 		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
@@ -1516,6 +1518,7 @@ static int nbd_dev_add(int index)
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 	disk->queue->limits.discard_granularity = 0;
+	disk->queue->limits.discard_alignment = 0;
 	blk_queue_max_discard_sectors(disk->queue, 0);
 	blk_queue_max_segment_size(disk->queue, UINT_MAX);
 	blk_queue_max_segments(disk->queue, USHRT_MAX);
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
@@ -203,7 +203,7 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
 		if (!lba_list) {
 			pr_err("pblk: could not interpret emeta (line %d)\n",
 					line->id);
-			goto fail_free_ws;
+			goto fail_free_invalid_bitmap;
 		}
 	}
 
@@ -280,6 +280,7 @@ fail_free_gc_rq:
 	kfree(gc_rq);
fail_free_lba_list:
 	pblk_mfree(lba_list, l_mg->emeta_alloc_type);
+fail_free_invalid_bitmap:
 	kfree(invalid_bitmap);
fail_free_ws:
 	kfree(line_ws);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
@@ -20,7 +20,7 @@
 
 #include "pblk.h"
 
-unsigned int write_buffer_size;
+static unsigned int write_buffer_size;
 
 module_param(write_buffer_size, uint, 0644);
 MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
@@ -1953,9 +1953,10 @@ static void free_dev(struct mapped_device *md)
 	kvfree(md);
 }
 
-static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
+static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
+	int ret = 0;
 
 	if (dm_table_bio_based(t)) {
 		/*
@@ -1982,13 +1983,16 @@ static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
 			       bioset_initialized(&md->bs) ||
 			       bioset_initialized(&md->io_bs));
 
-	md->bs = p->bs;
-	memset(&p->bs, 0, sizeof(p->bs));
-	md->io_bs = p->io_bs;
-	memset(&p->io_bs, 0, sizeof(p->io_bs));
+	ret = bioset_init_from_src(&md->bs, &p->bs);
+	if (ret)
+		goto out;
+	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
+	if (ret)
+		bioset_exit(&md->bs);
out:
 	/* mempool bind completed, no longer need any mempools in the table */
 	dm_table_free_md_mempools(t);
+	return ret;
 }
 
 /*
@@ -2033,6 +2037,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 	struct request_queue *q = md->queue;
 	bool request_based = dm_table_request_based(t);
 	sector_t size;
+	int ret;
 
 	lockdep_assert_held(&md->suspend_lock);
 
@@ -2068,7 +2073,11 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 		md->immutable_target = dm_table_get_immutable_target(t);
 	}
 
-	__bind_mempools(md, t);
+	ret = __bind_mempools(md, t);
+	if (ret) {
+		old_map = ERR_PTR(ret);
+		goto out;
+	}
 
 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
 	rcu_assign_pointer(md->map, (void *)t);
@@ -2078,6 +2087,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 	if (old_map)
 		dm_sync_table(md);
 
+out:
 	return old_map;
 }
 
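
Since __bind() returns the previous table pointer, the new mempool failure has to travel through that same pointer using the kernel's usual ERR_PTR() encoding. A sketch of how a caller distinguishes the cases (illustrative, not taken from dm):

struct dm_table *old_map;

old_map = __bind(md, t, &limits);
if (IS_ERR(old_map))
	return PTR_ERR(old_map);	/* bioset_init_from_src() failed */
if (old_map)
	dm_table_destroy(old_map);	/* normal swap: retire the old table */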
diff --git a/drivers/md/md.c b/drivers/md/md.c
@@ -84,6 +84,8 @@ static void autostart_arrays(int part);
 static LIST_HEAD(pers_list);
 static DEFINE_SPINLOCK(pers_lock);
 
+static struct kobj_type md_ktype;
+
 struct md_cluster_operations *md_cluster_ops;
 EXPORT_SYMBOL(md_cluster_ops);
 struct module *md_cluster_mod;
@@ -510,11 +512,6 @@ static void mddev_delayed_delete(struct work_struct *ws);
 
 static void mddev_put(struct mddev *mddev)
 {
-	struct bio_set bs, sync_bs;
-
-	memset(&bs, 0, sizeof(bs));
-	memset(&sync_bs, 0, sizeof(sync_bs));
-
 	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
 		return;
 	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
@@ -522,30 +519,23 @@ static void mddev_put(struct mddev *mddev)
 		/* Array is not configured at all, and not held active,
 		 * so destroy it */
 		list_del_init(&mddev->all_mddevs);
-		bs = mddev->bio_set;
-		sync_bs = mddev->sync_set;
-		memset(&mddev->bio_set, 0, sizeof(mddev->bio_set));
-		memset(&mddev->sync_set, 0, sizeof(mddev->sync_set));
-		if (mddev->gendisk) {
-			/* We did a probe so need to clean up. Call
-			 * queue_work inside the spinlock so that
-			 * flush_workqueue() after mddev_find will
-			 * succeed in waiting for the work to be done.
-			 */
-			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
-			queue_work(md_misc_wq, &mddev->del_work);
-		} else
-			kfree(mddev);
+
+		/*
+		 * Call queue_work inside the spinlock so that
+		 * flush_workqueue() after mddev_find will succeed in waiting
+		 * for the work to be done.
+		 */
+		INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+		queue_work(md_misc_wq, &mddev->del_work);
 	}
 	spin_unlock(&all_mddevs_lock);
-	bioset_exit(&bs);
-	bioset_exit(&sync_bs);
 }
 
 static void md_safemode_timeout(struct timer_list *t);
 
 void mddev_init(struct mddev *mddev)
 {
+	kobject_init(&mddev->kobj, &md_ktype);
 	mutex_init(&mddev->open_mutex);
 	mutex_init(&mddev->reconfig_mutex);
 	mutex_init(&mddev->bitmap_info.mutex);
@@ -5215,6 +5205,8 @@ static void md_free(struct kobject *ko)
 		put_disk(mddev->gendisk);
 	percpu_ref_exit(&mddev->writes_pending);
 
+	bioset_exit(&mddev->bio_set);
+	bioset_exit(&mddev->sync_set);
 	kfree(mddev);
 }
 
@@ -5348,8 +5340,7 @@ static int md_alloc(dev_t dev, char *name)
 	mutex_lock(&mddev->open_mutex);
 	add_disk(disk);
 
-	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
-				     &disk_to_dev(disk)->kobj, "%s", "md");
+	error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
 	if (error) {
 		/* This isn't possible, but as kobject_init_and_add is marked
 		 * __must_check, we must do something with the result
@@ -5506,7 +5497,7 @@ int md_run(struct mddev *mddev)
 	if (!bioset_initialized(&mddev->sync_set)) {
 		err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 		if (err)
-			goto abort;
+			return err;
 	}
 
 	spin_lock(&pers_lock);
@@ -5519,8 +5510,7 @@ int md_run(struct mddev *mddev)
 		else
 			pr_warn("md: personality for level %s is not loaded!\n",
 				mddev->clevel);
-		err = -EINVAL;
-		goto abort;
+		return -EINVAL;
 	}
 	spin_unlock(&pers_lock);
 	if (mddev->level != pers->level) {
@@ -5533,8 +5523,7 @@ int md_run(struct mddev *mddev)
 	    pers->start_reshape == NULL) {
 		/* This personality cannot handle reshaping... */
 		module_put(pers->owner);
-		err = -EINVAL;
-		goto abort;
+		return -EINVAL;
 	}
 
 	if (pers->sync_request) {
@@ -5603,7 +5592,7 @@ int md_run(struct mddev *mddev)
 		mddev->private = NULL;
 		module_put(pers->owner);
 		bitmap_destroy(mddev);
-		goto abort;
+		return err;
 	}
 	if (mddev->queue) {
 		bool nonrot = true;
@@ -5665,12 +5654,6 @@ int md_run(struct mddev *mddev)
 	sysfs_notify_dirent_safe(mddev->sysfs_action);
 	sysfs_notify(&mddev->kobj, NULL, "degraded");
 	return 0;
-
-abort:
-	bioset_exit(&mddev->bio_set);
-	bioset_exit(&mddev->sync_set);
-
-	return err;
 }
 EXPORT_SYMBOL_GPL(md_run);
 
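
Moving kobject_init() into mddev_init() while md_alloc() only does kobject_add() is the standard two-step kobject pattern: the refcount and release path are valid from allocation onward, so every teardown path can converge on kobject_put() and the single release function (here md_free(), which now also tears down the biosets). The general shape of the pattern (generic sketch, not md-specific):

static void my_release(struct kobject *kobj)
{
	struct my_obj *obj = container_of(kobj, struct my_obj, kobj);

	kfree(obj);	/* the one and only destruction path */
}

static struct kobj_type my_ktype = {
	.release = my_release,
};

/* at allocation time: refcounting valid from here on */
kobject_init(&obj->kobj, &my_ktype);
/* later, possibly much later: expose it in sysfs */
err = kobject_add(&obj->kobj, parent, "%s", "name");
/* on any path, success or error: */
kobject_put(&obj->kobj);	/* release() runs when the last ref drops */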
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
@@ -3245,7 +3245,7 @@ static void nvme_scan_work(struct work_struct *work)
 
 	WARN_ON_ONCE(!ctrl->tagset);
 
-	if (test_and_clear_bit(EVENT_NS_CHANGED, &ctrl->events)) {
+	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
 		if (nvme_scan_changed_ns_log(ctrl))
 			goto out_sort_namespaces;
 		dev_info(ctrl->device, "rescanning namespaces.\n");
@@ -3386,7 +3386,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 {
 	switch ((result & 0xff00) >> 8) {
 	case NVME_AER_NOTICE_NS_CHANGED:
-		set_bit(EVENT_NS_CHANGED, &ctrl->events);
+		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
 		nvme_queue_scan(ctrl);
 		break;
 	case NVME_AER_NOTICE_FW_ACT_STARTING:
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
@@ -952,6 +952,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		ret = -EBUSY;
 		goto out_unlock;
 	}
+	up_read(&nvmf_transports_rwsem);
 
 	ret = nvmf_check_required_opts(opts, ops->required_opts);
 	if (ret)
@@ -968,11 +969,11 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 	}
 
 	module_put(ops->module);
-	up_read(&nvmf_transports_rwsem);
 	return ctrl;
 
out_module_put:
 	module_put(ops->module);
+	goto out_free_opts;
out_unlock:
 	up_read(&nvmf_transports_rwsem);
out_free_opts:
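
The rwsem now only guards the walk of the transport list; what keeps the transport alive afterwards is the reference taken on its module. The resulting lookup pattern, roughly (sketch of the flow in nvmf_create_ctrl()):

down_read(&nvmf_transports_rwsem);
ops = nvmf_lookup_transport(opts);	/* list walk under the lock */
if (!ops || !try_module_get(ops->module)) {
	up_read(&nvmf_transports_rwsem);
	return ERR_PTR(-EBUSY);
}
up_read(&nvmf_transports_rwsem);	/* ops pinned by the module ref */

ctrl = ops->create_ctrl(dev, opts);	/* may sleep; lock no longer held */
module_put(ops->module);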
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
@@ -124,6 +124,9 @@ struct nvmf_ctrl_options {
  * 1. At minimum, 'required_opts' and 'allowed_opts' should
  * be set to the same enum parsing options defined earlier.
  * 2. create_ctrl() must be defined (even if it does nothing)
+ * 3. struct nvmf_transport_ops must be statically allocated in the
+ *    modules .bss section so that a pure module_get on @module
+ *    prevents the memory from beeing freed.
  */
struct nvmf_transport_ops {
 	struct list_head entry;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
@@ -194,7 +194,6 @@ struct nvme_ctrl {
 	struct delayed_work ka_work;
 	struct nvme_command ka_cmd;
 	struct work_struct fw_act_work;
-#define EVENT_NS_CHANGED	(1 << 0)
 	unsigned long events;
 
 	/* Power saving configuration */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
@@ -42,7 +42,7 @@ static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
 static bool use_cmb_sqes = true;
-module_param(use_cmb_sqes, bool, 0644);
+module_param(use_cmb_sqes, bool, 0444);
 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
 
 static unsigned int max_host_mem_size_mb = 128;
@@ -920,12 +920,10 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
 {
 	u16 head = nvmeq->cq_head;
 
-	if (likely(nvmeq->cq_vector >= 0)) {
-		if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
-						      nvmeq->dbbuf_cq_ei))
-			writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
-	}
+	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
+					      nvmeq->dbbuf_cq_ei))
+		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
 }
 
 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
@@ -1477,11 +1475,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 	 */
 	vector = dev->num_vecs == 1 ? 0 : qid;
 	result = adapter_alloc_cq(dev, qid, nvmeq, vector);
-	if (result < 0)
-		goto out;
+	if (result)
+		return result;
 
 	result = adapter_alloc_sq(dev, qid, nvmeq);
 	if (result < 0)
+		return result;
+	else if (result)
 		goto release_cq;
 
 	/*
@@ -1503,7 +1503,6 @@ release_sq:
 	adapter_delete_sq(dev, qid);
release_cq:
 	adapter_delete_cq(dev, qid);
-out:
 	return result;
 }
 
@@ -2012,13 +2011,7 @@ static void nvme_del_cq_end(struct request *req, blk_status_t error)
 	if (!error) {
 		unsigned long flags;
 
-		/*
-		 * We might be called with the AQ cq_lock held
-		 * and the I/O queue cq_lock should always
-		 * nest inside the AQ one.
-		 */
-		spin_lock_irqsave_nested(&nvmeq->cq_lock, flags,
-					SINGLE_DEPTH_NESTING);
+		spin_lock_irqsave(&nvmeq->cq_lock, flags);
 		nvme_process_cq(nvmeq, &start, &end, -1);
 		spin_unlock_irqrestore(&nvmeq->cq_lock, flags);
 
@@ -2231,14 +2224,6 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	nvme_stop_queues(&dev->ctrl);
 
 	if (!dead && dev->ctrl.queue_count > 0) {
-		/*
-		 * If the controller is still alive tell it to stop using the
-		 * host memory buffer. In theory the shutdown / reset should
-		 * make sure that it doesn't access the host memoery anymore,
-		 * but I'd rather be safe than sorry..
-		 */
-		if (dev->host_mem_descs)
-			nvme_set_host_mem(dev, 0);
 		nvme_disable_io_queues(dev);
 		nvme_disable_admin_queue(dev, shutdown);
 	}
@@ -2614,7 +2599,7 @@ static void nvme_remove(struct pci_dev *pdev)
 
 	if (!pci_device_is_present(pdev)) {
 		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
-		nvme_dev_disable(dev, false);
+		nvme_dev_disable(dev, true);
 	}
 
 	flush_work(&dev->ctrl.reset_work);
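
In nvme_create_queue() the asymmetry matters because adapter_alloc_cq()/adapter_alloc_sq() return a negative errno when the admin command could not be issued at all, and a positive NVMe status code when the controller itself rejected it. An annotated restatement of the hunk above (the rationale is inferred, not spelled out in the commit):

result = adapter_alloc_cq(dev, qid, nvmeq, vector);
if (result)		/* either way no CQ exists: nothing to unwind */
	return result;

result = adapter_alloc_sq(dev, qid, nvmeq);
if (result < 0)		/* couldn't reach the controller; a Delete CQ
			 * command would just be another failure */
	return result;
else if (result)	/* controller is alive but refused the SQ, */
	goto release_cq;	/* so delete the CQ that was just created */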
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
@@ -1951,8 +1951,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	}
 
 	/* sanity check keyed sgls */
-	if (!(ctrl->ctrl.sgls & (1 << 20))) {
-		dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not support\n");
+	if (!(ctrl->ctrl.sgls & (1 << 2))) {
+		dev_err(ctrl->ctrl.device,
+			"Mandatory keyed sgls are not supported!\n");
 		ret = -EINVAL;
 		goto out_remove_admin_queue;
 	}
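
The SGLS field of the Identify Controller data reports keyed SGL support in bit 2, not bit 20, so the old test would reject targets that do support it. A sketch of the check against the spec-defined layout (bit position per NVMe 1.2/1.3; the macro name here is hypothetical, the driver uses the raw shift):

/*
 * Identify Controller, SGLS ("SGL Support"):
 *   bits 1:0 - SGL support in I/O commands
 *   bit  2   - Keyed SGL Data Block descriptor support
 */
#define NVME_CTRL_SGLS_KEYED	(1 << 2)	/* hypothetical name */

if (!(ctrl->ctrl.sgls & NVME_CTRL_SGLS_KEYED)) {
	/* the RDMA transport needs keyed SGLs to carry remote keys */
	ret = -EINVAL;
	goto out_remove_admin_queue;
}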
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
@@ -270,8 +270,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 	struct nvme_id_ns *id;
 	u16 status = 0;
 
-	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
-	if (!ns) {
+	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
 		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
 		goto out;
 	}
@@ -279,9 +278,14 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 	id = kzalloc(sizeof(*id), GFP_KERNEL);
 	if (!id) {
 		status = NVME_SC_INTERNAL;
-		goto out_put_ns;
+		goto out;
 	}
 
+	/* return an all zeroed buffer if we can't find an active namespace */
+	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
+	if (!ns)
+		goto done;
+
 	/*
 	 * nuse = ncap = nsze isn't always true, but we have no way to find
 	 * that out from the underlying device.
@@ -306,11 +310,10 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 
 	id->lbaf[0].ds = ns->blksize_shift;
 
-	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
-
-	kfree(id);
-out_put_ns:
 	nvmet_put_namespace(ns);
+done:
+	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+	kfree(id);
out:
 	nvmet_req_complete(req, status);
 }
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
@@ -137,8 +137,10 @@ static ssize_t nvmet_addr_traddr_store(struct config_item *item,
 		pr_err("Disable the address before modifying\n");
 		return -EACCES;
 	}
-	return snprintf(port->disc_addr.traddr,
-			sizeof(port->disc_addr.traddr), "%s", page);
+
+	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
+		return -EINVAL;
+	return count;
 }
 
 CONFIGFS_ATTR(nvmet_, addr_traddr);
@@ -208,8 +210,10 @@ static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
 		pr_err("Disable the address before modifying\n");
 		return -EACCES;
 	}
-	return snprintf(port->disc_addr.trsvcid,
-			sizeof(port->disc_addr.trsvcid), "%s", page);
+
+	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
+		return -EINVAL;
+	return count;
 }
 
 CONFIGFS_ATTR(nvmet_, addr_trsvcid);
@@ -288,7 +292,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 	kfree(ns->device_path);
 
 	ret = -ENOMEM;
-	ns->device_path = kstrdup(page, GFP_KERNEL);
+	ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
 	if (!ns->device_path)
 		goto out_unlock;
 
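
The practical problem being fixed: "echo 192.168.0.1 > addr_traddr" hands the store handler the string with a trailing newline, which used to be copied into the discovery address verbatim. sscanf with %s stops at whitespace, and kstrndup(page, strcspn(page, "\n"), ...) copies up to but not including the newline. A tiny userspace demonstration of the same idiom (sketch; the width specifier is added for safety, the kernel buffers are fixed-size):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *page = "192.168.0.1\n";	/* what echo(1) writes */
	char traddr[64];

	if (sscanf(page, "%63s", traddr) == 1)
		printf("'%s'\n", traddr);	/* '192.168.0.1', newline gone */

	printf("%zu\n", strcspn(page, "\n"));	/* 11: length without the \n */
	return 0;
}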
diff --git a/include/linux/bio.h b/include/linux/bio.h
@@ -422,6 +422,7 @@ enum {
 extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
 extern void bioset_exit(struct bio_set *);
 extern int biovec_init_pool(mempool_t *pool, int pool_entries);
+extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
 
 extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
 extern void bio_put(struct bio *);