mirror of
https://github.com/torvalds/linux.git
synced 2024-11-16 17:12:06 +00:00
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client
Pull ceph fixes from Sage Weil:
 "There is an RBD fix for a crash due to the immutable bio changes, an
  error path fix, and a locking fix in the recent redirect support"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client:
  libceph: do not dereference a NULL bio pointer
  libceph: take map_sem for read in handle_reply()
  libceph: factor out logic from ceph_osdc_start_request()
  libceph: fix error handling in ceph_osdc_init()
This commit is contained in:
commit
41f76d8bee
@ -840,9 +840,13 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
|
||||
|
||||
if (!cursor->bvec_iter.bi_size) {
|
||||
bio = bio->bi_next;
|
||||
cursor->bvec_iter = bio->bi_iter;
|
||||
cursor->bio = bio;
|
||||
if (bio)
|
||||
cursor->bvec_iter = bio->bi_iter;
|
||||
else
|
||||
memset(&cursor->bvec_iter, 0,
|
||||
sizeof(cursor->bvec_iter));
|
||||
}
|
||||
cursor->bio = bio;
|
||||
|
||||
if (!cursor->last_piece) {
|
||||
BUG_ON(!cursor->resid);
|
||||
|
@ -1426,6 +1426,40 @@ static void __send_queued(struct ceph_osd_client *osdc)
|
||||
__send_request(osdc, req);
|
||||
}
|
||||
|
||||
/*
|
||||
* Caller should hold map_sem for read and request_mutex.
|
||||
*/
|
||||
static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req,
|
||||
bool nofail)
|
||||
{
|
||||
int rc;
|
||||
|
||||
__register_request(osdc, req);
|
||||
req->r_sent = 0;
|
||||
req->r_got_reply = 0;
|
||||
rc = __map_request(osdc, req, 0);
|
||||
if (rc < 0) {
|
||||
if (nofail) {
|
||||
dout("osdc_start_request failed map, "
|
||||
" will retry %lld\n", req->r_tid);
|
||||
rc = 0;
|
||||
} else {
|
||||
__unregister_request(osdc, req);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
if (req->r_osd == NULL) {
|
||||
dout("send_request %p no up osds in pg\n", req);
|
||||
ceph_monc_request_next_osdmap(&osdc->client->monc);
|
||||
} else {
|
||||
__send_queued(osdc);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Timeout callback, called every N seconds when 1 or more osd
|
||||
* requests has been active for more than N seconds. When this
|
||||
@ -1653,6 +1687,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
|
||||
osdmap_epoch = ceph_decode_32(&p);
|
||||
|
||||
/* lookup */
|
||||
down_read(&osdc->map_sem);
|
||||
mutex_lock(&osdc->request_mutex);
|
||||
req = __lookup_request(osdc, tid);
|
||||
if (req == NULL) {
|
||||
@ -1709,7 +1744,6 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
|
||||
dout("redirect pool %lld\n", redir.oloc.pool);
|
||||
|
||||
__unregister_request(osdc, req);
|
||||
mutex_unlock(&osdc->request_mutex);
|
||||
|
||||
req->r_target_oloc = redir.oloc; /* struct */
|
||||
|
||||
@ -1721,10 +1755,10 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
|
||||
* successfully. In the future we might want to follow
|
||||
* original request's nofail setting here.
|
||||
*/
|
||||
err = ceph_osdc_start_request(osdc, req, true);
|
||||
err = __ceph_osdc_start_request(osdc, req, true);
|
||||
BUG_ON(err);
|
||||
|
||||
goto done;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
already_completed = req->r_got_reply;
|
||||
@ -1742,8 +1776,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
|
||||
req->r_got_reply = 1;
|
||||
} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
|
||||
dout("handle_reply tid %llu dup ack\n", tid);
|
||||
mutex_unlock(&osdc->request_mutex);
|
||||
goto done;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
dout("handle_reply tid %llu flags %d\n", tid, flags);
|
||||
@ -1758,6 +1791,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
|
||||
__unregister_request(osdc, req);
|
||||
|
||||
mutex_unlock(&osdc->request_mutex);
|
||||
up_read(&osdc->map_sem);
|
||||
|
||||
if (!already_completed) {
|
||||
if (req->r_unsafe_callback &&
|
||||
@ -1775,10 +1809,14 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
|
||||
complete_request(req);
|
||||
}
|
||||
|
||||
done:
|
||||
out:
|
||||
dout("req=%p req->r_linger=%d\n", req, req->r_linger);
|
||||
ceph_osdc_put_request(req);
|
||||
return;
|
||||
out_unlock:
|
||||
mutex_unlock(&osdc->request_mutex);
|
||||
up_read(&osdc->map_sem);
|
||||
goto out;
|
||||
|
||||
bad_put:
|
||||
req->r_result = -EIO;
|
||||
@ -1791,6 +1829,7 @@ bad_put:
|
||||
ceph_osdc_put_request(req);
|
||||
bad_mutex:
|
||||
mutex_unlock(&osdc->request_mutex);
|
||||
up_read(&osdc->map_sem);
|
||||
bad:
|
||||
pr_err("corrupt osd_op_reply got %d %d\n",
|
||||
(int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
|
||||
@ -2351,34 +2390,16 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req,
|
||||
bool nofail)
|
||||
{
|
||||
int rc = 0;
|
||||
int rc;
|
||||
|
||||
down_read(&osdc->map_sem);
|
||||
mutex_lock(&osdc->request_mutex);
|
||||
__register_request(osdc, req);
|
||||
req->r_sent = 0;
|
||||
req->r_got_reply = 0;
|
||||
rc = __map_request(osdc, req, 0);
|
||||
if (rc < 0) {
|
||||
if (nofail) {
|
||||
dout("osdc_start_request failed map, "
|
||||
" will retry %lld\n", req->r_tid);
|
||||
rc = 0;
|
||||
} else {
|
||||
__unregister_request(osdc, req);
|
||||
}
|
||||
goto out_unlock;
|
||||
}
|
||||
if (req->r_osd == NULL) {
|
||||
dout("send_request %p no up osds in pg\n", req);
|
||||
ceph_monc_request_next_osdmap(&osdc->client->monc);
|
||||
} else {
|
||||
__send_queued(osdc);
|
||||
}
|
||||
rc = 0;
|
||||
out_unlock:
|
||||
|
||||
rc = __ceph_osdc_start_request(osdc, req, nofail);
|
||||
|
||||
mutex_unlock(&osdc->request_mutex);
|
||||
up_read(&osdc->map_sem);
|
||||
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(ceph_osdc_start_request);
|
||||
@ -2504,9 +2525,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
|
||||
err = -ENOMEM;
|
||||
osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
|
||||
if (!osdc->notify_wq)
|
||||
goto out_msgpool;
|
||||
goto out_msgpool_reply;
|
||||
|
||||
return 0;
|
||||
|
||||
out_msgpool_reply:
|
||||
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
|
||||
out_msgpool:
|
||||
ceph_msgpool_destroy(&osdc->msgpool_op);
|
||||
out_mempool:
|
||||
|
Loading…
Reference in New Issue
Block a user