sched: Avoid dereferencing skb pointer after child enqueue
Parent qdiscs may dereference the pointer to the enqueued skb after
enqueue. However, both CAKE and TBF call consume_skb() on the original
skb when splitting GSO packets, leading to a potential use-after-free
in the parent. Fix this by avoiding dereferencing the skb pointer after
enqueueing to the child.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit f6bab19931
parent 80b3671e93
committed by David S. Miller
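Every hunk below is the same mechanical transformation: capture what the parent still needs from the skb (its qdisc_pkt_len(), plus the GSO segment count in QFQ's case) into locals before the child enqueue, and update the parent's statistics from those locals afterwards. The following is a minimal sketch of the buggy and fixed shapes; the toy_* function names are invented for illustration and do not appear in the patch:

#include <net/sch_generic.h>

/* Buggy shape: a child that splits GSO packets (CAKE, TBF) calls
 * consume_skb() on the original skb inside ->enqueue(), so the
 * parent's stats update below reads freed memory.
 */
static int toy_enqueue_buggy(struct sk_buff *skb, struct Qdisc *sch,
                             struct Qdisc *child, struct sk_buff **to_free)
{
        int err = child->ops->enqueue(skb, child, to_free);

        if (err != NET_XMIT_SUCCESS)
                return err;

        qdisc_qstats_backlog_inc(sch, skb);     /* potential use-after-free */
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}

/* Fixed shape: read the length up front; never touch skb after enqueue. */
static int toy_enqueue_fixed(struct sk_buff *skb, struct Qdisc *sch,
                             struct Qdisc *child, struct sk_buff **to_free)
{
        unsigned int len = qdisc_pkt_len(skb);
        int err = child->ops->enqueue(skb, child, to_free);

        if (err != NET_XMIT_SUCCESS)
                return err;

        sch->qstats.backlog += len;     /* cached value, no skb access */
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}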
net/sched/sch_cbs.c
@@ -88,13 +88,14 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			     struct Qdisc *child,
 			     struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	int err;
 
 	err = child->ops->enqueue(skb, child, to_free);
 	if (err != NET_XMIT_SUCCESS)
 		return err;
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
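The call being replaced looks harmless, but qdisc_qstats_backlog_inc() dereferences the skb internally to read its length. Paraphrasing its definition from the include/net/sch_generic.h of this era:

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        /* qdisc_pkt_len() reads qdisc_skb_cb(skb)->pkt_len,
         * i.e. it touches skb memory */
        sch->qstats.backlog += qdisc_pkt_len(skb);
}

Substituting sch->qstats.backlog += len with the pre-computed length keeps the accounting identical while removing the post-enqueue dereference.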
net/sched/sch_drr.c
@@ -350,6 +350,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
 static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
 	int err = 0;
@@ -376,7 +377,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		cl->deficit = cl->quantum;
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 	return err;
 }
net/sched/sch_dsmark.c
@@ -199,6 +199,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
 static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			  struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 	int err;
 
@@ -271,7 +272,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return err;
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
net/sched/sch_hfsc.c
@@ -1539,6 +1539,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 static int
 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	struct hfsc_class *cl;
 	int uninitialized_var(err);
 
@@ -1560,8 +1561,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 	}
 
 	if (cl->qdisc->q.qlen == 1) {
-		unsigned int len = qdisc_pkt_len(skb);
-
 		if (cl->cl_flags & HFSC_RSC)
 			init_ed(cl, len);
 		if (cl->cl_flags & HFSC_FSC)
@@ -1576,7 +1575,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
net/sched/sch_htb.c
@@ -581,6 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
 	int uninitialized_var(ret);
+	unsigned int len = qdisc_pkt_len(skb);
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = htb_classify(skb, sch, &ret);
 
@@ -610,7 +611,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		htb_activate(q, cl);
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 	return NET_XMIT_SUCCESS;
 }
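The int uninitialized_var(ret) line kept as context in the HTB hunk (and its err counterpart in HFSC) is the old kernel macro for suppressing false-positive "may be used uninitialized" warnings; it has since been removed from the tree. For GCC builds of this era it expanded to roughly the following:

/* From the period's include/linux/compiler-gcc.h (since removed):
 * a self-assignment that convinces the compiler the variable is
 * initialized without emitting any code.
 */
#define uninitialized_var(x) x = x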
net/sched/sch_prio.c
@@ -72,6 +72,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 static int
 prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	struct Qdisc *qdisc;
 	int ret;
 
@@ -88,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 
 	ret = qdisc_enqueue(skb, qdisc, to_free);
 	if (ret == NET_XMIT_SUCCESS) {
-		qdisc_qstats_backlog_inc(sch, skb);
+		sch->qstats.backlog += len;
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
net/sched/sch_qfq.c
@@ -1210,6 +1210,7 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb), gso_segs;
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl;
 	struct qfq_aggregate *agg;
@@ -1224,17 +1225,17 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}
 	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
 
-	if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
+	if (unlikely(cl->agg->lmax < len)) {
 		pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
-			 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
-		err = qfq_change_agg(sch, cl, cl->agg->class_weight,
-				     qdisc_pkt_len(skb));
+			 cl->agg->lmax, len, cl->common.classid);
+		err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
 		if (err) {
 			cl->qstats.drops++;
 			return qdisc_drop(skb, sch, to_free);
 		}
 	}
 
+	gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1245,8 +1246,9 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return err;
 	}
 
-	bstats_update(&cl->bstats, skb);
-	qdisc_qstats_backlog_inc(sch, skb);
+	cl->bstats.bytes += len;
+	cl->bstats.packets += gso_segs;
+	sch->qstats.backlog += len;
 	++sch->q.qlen;
 
 	agg = cl->agg;
@@ -1254,7 +1256,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (cl->qdisc->q.qlen != 1) {
 		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
 		    list_first_entry(&agg->active, struct qfq_class, alist)
-		    == cl && cl->deficit < qdisc_pkt_len(skb))
+		    == cl && cl->deficit < len)
 			list_move_tail(&cl->alist, &agg->active);
 
 	return err;
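QFQ needs a second cached value because its per-class byte and packet counters were maintained via bstats_update(), which reads both the packet length and the GSO segment count from the skb. The helper looks roughly like this (paraphrased from the period's include/net/sch_generic.h):

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        bstats->bytes += qdisc_pkt_len(skb);
        bstats->packets += skb_is_gso(skb) ?
                           skb_shinfo(skb)->gso_segs : 1;
}

Capturing len and gso_segs before qdisc_enqueue() and open-coding the two additions afterwards preserves identical accounting without touching the skb again.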
net/sched/sch_tbf.c
@@ -185,6 +185,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
+	unsigned int len = qdisc_pkt_len(skb);
 	int ret;
 
 	if (qdisc_pkt_len(skb) > q->max_size) {
@@ -200,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return ret;
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 	return NET_XMIT_SUCCESS;
 }