inet: frags: better deal with smp races
[ Upstream commit 0d5b9311baf27bb545f187f12ecfd558220c607d ]

Multiple cpus might attempt to insert a new fragment in rhashtable,
if for example RPS is buggy, as reported by 배석진 in
https://patchwork.ozlabs.org/patch/994601/

We use rhashtable_lookup_get_insert_key() instead of
rhashtable_insert_fast() to let cpus losing the race free their own
inet_frag_queue and use the one that was inserted by another cpu.

Fixes: 648700f76b03 ("inet: frags: use rhashtables for reassembly units")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: 배석진 <soukjin.bae@samsung.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
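For illustration only, here is a minimal userspace C sketch of the pattern the fix adopts: each cpu builds its own queue, tries to publish it, and whoever loses the race frees its private copy and takes a reference on the object the winner already inserted. The toy table, lookup_get_insert_key() and frag_find() below are stand-ins invented for this sketch, not the kernel API; the real rhashtable_lookup_get_insert_key() can additionally return an ERR_PTR() on failure, which is why the patched inet_frag_find() checks IS_ERR(prev).

#include <stdio.h>
#include <stdlib.h>

struct frag_queue {
	unsigned int key;
	int refcnt;
};

#define TABLE_SIZE 16
static struct frag_queue *table[TABLE_SIZE];	/* toy stand-in for rhashtable */

/* Insert q unless an entry with the same key is already present; in that
 * case return the existing entry and leave q untouched (caller frees it).
 * NULL means q was inserted, mirroring the contract of
 * rhashtable_lookup_get_insert_key(). */
static struct frag_queue *lookup_get_insert_key(struct frag_queue *q)
{
	unsigned int slot = q->key % TABLE_SIZE;

	if (table[slot] && table[slot]->key == q->key)
		return table[slot];	/* lost the race: use winner's object */
	table[slot] = q;		/* won the race: q is now published */
	return NULL;
}

/* Mirrors the control flow of the patched inet_frag_find()/inet_frag_create();
 * the real code does this under rcu_read_lock() with rhashtable providing an
 * atomic lookup-or-insert, while this toy is single-threaded. */
static struct frag_queue *frag_find(unsigned int key)
{
	struct frag_queue *q, *prev;

	q = calloc(1, sizeof(*q));
	if (!q)
		return NULL;
	q->key = key;
	q->refcnt = 1;

	prev = lookup_get_insert_key(q);
	if (prev) {
		free(q);		/* drop our own copy ... */
		prev->refcnt++;		/* ... and take a ref on the winner's */
		return prev;
	}
	return q;
}

int main(void)
{
	/* Two "cpus" racing to create the reassembly queue for the same key. */
	struct frag_queue *a = frag_find(42);
	struct frag_queue *b = frag_find(42);

	printf("same object: %s, refcnt=%d\n",
	       a == b ? "yes" : "no", b ? b->refcnt : 0);
	free(a);
	return 0;
}

Compiled with any C compiler, this prints "same object: yes, refcnt=2": both callers end up sharing a single queue, which is what the patch guarantees for racing cpus.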
This commit is contained in:
parent 262e7c9940
commit eb18330f4d
net/ipv4/inet_fragment.c
@@ -180,21 +180,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 }
 
 static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
-						void *arg)
+						void *arg,
+						struct inet_frag_queue **prev)
 {
 	struct inet_frags *f = nf->f;
 	struct inet_frag_queue *q;
-	int err;
 
 	q = inet_frag_alloc(nf, f, arg);
-	if (!q)
+	if (!q) {
+		*prev = ERR_PTR(-ENOMEM);
 		return NULL;
-
+	}
 	mod_timer(&q->timer, jiffies + nf->timeout);
 
-	err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
-				     f->rhash_params);
-	if (err < 0) {
+	*prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
+						 &q->node, f->rhash_params);
+	if (*prev) {
 		q->flags |= INET_FRAG_COMPLETE;
 		inet_frag_kill(q);
 		inet_frag_destroy(q);
@@ -207,17 +208,18 @@ EXPORT_SYMBOL(inet_frag_create);
 /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
 {
-	struct inet_frag_queue *fq;
+	struct inet_frag_queue *fq = NULL, *prev;
 
 	rcu_read_lock();
-	fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
-	if (fq) {
+	prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+	if (!prev)
+		fq = inet_frag_create(nf, key, &prev);
+	if (prev && !IS_ERR(prev)) {
+		fq = prev;
 		if (!atomic_inc_not_zero(&fq->refcnt))
 			fq = NULL;
-		rcu_read_unlock();
-		return fq;
 	}
 	rcu_read_unlock();
-	return inet_frag_create(nf, key);
+	return fq;
 }
 EXPORT_SYMBOL(inet_frag_find);