blk-throttle: Do the new group initialization with the help of a function
Group initialization code currently lives in two places: root group initialization in blk_throtl_init(), and dynamically allocated groups in throtl_find_alloc_tg(). Create a common function and use it in both places.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
commit a29a171e7c
parent 698567f3fa
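For readers outside the kernel tree, the reference-counting scheme that throtl_init_group() centralizes can be modeled in plain C. The sketch below is a hypothetical userspace analogue, not kernel code: struct grp_model, model_init_group() and model_put() are stand-ins invented for illustration, but the counts mirror the diff, where every group starts with one joint cgroup/request-queue reference and the root group takes one extra (the old atomic_set(&tg->ref, 2) becomes init-to-1 plus atomic_inc()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct grp_model {
        atomic_int ref;
        bool is_root;
};

/* Analogue of throtl_init_group(): take the initial joint reference. */
static void model_init_group(struct grp_model *tg)
{
        atomic_store(&tg->ref, 1);
}

/* Analogue of a put: free a dynamically allocated group on the last put. */
static void model_put(struct grp_model *tg)
{
        if (atomic_fetch_sub(&tg->ref, 1) == 1 && !tg->is_root)
                free(tg);
}

int main(void)
{
        /* Dynamically allocated group: one joint reference, dropped by
         * whichever of queue exit or cgroup deletion happens first. */
        struct grp_model *tg = calloc(1, sizeof(*tg));
        if (!tg)
                return 1;
        model_init_group(tg);
        model_put(tg);                  /* 1 -> 0: freed */

        /* Root group: statically allocated; init to 1, then one extra
         * reference, matching the old atomic_set(&tg->ref, 2). */
        static struct grp_model root = { .is_root = true };
        model_init_group(&root);
        atomic_fetch_add(&root.ref, 1);
        model_put(&root);               /* queue-exit side: 2 -> 1 */
        model_put(&root);               /* cgroup side: 1 -> 0, not freed */
        printf("root ref: %d\n", atomic_load(&root.ref));
        return 0;
}

Under this scheme, whichever of queue exit or cgroup deletion runs last drops the final reference; the root group is statically allocated, so it is never freed on its own and goes away with throtl_data.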
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -159,6 +159,35 @@ static void throtl_put_tg(struct throtl_grp *tg)
         kfree(tg);
 }
 
+static void throtl_init_group(struct throtl_grp *tg)
+{
+        INIT_HLIST_NODE(&tg->tg_node);
+        RB_CLEAR_NODE(&tg->rb_node);
+        bio_list_init(&tg->bio_lists[0]);
+        bio_list_init(&tg->bio_lists[1]);
+        tg->limits_changed = false;
+
+        /* Practically unlimited BW */
+        tg->bps[0] = tg->bps[1] = -1;
+        tg->iops[0] = tg->iops[1] = -1;
+
+        /*
+         * Take the initial reference that will be released on destroy
+         * This can be thought of a joint reference by cgroup and
+         * request queue which will be dropped by either request queue
+         * exit or cgroup deletion path depending on who is exiting first.
+         */
+        atomic_set(&tg->ref, 1);
+}
+
+/* Should be called with rcu read lock held (needed for blkcg) */
+static void
+throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
+{
+        hlist_add_head(&tg->tg_node, &td->tg_list);
+        td->nr_undestroyed_grps++;
+}
+
 static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
                         struct blkio_cgroup *blkcg)
 {
@@ -196,19 +225,7 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
         if (!tg)
                 goto done;
 
-        INIT_HLIST_NODE(&tg->tg_node);
-        RB_CLEAR_NODE(&tg->rb_node);
-        bio_list_init(&tg->bio_lists[0]);
-        bio_list_init(&tg->bio_lists[1]);
-        td->limits_changed = false;
-
-        /*
-         * Take the initial reference that will be released on destroy
-         * This can be thought of a joint reference by cgroup and
-         * request queue which will be dropped by either request queue
-         * exit or cgroup deletion path depending on who is exiting first.
-         */
-        atomic_set(&tg->ref, 1);
+        throtl_init_group(tg);
 
         /* Add group onto cgroup list */
         sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
@@ -220,8 +237,7 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
         tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
         tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
 
-        hlist_add_head(&tg->tg_node, &td->tg_list);
-        td->nr_undestroyed_grps++;
+        throtl_add_group_to_td_list(td, tg);
 done:
         return tg;
 }
@@ -1060,18 +1076,11 @@ int blk_throtl_init(struct request_queue *q)
         INIT_HLIST_HEAD(&td->tg_list);
         td->tg_service_tree = THROTL_RB_ROOT;
         td->limits_changed = false;
+        INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
 
         /* Init root group */
         tg = &td->root_tg;
-        INIT_HLIST_NODE(&tg->tg_node);
-        RB_CLEAR_NODE(&tg->rb_node);
-        bio_list_init(&tg->bio_lists[0]);
-        bio_list_init(&tg->bio_lists[1]);
-
-        /* Practically unlimited BW */
-        tg->bps[0] = tg->bps[1] = -1;
-        tg->iops[0] = tg->iops[1] = -1;
-        td->limits_changed = false;
+        throtl_init_group(tg);
 
         /*
          * Set root group reference to 2. One reference will be dropped when
@@ -1080,16 +1089,13 @@ int blk_throtl_init(struct request_queue *q)
          * as it is statically allocated and gets destroyed when throtl_data
          * goes away.
          */
-        atomic_set(&tg->ref, 2);
-        hlist_add_head(&tg->tg_node, &td->tg_list);
-        td->nr_undestroyed_grps++;
-
-        INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
+        atomic_inc(&tg->ref);
 
         rcu_read_lock();
         blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
                                         0, BLKIO_POLICY_THROTL);
         rcu_read_unlock();
+        throtl_add_group_to_td_list(td, tg);
 
         /* Attach throtl data to request queue */
         td->queue = q;