author     V3n3RiX <venerix@redcorelinux.org>  2018-11-04 02:42:59 +0000
committer  V3n3RiX <venerix@redcorelinux.org>  2018-11-04 02:42:59 +0000
commit     2a64b546a0efd4bb7f5907db9a02a03c2bb045ed (patch)
tree       3c30a3f683aae9303c7c5bc133dbce6eaeeca746 /sys-kernel
parent     6afcc60d32a78331bc2e5fed072b042fb77bd17c (diff)
sys-kernel/linux-{image,sources}-redcore-lts:
* version bump to v4.14.75
* adjust the MuQSS v0.162 CPU scheduler to linux-hardened and enable it by default
* add the BFQ-SQ v8r12 I/O scheduler and enable it by default
* lower HZ to 100 (the recommended value for MuQSS-enabled kernels)
* disable dynamic ticks (MuQSS is itself a tickless scheduler, so they are no longer required)
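For reference, with the above defaults the relevant part of the kernel configuration should end up roughly like the fragment below. This is only a sketch: the exact symbol names (CONFIG_SCHED_MUQSS, CONFIG_HZ_100, CONFIG_HZ_PERIODIC and the NO_HZ options) are assumed here, and the authoritative settings are the ones shipped in files/redcore-lts-amd64.config.

  # Hypothetical excerpt; the shipped files/redcore-lts-amd64.config is authoritative
  CONFIG_SCHED_MUQSS=y
  CONFIG_IOSCHED_BFQ_SQ=y
  CONFIG_DEFAULT_BFQ_SQ=y
  CONFIG_DEFAULT_IOSCHED="bfq-sq"
  CONFIG_HZ_100=y
  CONFIG_HZ=100
  CONFIG_HZ_PERIODIC=y
  # CONFIG_NO_HZ_IDLE is not set
  # CONFIG_NO_HZ_FULL is not set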
Diffstat (limited to 'sys-kernel')
-rw-r--r--  sys-kernel/linux-image-redcore-lts/Manifest | 2
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0001-BFQ-v8r12-20171108.patch | 25199
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch | 9571
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0002-BFQ-v8r12-20180404.patch | 4611
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0002-Make-preemptible-kernel-default.patch | 733
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0003-Expose-vmsplit-for-our-poor-32-bit-users.patch | 48
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0004-Create-highres-timeout-variants-of-schedule_timeout-.patch | 153
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch | 50
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0006-Convert-msleep-to-use-hrtimers-when-active.patch | 54
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch | 529
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch | 311
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch | 160
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch | 69
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch | 136
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch | 81
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch | 61
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0014-Swap-sucks.patch | 25
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0015-Enable-BFQ-io-scheduler-by-default.patch | 38
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch | 19
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/0016-unfuck-MuQSS-on-linux-4_14_15+.patch | 48
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/redcore-lts-amd64.config | 41
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/uksm-linux-hardened.patch (renamed from sys-kernel/linux-image-redcore-lts/files/uksm-for-linux-hardened.patch) | 0
-rw-r--r--  sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.14.75.ebuild (renamed from sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.14.65.ebuild) | 25
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/Manifest | 2
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0001-BFQ-v8r12-20171108.patch | 25199
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch | 9571
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0002-BFQ-v8r12-20180404.patch | 4611
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0002-Make-preemptible-kernel-default.patch | 733
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0003-Expose-vmsplit-for-our-poor-32-bit-users.patch | 48
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0004-Create-highres-timeout-variants-of-schedule_timeout-.patch | 153
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch | 50
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0006-Convert-msleep-to-use-hrtimers-when-active.patch | 54
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch | 529
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch | 311
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch | 160
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch | 69
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch | 136
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch | 81
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch | 61
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0014-Swap-sucks.patch | 25
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0015-Enable-BFQ-io-scheduler-by-default.patch | 38
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch | 19
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/0016-unfuck-MuQSS-on-linux-4_14_15+.patch | 48
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/redcore-lts-amd64.config | 41
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/uksm-linux-hardened.patch (renamed from sys-kernel/linux-sources-redcore-lts/files/uksm-for-linux-hardened.patch) | 0
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.14.75.ebuild (renamed from sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.14.65.ebuild) | 22
46 files changed, 83801 insertions, 124 deletions
diff --git a/sys-kernel/linux-image-redcore-lts/Manifest b/sys-kernel/linux-image-redcore-lts/Manifest
index 17b5b0b2..48f87643 100644
--- a/sys-kernel/linux-image-redcore-lts/Manifest
+++ b/sys-kernel/linux-image-redcore-lts/Manifest
@@ -1 +1 @@
-DIST linux-4.14.65.tar.xz 100977596 BLAKE2B 1864dadfbdd4cf2e8c89c196291e04a680f06f9916a792bc6f2c22e9b74e512f6475a7dbfb70c81882841583e726466c0f7ff6995d3e78d6334a71b4cef06303 SHA512 162382b3567ba256a1caac7b9c0e2188484ae22d8731c2627ab0faa471ac35ca6578e0f0428c17d63d14f53316b7701a0e9c7a99b1bc749ddd6ab408f10c2185
+DIST linux-4.14.75.tar.xz 100992748 BLAKE2B febb717f667f380b4c39a06c0bb522181dc7f16fd21e86794589cef8b4de1b064c216e5e51aa6b4bfb2deead6263b76ecce3bfc480126bdf9840d17c9ba590b9 SHA512 d6d75a89fd0aed92d3dae4e651273a5b2fec242e49ba6fd71cf642c32e346fb6be083b3c9d1f77fc6ded9531d9f1efd82041f28b12f71eaf2c53d16c071e6703
diff --git a/sys-kernel/linux-image-redcore-lts/files/0001-BFQ-v8r12-20171108.patch b/sys-kernel/linux-image-redcore-lts/files/0001-BFQ-v8r12-20171108.patch
new file mode 100644
index 00000000..db7d064b
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0001-BFQ-v8r12-20171108.patch
@@ -0,0 +1,25199 @@
+From c21f53f17430230dab50df29b8ea1b71f99d09d6 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@unimore.it>
+Date: Tue, 7 Apr 2015 13:39:12 +0200
+Subject: [PATCH 01/51] Add BFQ-v8r12
+
+This commit is the result of the following operations.
+
+1. The squash of all the commits between "block: cgroups, kconfig,
+build bits for BFQ-v7r11-4.5.0" and BFQ-v8r12 in the branch
+bfq-mq-v8-v4.11
+
+2. The renaming of two files (block/bfq-cgroup.c ->
+block/bfq-cgroup-included.c and block/bfq-iosched.c ->
+block/bfq-sq-iosched.c) and of one option (CONFIG_BFQ_GROUP_IOSCHED ->
+CONFIG_BFQ_SQ_GROUP_IOSCHED), to avoid name clashes. These name
+clashes are due to the presence of bfq in mainline from 4.12.
+
+3. The modification of block/Makefile and block/Kconfig.iosched to
+comply with the above renaming.
+
+Signed-off-by: Mauro Andreolini <mauro.andreolini@unimore.it>
+Signed-off-by: Arianna Avanzini <avanzini@google.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ Makefile | 2 +-
+ block/Kconfig.iosched | 31 +
+ block/Makefile | 1 +
+ block/bfq-cgroup-included.c | 1190 ++++++++++
+ block/bfq-ioc.c | 36 +
+ block/bfq-sched.c | 2002 ++++++++++++++++
+ block/bfq-sq-iosched.c | 5379 +++++++++++++++++++++++++++++++++++++++++++
+ block/bfq.h | 948 ++++++++
+ include/linux/blkdev.h | 2 +-
+ 9 files changed, 9589 insertions(+), 2 deletions(-)
+ create mode 100644 block/bfq-cgroup-included.c
+ create mode 100644 block/bfq-ioc.c
+ create mode 100644 block/bfq-sched.c
+ create mode 100644 block/bfq-sq-iosched.c
+ create mode 100644 block/bfq.h
+
+diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
+index a4a8914bf7a4..9e3f4c2f7390 100644
+--- a/block/Kconfig.iosched
++++ b/block/Kconfig.iosched
+@@ -40,6 +40,26 @@ config CFQ_GROUP_IOSCHED
+ ---help---
+ Enable group IO scheduling in CFQ.
+
++config IOSCHED_BFQ_SQ
++ tristate "BFQ-SQ I/O scheduler"
++ default n
++ ---help---
++ The BFQ-SQ I/O scheduler (for legacy blk: SQ stands for
++ SingleQueue) distributes bandwidth among all processes
++ according to their weights, regardless of the device
++ parameters and with any workload. It also guarantees a low
++ latency to interactive and soft real-time applications.
++ Details in Documentation/block/bfq-iosched.txt
++
++config BFQ_SQ_GROUP_IOSCHED
++ bool "BFQ-SQ hierarchical scheduling support"
++ depends on IOSCHED_BFQ_SQ && BLK_CGROUP
++ default n
++ ---help---
++
++ Enable hierarchical scheduling in BFQ-SQ, using the blkio
++ (cgroups-v1) or io (cgroups-v2) controller.
++
+ choice
+
+ prompt "Default I/O scheduler"
+@@ -54,6 +74,16 @@ choice
+ config DEFAULT_CFQ
+ bool "CFQ" if IOSCHED_CFQ=y
+
++ config DEFAULT_BFQ_SQ
++ bool "BFQ-SQ" if IOSCHED_BFQ_SQ=y
++ help
++ Selects BFQ-SQ as the default I/O scheduler which will be
++ used by default for all block devices.
++ The BFQ-SQ I/O scheduler aims at distributing the bandwidth
++ as desired, independently of the disk parameters and with
++ any workload. It also tries to guarantee low latency to
++ interactive and soft real-time applications.
++
+ config DEFAULT_NOOP
+ bool "No-op"
+
+@@ -63,6 +93,7 @@ config DEFAULT_IOSCHED
+ string
+ default "deadline" if DEFAULT_DEADLINE
+ default "cfq" if DEFAULT_CFQ
++ default "bfq-sq" if DEFAULT_BFQ_SQ
+ default "noop" if DEFAULT_NOOP
+
+ config MQ_IOSCHED_DEADLINE
+diff --git a/block/Makefile b/block/Makefile
+index 6a56303b9925..59026b425791 100644
+--- a/block/Makefile
++++ b/block/Makefile
+@@ -24,6 +24,7 @@ obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o
+ obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
+ bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
+ obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
++obj-$(CONFIG_IOSCHED_BFQ_SQ) += bfq-sq-iosched.o
+
+ obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
+ obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+new file mode 100644
+index 000000000000..af7c216a3540
+--- /dev/null
++++ b/block/bfq-cgroup-included.c
+@@ -0,0 +1,1190 @@
++/*
++ * BFQ: CGROUPS support.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2016 Paolo Valente <paolo.valente@linaro.org>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ */
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++
++/* bfqg stats flags */
++enum bfqg_stats_flags {
++ BFQG_stats_waiting = 0,
++ BFQG_stats_idling,
++ BFQG_stats_empty,
++};
++
++#define BFQG_FLAG_FNS(name) \
++static void bfqg_stats_mark_##name(struct bfqg_stats *stats) \
++{ \
++ stats->flags |= (1 << BFQG_stats_##name); \
++} \
++static void bfqg_stats_clear_##name(struct bfqg_stats *stats) \
++{ \
++ stats->flags &= ~(1 << BFQG_stats_##name); \
++} \
++static int bfqg_stats_##name(struct bfqg_stats *stats) \
++{ \
++ return (stats->flags & (1 << BFQG_stats_##name)) != 0; \
++} \
++
++BFQG_FLAG_FNS(waiting)
++BFQG_FLAG_FNS(idling)
++BFQG_FLAG_FNS(empty)
++#undef BFQG_FLAG_FNS
++
++/* This should be called with the queue_lock held. */
++static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
++{
++ unsigned long long now;
++
++ if (!bfqg_stats_waiting(stats))
++ return;
++
++ now = sched_clock();
++ if (time_after64(now, stats->start_group_wait_time))
++ blkg_stat_add(&stats->group_wait_time,
++ now - stats->start_group_wait_time);
++ bfqg_stats_clear_waiting(stats);
++}
++
++/* This should be called with the queue_lock held. */
++static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
++ struct bfq_group *curr_bfqg)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++
++ if (bfqg_stats_waiting(stats))
++ return;
++ if (bfqg == curr_bfqg)
++ return;
++ stats->start_group_wait_time = sched_clock();
++ bfqg_stats_mark_waiting(stats);
++}
++
++/* This should be called with the queue_lock held. */
++static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
++{
++ unsigned long long now;
++
++ if (!bfqg_stats_empty(stats))
++ return;
++
++ now = sched_clock();
++ if (time_after64(now, stats->start_empty_time))
++ blkg_stat_add(&stats->empty_time,
++ now - stats->start_empty_time);
++ bfqg_stats_clear_empty(stats);
++}
++
++static void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
++{
++ blkg_stat_add(&bfqg->stats.dequeue, 1);
++}
++
++static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++
++ if (blkg_rwstat_total(&stats->queued))
++ return;
++
++ /*
++ * group is already marked empty. This can happen if bfqq got new
++ * request in parent group and moved to this group while being added
++ * to service tree. Just ignore the event and move on.
++ */
++ if (bfqg_stats_empty(stats))
++ return;
++
++ stats->start_empty_time = sched_clock();
++ bfqg_stats_mark_empty(stats);
++}
++
++static void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++
++ if (bfqg_stats_idling(stats)) {
++ unsigned long long now = sched_clock();
++
++ if (time_after64(now, stats->start_idle_time))
++ blkg_stat_add(&stats->idle_time,
++ now - stats->start_idle_time);
++ bfqg_stats_clear_idling(stats);
++ }
++}
++
++static void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++
++ stats->start_idle_time = sched_clock();
++ bfqg_stats_mark_idling(stats);
++}
++
++static void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++
++ blkg_stat_add(&stats->avg_queue_size_sum,
++ blkg_rwstat_total(&stats->queued));
++ blkg_stat_add(&stats->avg_queue_size_samples, 1);
++ bfqg_stats_update_group_wait_time(stats);
++}
++
++static struct blkcg_policy blkcg_policy_bfq;
++
++/*
++ * blk-cgroup policy-related handlers
++ * The following functions help in converting between blk-cgroup
++ * internal structures and BFQ-specific structures.
++ */
++
++static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
++{
++ return pd ? container_of(pd, struct bfq_group, pd) : NULL;
++}
++
++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
++{
++ return pd_to_blkg(&bfqg->pd);
++}
++
++static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
++{
++ struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_bfq);
++
++ return pd_to_bfqg(pd);
++}
++
++/*
++ * bfq_group handlers
++ * The following functions help in navigating the bfq_group hierarchy
++ * by allowing to find the parent of a bfq_group or the bfq_group
++ * associated to a bfq_queue.
++ */
++
++static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
++{
++ struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
++
++ return pblkg ? blkg_to_bfqg(pblkg) : NULL;
++}
++
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *group_entity = bfqq->entity.parent;
++
++ return group_entity ? container_of(group_entity, struct bfq_group,
++ entity) :
++ bfqq->bfqd->root_group;
++}
++
++/*
++ * The following two functions handle get and put of a bfq_group by
++ * wrapping the related blk-cgroup hooks.
++ */
++
++static void bfqg_get(struct bfq_group *bfqg)
++{
++ return blkg_get(bfqg_to_blkg(bfqg));
++}
++
++static void bfqg_put(struct bfq_group *bfqg)
++{
++ return blkg_put(bfqg_to_blkg(bfqg));
++}
++
++static void bfqg_stats_update_io_add(struct bfq_group *bfqg,
++ struct bfq_queue *bfqq,
++ unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.queued, op, 1);
++ bfqg_stats_end_empty_time(&bfqg->stats);
++ if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
++ bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
++}
++
++static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.queued, op, -1);
++}
++
++static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.merged, op, 1);
++}
++
++static void bfqg_stats_update_completion(struct bfq_group *bfqg,
++ uint64_t start_time, uint64_t io_start_time,
++ unsigned int op)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++ unsigned long long now = sched_clock();
++
++ if (time_after64(now, io_start_time))
++ blkg_rwstat_add(&stats->service_time, op,
++ now - io_start_time);
++ if (time_after64(io_start_time, start_time))
++ blkg_rwstat_add(&stats->wait_time, op,
++ io_start_time - start_time);
++}
++
++/* @stats = 0 */
++static void bfqg_stats_reset(struct bfqg_stats *stats)
++{
++ /* queued stats shouldn't be cleared */
++ blkg_rwstat_reset(&stats->merged);
++ blkg_rwstat_reset(&stats->service_time);
++ blkg_rwstat_reset(&stats->wait_time);
++ blkg_stat_reset(&stats->time);
++ blkg_stat_reset(&stats->avg_queue_size_sum);
++ blkg_stat_reset(&stats->avg_queue_size_samples);
++ blkg_stat_reset(&stats->dequeue);
++ blkg_stat_reset(&stats->group_wait_time);
++ blkg_stat_reset(&stats->idle_time);
++ blkg_stat_reset(&stats->empty_time);
++}
++
++/* @to += @from */
++static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
++{
++ if (!to || !from)
++ return;
++
++ /* queued stats shouldn't be cleared */
++ blkg_rwstat_add_aux(&to->merged, &from->merged);
++ blkg_rwstat_add_aux(&to->service_time, &from->service_time);
++ blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
++ blkg_stat_add_aux(&from->time, &from->time);
++ blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
++ blkg_stat_add_aux(&to->avg_queue_size_samples,
++ &from->avg_queue_size_samples);
++ blkg_stat_add_aux(&to->dequeue, &from->dequeue);
++ blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
++ blkg_stat_add_aux(&to->idle_time, &from->idle_time);
++ blkg_stat_add_aux(&to->empty_time, &from->empty_time);
++}
++
++/*
++ * Transfer @bfqg's stats to its parent's dead_stats so that the ancestors'
++ * recursive stats can still account for the amount used by this bfqg after
++ * it's gone.
++ */
++static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
++{
++ struct bfq_group *parent;
++
++ if (!bfqg) /* root_group */
++ return;
++
++ parent = bfqg_parent(bfqg);
++
++ lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
++
++ if (unlikely(!parent))
++ return;
++
++ bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
++ bfqg_stats_reset(&bfqg->stats);
++}
++
++static void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ if (bfqq) {
++ bfqq->ioprio = bfqq->new_ioprio;
++ bfqq->ioprio_class = bfqq->new_ioprio_class;
++ bfqg_get(bfqg);
++ }
++ entity->parent = bfqg->my_entity; /* NULL for root group */
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static void bfqg_stats_exit(struct bfqg_stats *stats)
++{
++ blkg_rwstat_exit(&stats->merged);
++ blkg_rwstat_exit(&stats->service_time);
++ blkg_rwstat_exit(&stats->wait_time);
++ blkg_rwstat_exit(&stats->queued);
++ blkg_stat_exit(&stats->time);
++ blkg_stat_exit(&stats->avg_queue_size_sum);
++ blkg_stat_exit(&stats->avg_queue_size_samples);
++ blkg_stat_exit(&stats->dequeue);
++ blkg_stat_exit(&stats->group_wait_time);
++ blkg_stat_exit(&stats->idle_time);
++ blkg_stat_exit(&stats->empty_time);
++}
++
++static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
++{
++ if (blkg_rwstat_init(&stats->merged, gfp) ||
++ blkg_rwstat_init(&stats->service_time, gfp) ||
++ blkg_rwstat_init(&stats->wait_time, gfp) ||
++ blkg_rwstat_init(&stats->queued, gfp) ||
++ blkg_stat_init(&stats->time, gfp) ||
++ blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
++ blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
++ blkg_stat_init(&stats->dequeue, gfp) ||
++ blkg_stat_init(&stats->group_wait_time, gfp) ||
++ blkg_stat_init(&stats->idle_time, gfp) ||
++ blkg_stat_init(&stats->empty_time, gfp)) {
++ bfqg_stats_exit(stats);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
++{
++ return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
++}
++
++static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
++{
++ return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
++}
++
++static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
++{
++ struct bfq_group_data *bgd;
++
++ bgd = kzalloc(sizeof(*bgd), gfp);
++ if (!bgd)
++ return NULL;
++ return &bgd->pd;
++}
++
++static void bfq_cpd_init(struct blkcg_policy_data *cpd)
++{
++ struct bfq_group_data *d = cpd_to_bfqgd(cpd);
++
++ d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
++ CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
++}
++
++static void bfq_cpd_free(struct blkcg_policy_data *cpd)
++{
++ kfree(cpd_to_bfqgd(cpd));
++}
++
++static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
++{
++ struct bfq_group *bfqg;
++
++ bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
++ if (!bfqg)
++ return NULL;
++
++ if (bfqg_stats_init(&bfqg->stats, gfp)) {
++ kfree(bfqg);
++ return NULL;
++ }
++
++ return &bfqg->pd;
++}
++
++static void bfq_pd_init(struct blkg_policy_data *pd)
++{
++ struct blkcg_gq *blkg;
++ struct bfq_group *bfqg;
++ struct bfq_data *bfqd;
++ struct bfq_entity *entity;
++ struct bfq_group_data *d;
++
++ blkg = pd_to_blkg(pd);
++ BUG_ON(!blkg);
++ bfqg = blkg_to_bfqg(blkg);
++ bfqd = blkg->q->elevator->elevator_data;
++ entity = &bfqg->entity;
++ d = blkcg_to_bfqgd(blkg->blkcg);
++
++ entity->orig_weight = entity->weight = entity->new_weight = d->weight;
++ entity->my_sched_data = &bfqg->sched_data;
++ bfqg->my_entity = entity; /*
++ * the root_group's will be set to NULL
++ * in bfq_init_queue()
++ */
++ bfqg->bfqd = bfqd;
++ bfqg->active_entities = 0;
++ bfqg->rq_pos_tree = RB_ROOT;
++}
++
++static void bfq_pd_free(struct blkg_policy_data *pd)
++{
++ struct bfq_group *bfqg = pd_to_bfqg(pd);
++
++ bfqg_stats_exit(&bfqg->stats);
++ return kfree(bfqg);
++}
++
++static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
++{
++ struct bfq_group *bfqg = pd_to_bfqg(pd);
++
++ bfqg_stats_reset(&bfqg->stats);
++}
++
++static void bfq_group_set_parent(struct bfq_group *bfqg,
++ struct bfq_group *parent)
++{
++ struct bfq_entity *entity;
++
++ BUG_ON(!parent);
++ BUG_ON(!bfqg);
++ BUG_ON(bfqg == parent);
++
++ entity = &bfqg->entity;
++ entity->parent = parent->my_entity;
++ entity->sched_data = &parent->sched_data;
++}
++
++static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
++ struct blkcg *blkcg)
++{
++ struct blkcg_gq *blkg;
++
++ blkg = blkg_lookup(blkcg, bfqd->queue);
++ if (likely(blkg))
++ return blkg_to_bfqg(blkg);
++ return NULL;
++}
++
++static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
++ struct blkcg *blkcg)
++{
++ struct bfq_group *bfqg, *parent;
++ struct bfq_entity *entity;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ bfqg = bfq_lookup_bfqg(bfqd, blkcg);
++
++ if (unlikely(!bfqg))
++ return NULL;
++
++ /*
++ * Update chain of bfq_groups as we might be handling a leaf group
++ * which, along with some of its relatives, has not been hooked yet
++ * to the private hierarchy of BFQ.
++ */
++ entity = &bfqg->entity;
++ for_each_entity(entity) {
++ bfqg = container_of(entity, struct bfq_group, entity);
++ BUG_ON(!bfqg);
++ if (bfqg != bfqd->root_group) {
++ parent = bfqg_parent(bfqg);
++ if (!parent)
++ parent = bfqd->root_group;
++ BUG_ON(!parent);
++ bfq_group_set_parent(bfqg, parent);
++ }
++ }
++
++ return bfqg;
++}
++
++static void bfq_pos_tree_add_move(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq);
++
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool compensate,
++ enum bfqq_expiration reason);
++
++/**
++ * bfq_bfqq_move - migrate @bfqq to @bfqg.
++ * @bfqd: queue descriptor.
++ * @bfqq: the queue to move.
++ * @bfqg: the group to move to.
++ *
++ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
++ * it on the new one. Avoid putting the entity on the old group idle tree.
++ *
++ * Must be called under the queue lock; the cgroup owning @bfqg must
++ * not disappear (by now this just means that we are called under
++ * rcu_read_lock()).
++ */
++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_group *bfqg)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ BUG_ON(!bfq_bfqq_busy(bfqq) && !RB_EMPTY_ROOT(&bfqq->sort_list));
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list) && !entity->on_st);
++ BUG_ON(bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list)
++ && entity->on_st &&
++ bfqq != bfqd->in_service_queue);
++ BUG_ON(!bfq_bfqq_busy(bfqq) && bfqq == bfqd->in_service_queue);
++
++ /* If bfqq is empty, then bfq_bfqq_expire also invokes
++ * bfq_del_bfqq_busy, thereby removing bfqq and its entity
++ * from data structures related to current group. Otherwise we
++ * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
++ * we do below.
++ */
++ if (bfqq == bfqd->in_service_queue)
++ bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
++ false, BFQ_BFQQ_PREEMPTED);
++
++ BUG_ON(entity->on_st && !bfq_bfqq_busy(bfqq)
++ && &bfq_entity_service_tree(entity)->idle !=
++ entity->tree);
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq));
++
++ if (bfq_bfqq_busy(bfqq))
++ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
++ else if (entity->on_st) {
++ BUG_ON(&bfq_entity_service_tree(entity)->idle !=
++ entity->tree);
++ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
++ }
++ bfqg_put(bfqq_group(bfqq));
++
++ /*
++ * Here we use a reference to bfqg. We don't need a refcounter
++ * as the cgroup reference will not be dropped, so that its
++ * destroy() callback will not be invoked.
++ */
++ entity->parent = bfqg->my_entity;
++ entity->sched_data = &bfqg->sched_data;
++ bfqg_get(bfqg);
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq));
++ if (bfq_bfqq_busy(bfqq)) {
++ bfq_pos_tree_add_move(bfqd, bfqq);
++ bfq_activate_bfqq(bfqd, bfqq);
++ }
++
++ if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++ BUG_ON(entity->on_st && !bfq_bfqq_busy(bfqq)
++ && &bfq_entity_service_tree(entity)->idle !=
++ entity->tree);
++}
++
++/**
++ * __bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bfqd: the queue descriptor.
++ * @bic: the bic to move.
++ * @blkcg: the blk-cgroup to move to.
++ *
++ * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
++ * has to make sure that the reference to cgroup is valid across the call.
++ *
++ * NOTE: an alternative approach might have been to store the current
++ * cgroup in bfqq and getting a reference to it, reducing the lookup
++ * time here, at the price of slightly more complex code.
++ */
++static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic,
++ struct blkcg *blkcg)
++{
++ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
++ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
++ struct bfq_group *bfqg;
++ struct bfq_entity *entity;
++
++ lockdep_assert_held(bfqd->queue->queue_lock);
++
++ bfqg = bfq_find_set_group(bfqd, blkcg);
++
++ if (unlikely(!bfqg))
++ bfqg = bfqd->root_group;
++
++ if (async_bfqq) {
++ entity = &async_bfqq->entity;
++
++ if (entity->sched_data != &bfqg->sched_data) {
++ bic_set_bfqq(bic, NULL, 0);
++ bfq_log_bfqq(bfqd, async_bfqq,
++ "bic_change_group: %p %d",
++ async_bfqq,
++ async_bfqq->ref);
++ bfq_put_queue(async_bfqq);
++ }
++ }
++
++ if (sync_bfqq) {
++ entity = &sync_bfqq->entity;
++ if (entity->sched_data != &bfqg->sched_data)
++ bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
++ }
++
++ return bfqg;
++}
++
++static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_group *bfqg = NULL;
++ uint64_t serial_nr;
++
++ rcu_read_lock();
++ serial_nr = bio_blkcg(bio)->css.serial_nr;
++
++ /*
++ * Check whether blkcg has changed. The condition may trigger
++ * spuriously on a newly created cic but there's no harm.
++ */
++ if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
++ goto out;
++
++ bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
++ bic->blkcg_serial_nr = serial_nr;
++out:
++ rcu_read_unlock();
++}
++
++/**
++ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
++ * @st: the service tree being flushed.
++ */
++static void bfq_flush_idle_tree(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entity = st->first_idle;
++
++ for (; entity ; entity = st->first_idle)
++ __bfq_deactivate_entity(entity, false);
++}
++
++/**
++ * bfq_reparent_leaf_entity - move leaf entity to the root_group.
++ * @bfqd: the device data structure with the root group.
++ * @entity: the entity to move.
++ */
++static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ BUG_ON(!bfqq);
++ bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
++}
++
++/**
++ * bfq_reparent_active_entities - move to the root group all active
++ * entities.
++ * @bfqd: the device data structure with the root group.
++ * @bfqg: the group to move from.
++ * @st: the service tree with the entities.
++ *
++ * Needs queue_lock to be taken and reference to be valid over the call.
++ */
++static void bfq_reparent_active_entities(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ struct bfq_service_tree *st)
++{
++ struct rb_root *active = &st->active;
++ struct bfq_entity *entity = NULL;
++
++ if (!RB_EMPTY_ROOT(&st->active))
++ entity = bfq_entity_of(rb_first(active));
++
++ for (; entity ; entity = bfq_entity_of(rb_first(active)))
++ bfq_reparent_leaf_entity(bfqd, entity);
++
++ if (bfqg->sched_data.in_service_entity)
++ bfq_reparent_leaf_entity(bfqd,
++ bfqg->sched_data.in_service_entity);
++}
++
++/**
++ * bfq_pd_offline - deactivate the entity associated with @pd,
++ * and reparent its children entities.
++ * @pd: descriptor of the policy going offline.
++ *
++ * blkio already grabs the queue_lock for us, so no need to use
++ * RCU-based magic
++ */
++static void bfq_pd_offline(struct blkg_policy_data *pd)
++{
++ struct bfq_service_tree *st;
++ struct bfq_group *bfqg;
++ struct bfq_data *bfqd;
++ struct bfq_entity *entity;
++ int i;
++
++ BUG_ON(!pd);
++ bfqg = pd_to_bfqg(pd);
++ BUG_ON(!bfqg);
++ bfqd = bfqg->bfqd;
++ BUG_ON(bfqd && !bfqd->root_group);
++
++ entity = bfqg->my_entity;
++
++ if (!entity) /* root group */
++ return;
++
++ /*
++ * Empty all service_trees belonging to this group before
++ * deactivating the group itself.
++ */
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
++ BUG_ON(!bfqg->sched_data.service_tree);
++ st = bfqg->sched_data.service_tree + i;
++ /*
++ * The idle tree may still contain bfq_queues belonging
++ * to exited tasks because they never migrated to a different
++ * cgroup from the one being destroyed now. No one else
++ * can access them so it's safe to act without any lock.
++ */
++ bfq_flush_idle_tree(st);
++
++ /*
++ * It may happen that some queues are still active
++ * (busy) upon group destruction (if the corresponding
++ * processes have been forced to terminate). We move
++ * all the leaf entities corresponding to these queues
++ * to the root_group.
++ * Also, it may happen that the group has an entity
++ * in service, which is disconnected from the active
++ * tree: it must be moved, too.
++ * There is no need to put the sync queues, as the
++ * scheduler has taken no reference.
++ */
++ bfq_reparent_active_entities(bfqd, bfqg, st);
++ BUG_ON(!RB_EMPTY_ROOT(&st->active));
++ BUG_ON(!RB_EMPTY_ROOT(&st->idle));
++ }
++ BUG_ON(bfqg->sched_data.next_in_service);
++ BUG_ON(bfqg->sched_data.in_service_entity);
++
++ __bfq_deactivate_entity(entity, false);
++ bfq_put_async_queues(bfqd, bfqg);
++
++ /*
++ * @blkg is going offline and will be ignored by
++ * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
++ * that they don't get lost. If IOs complete after this point, the
++ * stats for them will be lost. Oh well...
++ */
++ bfqg_stats_xfer_dead(bfqg);
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++ struct blkcg_gq *blkg;
++
++ list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
++ struct bfq_group *bfqg = blkg_to_bfqg(blkg);
++ BUG_ON(!bfqg);
++
++ bfq_end_wr_async_queues(bfqd, bfqg);
++ }
++ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++static int bfq_io_show_weight(struct seq_file *sf, void *v)
++{
++ struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
++ struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
++ unsigned int val = 0;
++
++ if (bfqgd)
++ val = bfqgd->weight;
++
++ seq_printf(sf, "%u\n", val);
++
++ return 0;
++}
++
++static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
++ struct cftype *cftype,
++ u64 val)
++{
++ struct blkcg *blkcg = css_to_blkcg(css);
++ struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
++ struct blkcg_gq *blkg;
++ int ret = -ERANGE;
++
++ if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
++ return ret;
++
++ ret = 0;
++ spin_lock_irq(&blkcg->lock);
++ bfqgd->weight = (unsigned short)val;
++ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
++ struct bfq_group *bfqg = blkg_to_bfqg(blkg);
++
++ if (!bfqg)
++ continue;
++ /*
++ * Setting the prio_changed flag of the entity
++ * to 1 with new_weight == weight would re-set
++ * the value of the weight to its ioprio mapping.
++ * Set the flag only if necessary.
++ */
++ if ((unsigned short)val != bfqg->entity.new_weight) {
++ bfqg->entity.new_weight = (unsigned short)val;
++ /*
++ * Make sure that the above new value has been
++ * stored in bfqg->entity.new_weight before
++ * setting the prio_changed flag. In fact,
++ * this flag may be read asynchronously (in
++ * critical sections protected by a different
++ * lock than that held here), and finding this
++ * flag set may cause the execution of the code
++ * for updating parameters whose value may
++ * depend also on bfqg->entity.new_weight (in
++ * __bfq_entity_update_weight_prio).
++ * This barrier makes sure that the new value
++ * of bfqg->entity.new_weight is correctly
++ * seen in that code.
++ */
++ smp_wmb();
++ bfqg->entity.prio_changed = 1;
++ }
++ }
++ spin_unlock_irq(&blkcg->lock);
++
++ return ret;
++}
++
++static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
++ char *buf, size_t nbytes,
++ loff_t off)
++{
++ u64 weight;
++ /* First unsigned long found in the file is used */
++ int ret = kstrtoull(strim(buf), 0, &weight);
++
++ if (ret)
++ return ret;
++
++ return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
++}
++
++static int bfqg_print_stat(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
++ &blkcg_policy_bfq, seq_cft(sf)->private, false);
++ return 0;
++}
++
++static int bfqg_print_rwstat(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
++ &blkcg_policy_bfq, seq_cft(sf)->private, true);
++ return 0;
++}
++
++static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
++ struct blkg_policy_data *pd, int off)
++{
++ u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
++ &blkcg_policy_bfq, off);
++ return __blkg_prfill_u64(sf, pd, sum);
++}
++
++static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
++ struct blkg_policy_data *pd, int off)
++{
++ struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
++ &blkcg_policy_bfq,
++ off);
++ return __blkg_prfill_rwstat(sf, pd, &sum);
++}
++
++static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
++ bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
++ seq_cft(sf)->private, false);
++ return 0;
++}
++
++static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
++ bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
++ seq_cft(sf)->private, true);
++ return 0;
++}
++
++static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
++ int off)
++{
++ u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
++
++ return __blkg_prfill_u64(sf, pd, sum >> 9);
++}
++
++static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
++ bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
++ return 0;
++}
++
++static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
++ struct blkg_policy_data *pd, int off)
++{
++ struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
++ offsetof(struct blkcg_gq, stat_bytes));
++ u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
++ atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
++
++ return __blkg_prfill_u64(sf, pd, sum >> 9);
++}
++
++static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
++ bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
++ false);
++ return 0;
++}
++
++
++static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
++ struct blkg_policy_data *pd, int off)
++{
++ struct bfq_group *bfqg = pd_to_bfqg(pd);
++ u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
++ u64 v = 0;
++
++ if (samples) {
++ v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
++ v = div64_u64(v, samples);
++ }
++ __blkg_prfill_u64(sf, pd, v);
++ return 0;
++}
++
++/* print avg_queue_size */
++static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
++ bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
++ 0, false);
++ return 0;
++}
++
++static struct bfq_group *
++bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
++{
++ int ret;
++
++ ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
++ if (ret)
++ return NULL;
++
++ return blkg_to_bfqg(bfqd->queue->root_blkg);
++}
++
++static struct cftype bfq_blkcg_legacy_files[] = {
++ {
++ .name = "bfq.weight",
++ .flags = CFTYPE_NOT_ON_ROOT,
++ .seq_show = bfq_io_show_weight,
++ .write_u64 = bfq_io_set_weight_legacy,
++ },
++
++ /* statistics, covers only the tasks in the bfqg */
++ {
++ .name = "bfq.time",
++ .private = offsetof(struct bfq_group, stats.time),
++ .seq_show = bfqg_print_stat,
++ },
++ {
++ .name = "bfq.sectors",
++ .seq_show = bfqg_print_stat_sectors,
++ },
++ {
++ .name = "bfq.io_service_bytes",
++ .private = (unsigned long)&blkcg_policy_bfq,
++ .seq_show = blkg_print_stat_bytes,
++ },
++ {
++ .name = "bfq.io_serviced",
++ .private = (unsigned long)&blkcg_policy_bfq,
++ .seq_show = blkg_print_stat_ios,
++ },
++ {
++ .name = "bfq.io_service_time",
++ .private = offsetof(struct bfq_group, stats.service_time),
++ .seq_show = bfqg_print_rwstat,
++ },
++ {
++ .name = "bfq.io_wait_time",
++ .private = offsetof(struct bfq_group, stats.wait_time),
++ .seq_show = bfqg_print_rwstat,
++ },
++ {
++ .name = "bfq.io_merged",
++ .private = offsetof(struct bfq_group, stats.merged),
++ .seq_show = bfqg_print_rwstat,
++ },
++ {
++ .name = "bfq.io_queued",
++ .private = offsetof(struct bfq_group, stats.queued),
++ .seq_show = bfqg_print_rwstat,
++ },
++
++ /* the same statistics which cover the bfqg and its descendants */
++ {
++ .name = "bfq.time_recursive",
++ .private = offsetof(struct bfq_group, stats.time),
++ .seq_show = bfqg_print_stat_recursive,
++ },
++ {
++ .name = "bfq.sectors_recursive",
++ .seq_show = bfqg_print_stat_sectors_recursive,
++ },
++ {
++ .name = "bfq.io_service_bytes_recursive",
++ .private = (unsigned long)&blkcg_policy_bfq,
++ .seq_show = blkg_print_stat_bytes_recursive,
++ },
++ {
++ .name = "bfq.io_serviced_recursive",
++ .private = (unsigned long)&blkcg_policy_bfq,
++ .seq_show = blkg_print_stat_ios_recursive,
++ },
++ {
++ .name = "bfq.io_service_time_recursive",
++ .private = offsetof(struct bfq_group, stats.service_time),
++ .seq_show = bfqg_print_rwstat_recursive,
++ },
++ {
++ .name = "bfq.io_wait_time_recursive",
++ .private = offsetof(struct bfq_group, stats.wait_time),
++ .seq_show = bfqg_print_rwstat_recursive,
++ },
++ {
++ .name = "bfq.io_merged_recursive",
++ .private = offsetof(struct bfq_group, stats.merged),
++ .seq_show = bfqg_print_rwstat_recursive,
++ },
++ {
++ .name = "bfq.io_queued_recursive",
++ .private = offsetof(struct bfq_group, stats.queued),
++ .seq_show = bfqg_print_rwstat_recursive,
++ },
++ {
++ .name = "bfq.avg_queue_size",
++ .seq_show = bfqg_print_avg_queue_size,
++ },
++ {
++ .name = "bfq.group_wait_time",
++ .private = offsetof(struct bfq_group, stats.group_wait_time),
++ .seq_show = bfqg_print_stat,
++ },
++ {
++ .name = "bfq.idle_time",
++ .private = offsetof(struct bfq_group, stats.idle_time),
++ .seq_show = bfqg_print_stat,
++ },
++ {
++ .name = "bfq.empty_time",
++ .private = offsetof(struct bfq_group, stats.empty_time),
++ .seq_show = bfqg_print_stat,
++ },
++ {
++ .name = "bfq.dequeue",
++ .private = offsetof(struct bfq_group, stats.dequeue),
++ .seq_show = bfqg_print_stat,
++ },
++ { } /* terminate */
++};
++
++static struct cftype bfq_blkg_files[] = {
++ {
++ .name = "bfq.weight",
++ .flags = CFTYPE_NOT_ON_ROOT,
++ .seq_show = bfq_io_show_weight,
++ .write = bfq_io_set_weight,
++ },
++ {} /* terminate */
++};
++
++#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
++ struct bfq_queue *bfqq, unsigned int op) { }
++static inline void
++bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
++static inline void
++bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
++static inline void bfqg_stats_update_completion(struct bfq_group *bfqg,
++ uint64_t start_time, uint64_t io_start_time,
++ unsigned int op) { }
++static inline void
++bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
++ struct bfq_group *curr_bfqg) { }
++static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { }
++static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
++
++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_group *bfqg) {}
++
++static void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ if (bfqq) {
++ bfqq->ioprio = bfqq->new_ioprio;
++ bfqq->ioprio_class = bfqq->new_ioprio_class;
++ }
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
++ struct blkcg *blkcg)
++{
++ return bfqd->root_group;
++}
++
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
++{
++ return bfqq->bfqd->root_group;
++}
++
++static struct bfq_group *
++bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
++{
++ struct bfq_group *bfqg;
++ int i;
++
++ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
++ if (!bfqg)
++ return NULL;
++
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++ return bfqg;
++}
++#endif
+diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c
+new file mode 100644
+index 000000000000..fb7bb8f08b75
+--- /dev/null
++++ b/block/bfq-ioc.c
+@@ -0,0 +1,36 @@
++/*
++ * BFQ: I/O context handling.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++/**
++ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
++ * @icq: the iocontext queue.
++ */
++static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
++{
++ /* bic->icq is the first member, %NULL will convert to %NULL */
++ return container_of(icq, struct bfq_io_cq, icq);
++}
++
++/**
++ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
++ * @bfqd: the lookup key.
++ * @ioc: the io_context of the process doing I/O.
++ *
++ * Queue lock must be held.
++ */
++static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
++ struct io_context *ioc)
++{
++ if (ioc)
++ return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue));
++ return NULL;
++}
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+new file mode 100644
+index 000000000000..ac8991bca9fa
+--- /dev/null
++++ b/block/bfq-sched.c
+@@ -0,0 +1,2002 @@
++/*
++ * BFQ: Hierarchical B-WF2Q+ scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2016 Paolo Valente <paolo.valente@linaro.org>
++ */
++
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
++
++/**
++ * bfq_gt - compare two timestamps.
++ * @a: first ts.
++ * @b: second ts.
++ *
++ * Return @a > @b, dealing with wrapping correctly.
++ */
++static int bfq_gt(u64 a, u64 b)
++{
++ return (s64)(a - b) > 0;
++}
++
++static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
++{
++ struct rb_node *node = tree->rb_node;
++
++ return rb_entry(node, struct bfq_entity, rb_node);
++}
++
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd);
++
++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
++
++/**
++ * bfq_update_next_in_service - update sd->next_in_service
++ * @sd: sched_data for which to perform the update.
++ * @new_entity: if not NULL, pointer to the entity whose activation,
++ * requeueing or repositioning triggered the invocation of
++ * this function.
++ *
++ * This function is called to update sd->next_in_service, which, in
++ * its turn, may change as a consequence of the insertion or
++ * extraction of an entity into/from one of the active trees of
++ * sd. These insertions/extractions occur as a consequence of
++ * activations/deactivations of entities, with some activations being
++ * 'true' activations, and other activations being requeueings (i.e.,
++ * implementing the second, requeueing phase of the mechanism used to
++ * reposition an entity in its active tree; see comments on
++ * __bfq_activate_entity and __bfq_requeue_entity for details). In
++ * both the last two activation sub-cases, new_entity points to the
++ * just activated or requeued entity.
++ *
++ * Returns true if sd->next_in_service changes in such a way that
++ * entity->parent may become the next_in_service for its parent
++ * entity.
++ */
++static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
++ struct bfq_entity *new_entity)
++{
++ struct bfq_entity *next_in_service = sd->next_in_service;
++ struct bfq_queue *bfqq;
++ bool parent_sched_may_change = false;
++
++ /*
++ * If this update is triggered by the activation, requeueing
++ * or repositioning of an entity that does not coincide with
++ * sd->next_in_service, then a full lookup in the active tree
++ * can be avoided. In fact, it is enough to check whether the
++ * just-modified entity has a higher priority than
++ * sd->next_in_service, or, even if it has the same priority
++ * as sd->next_in_service, is eligible and has a lower virtual
++ * finish time than sd->next_in_service. If this compound
++ * condition holds, then the new entity becomes the new
++ * next_in_service. Otherwise no change is needed.
++ */
++ if (new_entity && new_entity != sd->next_in_service) {
++ /*
++ * Flag used to decide whether to replace
++ * sd->next_in_service with new_entity. Tentatively
++ * set to true, and left as true if
++ * sd->next_in_service is NULL.
++ */
++ bool replace_next = true;
++
++ /*
++ * If there is already a next_in_service candidate
++ * entity, then compare class priorities or timestamps
++ * to decide whether to replace sd->service_tree with
++ * new_entity.
++ */
++ if (next_in_service) {
++ unsigned int new_entity_class_idx =
++ bfq_class_idx(new_entity);
++ struct bfq_service_tree *st =
++ sd->service_tree + new_entity_class_idx;
++
++ /*
++ * For efficiency, evaluate the most likely
++ * sub-condition first.
++ */
++ replace_next =
++ (new_entity_class_idx ==
++ bfq_class_idx(next_in_service)
++ &&
++ !bfq_gt(new_entity->start, st->vtime)
++ &&
++ bfq_gt(next_in_service->finish,
++ new_entity->finish))
++ ||
++ new_entity_class_idx <
++ bfq_class_idx(next_in_service);
++ }
++
++ if (replace_next)
++ next_in_service = new_entity;
++ } else /* invoked because of a deactivation: lookup needed */
++ next_in_service = bfq_lookup_next_entity(sd);
++
++ if (next_in_service) {
++ parent_sched_may_change = !sd->next_in_service ||
++ bfq_update_parent_budget(next_in_service);
++ }
++
++ sd->next_in_service = next_in_service;
++
++ if (!next_in_service)
++ return parent_sched_may_change;
++
++ bfqq = bfq_entity_to_bfqq(next_in_service);
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "update_next_in_service: chosen this queue");
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(next_in_service,
++ struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "update_next_in_service: chosen this entity");
++ }
++#endif
++ return parent_sched_may_change;
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++/* both next loops stop at one of the child entities of the root group */
++#define for_each_entity(entity) \
++ for (; entity ; entity = entity->parent)
++
++/*
++ * For each iteration, compute parent in advance, so as to be safe if
++ * entity is deallocated during the iteration. Such a deallocation may
++ * happen as a consequence of a bfq_put_queue that frees the bfq_queue
++ * containing entity.
++ */
++#define for_each_entity_safe(entity, parent) \
++ for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
++
++/*
++ * Returns true if this budget changes may let next_in_service->parent
++ * become the next_in_service entity for its parent entity.
++ */
++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
++{
++ struct bfq_entity *bfqg_entity;
++ struct bfq_group *bfqg;
++ struct bfq_sched_data *group_sd;
++ bool ret = false;
++
++ BUG_ON(!next_in_service);
++
++ group_sd = next_in_service->sched_data;
++
++ bfqg = container_of(group_sd, struct bfq_group, sched_data);
++ /*
++ * bfq_group's my_entity field is not NULL only if the group
++ * is not the root group. We must not touch the root entity
++ * as it must never become an in-service entity.
++ */
++ bfqg_entity = bfqg->my_entity;
++ if (bfqg_entity) {
++ if (bfqg_entity->budget > next_in_service->budget)
++ ret = true;
++ bfqg_entity->budget = next_in_service->budget;
++ }
++
++ return ret;
++}
++
++/*
++ * This function tells whether entity stops being a candidate for next
++ * service, according to the following logic.
++ *
++ * This function is invoked for an entity that is about to be set in
++ * service. If such an entity is a queue, then the entity is no longer
++ * a candidate for next service (i.e, a candidate entity to serve
++ * after the in-service entity is expired). The function then returns
++ * true.
++ *
++ * In contrast, the entity could still be a candidate for next service
++ * if it is not a queue, and has more than one child. In fact, even if
++ * one of its children is about to be set in service, other children
++ * may still be the next to serve. As a consequence, a non-queue
++ * entity is not a candidate for next-service only if it has only one
++ * child. And only if this condition holds, then the function returns
++ * true for a non-queue entity.
++ */
++static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
++{
++ struct bfq_group *bfqg;
++
++ if (bfq_entity_to_bfqq(entity))
++ return true;
++
++ bfqg = container_of(entity, struct bfq_group, entity);
++
++ BUG_ON(bfqg == ((struct bfq_data *)(bfqg->bfqd))->root_group);
++ BUG_ON(bfqg->active_entities == 0);
++ if (bfqg->active_entities == 1)
++ return true;
++
++ return false;
++}
++
++#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#define for_each_entity(entity) \
++ for (; entity ; entity = NULL)
++
++#define for_each_entity_safe(entity, parent) \
++ for (parent = NULL; entity ; entity = parent)
++
++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
++{
++ return false;
++}
++
++static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
++{
++ return true;
++}
++
++#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++/*
++ * Shift for timestamp calculations. This actually limits the maximum
++ * service allowed in one timestamp delta (small shift values increase it),
++ * the maximum total weight that can be used for the queues in the system
++ * (big shift values increase it), and the period of virtual time
++ * wraparounds.
++ */
++#define WFQ_SERVICE_SHIFT 22
++
++static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = NULL;
++
++ BUG_ON(!entity);
++
++ if (!entity->my_sched_data)
++ bfqq = container_of(entity, struct bfq_queue, entity);
++
++ return bfqq;
++}
++
++
++/**
++ * bfq_delta - map service into the virtual time domain.
++ * @service: amount of service.
++ * @weight: scale factor (weight of an entity or weight sum).
++ */
++static u64 bfq_delta(unsigned long service, unsigned long weight)
++{
++ u64 d = (u64)service << WFQ_SERVICE_SHIFT;
++
++ do_div(d, weight);
++ return d;
++}
++
++/**
++ * bfq_calc_finish - assign the finish time to an entity.
++ * @entity: the entity to act upon.
++ * @service: the service to be charged to the entity.
++ */
++static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ unsigned long long start, finish, delta;
++
++ BUG_ON(entity->weight == 0);
++
++ entity->finish = entity->start +
++ bfq_delta(service, entity->weight);
++
++ start = ((entity->start>>10)*1000)>>12;
++ finish = ((entity->finish>>10)*1000)>>12;
++ delta = ((bfq_delta(service, entity->weight)>>10)*1000)>>12;
++
++ if (bfqq) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: serv %lu, w %d",
++ service, entity->weight);
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: start %llu, finish %llu, delta %llu",
++ start, finish, delta);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ } else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "calc_finish group: serv %lu, w %d",
++ service, entity->weight);
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "calc_finish group: start %llu, finish %llu, delta %llu",
++ start, finish, delta);
++#endif
++ }
++}
++
++/**
++ * bfq_entity_of - get an entity from a node.
++ * @node: the node field of the entity.
++ *
++ * Convert a node pointer to the relative entity. This is used only
++ * to simplify the logic of some functions and not as the generic
++ * conversion mechanism because, e.g., in the tree walking functions,
++ * the check for a %NULL value would be redundant.
++ */
++static struct bfq_entity *bfq_entity_of(struct rb_node *node)
++{
++ struct bfq_entity *entity = NULL;
++
++ if (node)
++ entity = rb_entry(node, struct bfq_entity, rb_node);
++
++ return entity;
++}
++
++/**
++ * bfq_extract - remove an entity from a tree.
++ * @root: the tree root.
++ * @entity: the entity to remove.
++ */
++static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
++{
++ BUG_ON(entity->tree != root);
++
++ entity->tree = NULL;
++ rb_erase(&entity->rb_node, root);
++}
++
++/**
++ * bfq_idle_extract - extract an entity from the idle tree.
++ * @st: the service tree of the owning @entity.
++ * @entity: the entity being removed.
++ */
++static void bfq_idle_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *next;
++
++ BUG_ON(entity->tree != &st->idle);
++
++ if (entity == st->first_idle) {
++ next = rb_next(&entity->rb_node);
++ st->first_idle = bfq_entity_of(next);
++ }
++
++ if (entity == st->last_idle) {
++ next = rb_prev(&entity->rb_node);
++ st->last_idle = bfq_entity_of(next);
++ }
++
++ bfq_extract(&st->idle, entity);
++
++ if (bfqq)
++ list_del(&bfqq->bfqq_list);
++}
++
++/**
++ * bfq_insert - generic tree insertion.
++ * @root: tree root.
++ * @entity: entity to insert.
++ *
++ * This is used for the idle and the active tree, since they are both
++ * ordered by finish time.
++ */
++static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
++{
++ struct bfq_entity *entry;
++ struct rb_node **node = &root->rb_node;
++ struct rb_node *parent = NULL;
++
++ BUG_ON(entity->tree);
++
++ while (*node) {
++ parent = *node;
++ entry = rb_entry(parent, struct bfq_entity, rb_node);
++
++ if (bfq_gt(entry->finish, entity->finish))
++ node = &parent->rb_left;
++ else
++ node = &parent->rb_right;
++ }
++
++ rb_link_node(&entity->rb_node, parent, node);
++ rb_insert_color(&entity->rb_node, root);
++
++ entity->tree = root;
++}
++
++/**
++ * bfq_update_min - update the min_start field of a entity.
++ * @entity: the entity to update.
++ * @node: one of its children.
++ *
++ * This function is called when @entity may store an invalid value for
++ * min_start due to updates to the active tree. The function assumes
++ * that the subtree rooted at @node (which may be its left or its right
++ * child) has a valid min_start value.
++ */
++static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
++{
++ struct bfq_entity *child;
++
++ if (node) {
++ child = rb_entry(node, struct bfq_entity, rb_node);
++ if (bfq_gt(entity->min_start, child->min_start))
++ entity->min_start = child->min_start;
++ }
++}
++
++/**
++ * bfq_update_active_node - recalculate min_start.
++ * @node: the node to update.
++ *
++ * @node may have changed position or one of its children may have moved,
++ * this function updates its min_start value. The left and right subtrees
++ * are assumed to hold a correct min_start value.
++ */
++static void bfq_update_active_node(struct rb_node *node)
++{
++ struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ entity->min_start = entity->start;
++ bfq_update_min(entity, node->rb_right);
++ bfq_update_min(entity, node->rb_left);
++
++ if (bfqq) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "update_active_node: new min_start %llu",
++ ((entity->min_start>>10)*1000)>>12);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ } else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "update_active_node: new min_start %llu",
++ ((entity->min_start>>10)*1000)>>12);
++#endif
++ }
++}
++
++/**
++ * bfq_update_active_tree - update min_start for the whole active tree.
++ * @node: the starting node.
++ *
++ * @node must be the deepest modified node after an update. This function
++ * updates its min_start using the values held by its children, assuming
++ * that they did not change, and then updates all the nodes that may have
++ * changed in the path to the root. The only nodes that may have changed
++ * are the ones in the path or their siblings.
++ */
++static void bfq_update_active_tree(struct rb_node *node)
++{
++ struct rb_node *parent;
++
++up:
++ bfq_update_active_node(node);
++
++ parent = rb_parent(node);
++ if (!parent)
++ return;
++
++ if (node == parent->rb_left && parent->rb_right)
++ bfq_update_active_node(parent->rb_right);
++ else if (parent->rb_left)
++ bfq_update_active_node(parent->rb_left);
++
++ node = parent;
++ goto up;
++}
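A stripped-down illustration of the min_start maintenance (no rb-tree, no rotations, and plain < instead of the wraparound-aware bfq_gt(); the real code also refreshes the sibling on each step because rebalancing may have moved it):

#include <stdio.h>

struct node {
	unsigned long long start, min_start;
	struct node *left, *right, *parent;
};

/* Same rule as bfq_update_active_node(): min over self and children. */
static void update_node(struct node *n)
{
	n->min_start = n->start;
	if (n->left && n->left->min_start < n->min_start)
		n->min_start = n->left->min_start;
	if (n->right && n->right->min_start < n->min_start)
		n->min_start = n->right->min_start;
}

/* Propagate from the deepest modified node up to the root. */
static void update_up(struct node *n)
{
	for (; n; n = n->parent)
		update_node(n);
}

int main(void)
{
	struct node leaf = { .start = 3 };
	struct node root = { .start = 10, .left = &leaf };

	leaf.parent = &root;
	update_up(&leaf);
	printf("root.min_start = %llu\n", root.min_start);	/* 3 */
	return 0;
}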
++
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root);
++
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root);
++
++
++/**
++ * bfq_active_insert - insert an entity in the active tree of its
++ * group/device.
++ * @st: the service tree of the entity.
++ * @entity: the entity being inserted.
++ *
++ * The active tree is ordered by finish time, but an extra key is kept
++ * per each node, containing the minimum value for the start times of
++ * its children (and the node itself), so it's possible to search for
++ * the eligible node with the lowest finish time in logarithmic time.
++ */
++static void bfq_active_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node = &entity->rb_node;
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ struct bfq_sched_data *sd = NULL;
++ struct bfq_group *bfqg = NULL;
++ struct bfq_data *bfqd = NULL;
++#endif
++
++ bfq_insert(&st->active, entity);
++
++ if (node->rb_left)
++ node = node->rb_left;
++ else if (node->rb_right)
++ node = node->rb_right;
++
++ bfq_update_active_tree(node);
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ sd = entity->sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++ if (bfqq)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else { /* bfq_group */
++ BUG_ON(!bfqd);
++ bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
++ }
++ if (bfqg != bfqd->root_group) {
++ BUG_ON(!bfqg);
++ BUG_ON(!bfqd);
++ bfqg->active_entities++;
++ }
++#endif
++}
++
++/**
++ * bfq_ioprio_to_weight - calc a weight from an ioprio.
++ * @ioprio: the ioprio value to convert.
++ */
++static unsigned short bfq_ioprio_to_weight(int ioprio)
++{
++ BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
++ return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
++}
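Assuming the usual values IOPRIO_BE_NR == 8 and BFQ_WEIGHT_CONVERSION_COEFF == 10 (taken here as assumptions, since both are defined elsewhere), the conversion gives lower ioprio values, i.e. higher priorities, proportionally larger weights:

#include <stdio.h>

#define IOPRIO_BE_NR			8	/* assumed value */
#define BFQ_WEIGHT_CONVERSION_COEFF	10	/* assumed value */

static unsigned short ioprio_to_weight(int ioprio)
{
	return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
}

int main(void)
{
	int ioprio;

	/* ioprio 0 (highest priority) -> 80, ..., ioprio 7 -> 10 */
	for (ioprio = 0; ioprio < IOPRIO_BE_NR; ioprio++)
		printf("ioprio %d -> weight %u\n",
		       ioprio, ioprio_to_weight(ioprio));
	return 0;
}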
++
++/**
++ * bfq_weight_to_ioprio - calc an ioprio from a weight.
++ * @weight: the weight value to convert.
++ *
++ * To preserve as much as possible the old only-ioprio user interface,
++ * 0 is used as an escape ioprio value for weights (numerically) equal to or
++ * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
++ */
++static unsigned short bfq_weight_to_ioprio(int weight)
++{
++ BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
++ return IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight < 0 ?
++ 0 : IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight;
++}
++
++static void bfq_get_entity(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ if (bfqq) {
++ bfqq->ref++;
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
++ bfqq, bfqq->ref);
++ }
++}
++
++/**
++ * bfq_find_deepest - find the deepest node that an extraction can modify.
++ * @node: the node being removed.
++ *
++ * Do the first step of an extraction in an rb tree, looking for the
++ * node that will replace @node, and returning the deepest node that
++ * the following modifications to the tree can touch. If @node is the
++ * last node in the tree return %NULL.
++ */
++static struct rb_node *bfq_find_deepest(struct rb_node *node)
++{
++ struct rb_node *deepest;
++
++ if (!node->rb_right && !node->rb_left)
++ deepest = rb_parent(node);
++ else if (!node->rb_right)
++ deepest = node->rb_left;
++ else if (!node->rb_left)
++ deepest = node->rb_right;
++ else {
++ deepest = rb_next(node);
++ if (deepest->rb_right)
++ deepest = deepest->rb_right;
++ else if (rb_parent(deepest) != node)
++ deepest = rb_parent(deepest);
++ }
++
++ return deepest;
++}
++
++/**
++ * bfq_active_extract - remove an entity from the active tree.
++ * @st: the service_tree containing the tree.
++ * @entity: the entity being removed.
++ */
++static void bfq_active_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node;
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ struct bfq_sched_data *sd = NULL;
++ struct bfq_group *bfqg = NULL;
++ struct bfq_data *bfqd = NULL;
++#endif
++
++ node = bfq_find_deepest(&entity->rb_node);
++ bfq_extract(&st->active, entity);
++
++ if (node)
++ bfq_update_active_tree(node);
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ sd = entity->sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++ if (bfqq)
++ list_del(&bfqq->bfqq_list);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else { /* bfq_group */
++ BUG_ON(!bfqd);
++ bfq_weights_tree_remove(bfqd, entity,
++ &bfqd->group_weights_tree);
++ }
++ if (bfqg != bfqd->root_group) {
++ BUG_ON(!bfqg);
++ BUG_ON(!bfqd);
++ BUG_ON(!bfqg->active_entities);
++ bfqg->active_entities--;
++ }
++#endif
++}
++
++/**
++ * bfq_idle_insert - insert an entity into the idle tree.
++ * @st: the service tree containing the tree.
++ * @entity: the entity to insert.
++ */
++static void bfq_idle_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
++ st->first_idle = entity;
++ if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
++ st->last_idle = entity;
++
++ bfq_insert(&st->idle, entity);
++
++ if (bfqq)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
++}
++
++/**
++ * bfq_forget_entity - do not consider entity any longer for scheduling
++ * @st: the service tree.
++ * @entity: the entity being removed.
++ * @is_in_service: true if entity is currently the in-service entity.
++ *
++ * Forget everything about @entity. In addition, if entity represents
++ * a queue, and the latter is not in service, then release the service
++ * reference to the queue (the one taken through bfq_get_entity). In
++ * fact, in this case, there is really no more service reference to
++ * the queue, as the latter is also outside any service tree. If,
++ * instead, the queue is in service, then __bfq_bfqd_reset_in_service
++ * will take care of putting the reference when the queue finally
++ * stops being served.
++ */
++static void bfq_forget_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity,
++ bool is_in_service)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ BUG_ON(!entity->on_st);
++
++ entity->on_st = false;
++ st->wsum -= entity->weight;
++ if (bfqq && !is_in_service) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity (before): %p %d",
++ bfqq, bfqq->ref);
++ bfq_put_queue(bfqq);
++ }
++}
++
++/**
++ * bfq_put_idle_entity - release the idle tree ref of an entity.
++ * @st: service tree for the entity.
++ * @entity: the entity being released.
++ */
++static void bfq_put_idle_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ bfq_idle_extract(st, entity);
++ bfq_forget_entity(st, entity,
++ entity == entity->sched_data->in_service_entity);
++}
++
++/**
++ * bfq_forget_idle - update the idle tree if necessary.
++ * @st: the service tree to act upon.
++ *
++ * To preserve the global O(log N) complexity we only remove one entry here;
++ * as the idle tree will not grow indefinitely this can be done safely.
++ */
++static void bfq_forget_idle(struct bfq_service_tree *st)
++{
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (RB_EMPTY_ROOT(&st->active) && last_idle &&
++ !bfq_gt(last_idle->finish, st->vtime)) {
++ /*
++ * Forget the whole idle tree, increasing the vtime past
++ * the last finish time of idle entities.
++ */
++ st->vtime = last_idle->finish;
++ }
++
++ if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
++ bfq_put_idle_entity(st, first_idle);
++}
++
++/*
++ * Update weight and priority of entity. If update_class_too is true,
++ * then update the ioprio_class of entity too.
++ *
++ * The reason why the update of ioprio_class is controlled through the
++ * last parameter is as follows. Changing the ioprio class of an
++ * entity implies changing the destination service trees for that
++ * entity. If such a change occurred when the entity is already on one
++ * of the service trees for its previous class, then the state of the
++ * entity would become more complex: none of the new possible service
++ * trees for the entity, according to bfq_entity_service_tree(), would
++ * match any of the possible service trees on which the entity
++ * is. Complex operations involving these trees, such as entity
++ * activations and deactivations, should take into account this
++ * additional complexity. To avoid this issue, this function is
++ * invoked with update_class_too unset in the points in the code where
++ * entity may happen to be on some tree.
++ */
++static struct bfq_service_tree *
++__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
++ struct bfq_entity *entity,
++ bool update_class_too)
++{
++ struct bfq_service_tree *new_st = old_st;
++
++ if (entity->prio_changed) {
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ unsigned int prev_weight, new_weight;
++ struct bfq_data *bfqd = NULL;
++ struct rb_root *root;
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ struct bfq_sched_data *sd;
++ struct bfq_group *bfqg;
++#endif
++
++ if (bfqq)
++ bfqd = bfqq->bfqd;
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ sd = entity->my_sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++ BUG_ON(!bfqd);
++ }
++#endif
++
++ BUG_ON(old_st->wsum < entity->weight);
++ old_st->wsum -= entity->weight;
++
++ if (entity->new_weight != entity->orig_weight) {
++ if (entity->new_weight < BFQ_MIN_WEIGHT ||
++ entity->new_weight > BFQ_MAX_WEIGHT) {
++ pr_crit("update_weight_prio: new_weight %d\n",
++ entity->new_weight);
++ if (entity->new_weight < BFQ_MIN_WEIGHT)
++ entity->new_weight = BFQ_MIN_WEIGHT;
++ else
++ entity->new_weight = BFQ_MAX_WEIGHT;
++ }
++ entity->orig_weight = entity->new_weight;
++ if (bfqq)
++ bfqq->ioprio =
++ bfq_weight_to_ioprio(entity->orig_weight);
++ }
++
++ if (bfqq && update_class_too)
++ bfqq->ioprio_class = bfqq->new_ioprio_class;
++
++ /*
++ * Reset prio_changed only if the ioprio_class change
++ * is not pending any longer.
++ */
++ if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class)
++ entity->prio_changed = 0;
++
++ /*
++ * NOTE: here we may be changing the weight too early;
++ * this will cause unfairness. The correct approach
++ * would have required additional complexity to defer
++ * weight changes to the proper time instants (i.e.,
++ * when entity->finish <= old_st->vtime).
++ */
++ new_st = bfq_entity_service_tree(entity);
++
++ prev_weight = entity->weight;
++ new_weight = entity->orig_weight *
++ (bfqq ? bfqq->wr_coeff : 1);
++ /*
++ * If the weight of the entity changes, remove the entity
++ * from its old weight counter (if there is a counter
++ * associated with the entity), and add it to the counter
++ * associated with its new weight.
++ */
++ if (prev_weight != new_weight) {
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "weight changed %d %d(%d %d)",
++ prev_weight, new_weight,
++ entity->orig_weight,
++ bfqq->wr_coeff);
++
++ root = bfqq ? &bfqd->queue_weights_tree :
++ &bfqd->group_weights_tree;
++ bfq_weights_tree_remove(bfqd, entity, root);
++ }
++ entity->weight = new_weight;
++ /*
++ * Add the entity to its weights tree only if it is
++ * not associated with a weight-raised queue.
++ */
++ if (prev_weight != new_weight &&
++ (bfqq ? bfqq->wr_coeff == 1 : 1))
++ /* If we get here, root has been initialized. */
++ bfq_weights_tree_add(bfqd, entity, root);
++
++ new_st->wsum += entity->weight;
++
++ if (new_st != old_st)
++ entity->start = new_st->vtime;
++ }
++
++ return new_st;
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
++#endif
++
++/**
++ * bfq_bfqq_served - update the scheduler status after selection for
++ * service.
++ * @bfqq: the queue being served.
++ * @served: bytes to transfer.
++ *
++ * NOTE: this can be optimized, as the timestamps of upper level entities
++ * are synchronized every time a new bfqq is selected for service. For now,
++ * we keep it to better check consistency.
++ */
++static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st;
++
++ for_each_entity(entity) {
++ st = bfq_entity_service_tree(entity);
++
++ entity->service += served;
++
++ BUG_ON(st->wsum == 0);
++
++ st->vtime += bfq_delta(served, st->wsum);
++ bfq_forget_idle(st);
++ }
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
++#endif
++ st = bfq_entity_service_tree(&bfqq->entity);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs, vtime %llu on %p",
++ served, ((st->vtime>>10)*1000)>>12, st);
++}
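The key effect above is that each service tree's virtual time advances by the served amount divided by the total weight backlogged on that tree; a rough sketch with invented numbers (plain division instead of do_div()):

#include <stdio.h>
#include <stdint.h>

#define WFQ_SERVICE_SHIFT 22

static uint64_t delta(unsigned long service, unsigned long wsum)
{
	return ((uint64_t)service << WFQ_SERVICE_SHIFT) / wsum;
}

int main(void)
{
	uint64_t vtime = 0;
	unsigned long wsum = 80 + 10;	/* two queues, weights 80 and 10 */

	/* Serving 1024 sectors advances vtime by ~1024/90 service units. */
	vtime += delta(1024, wsum);
	printf("vtime raw: %llu, ~%llu service units\n",
	       (unsigned long long)vtime,
	       (unsigned long long)(vtime >> WFQ_SERVICE_SHIFT));
	return 0;
}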
++
++/**
++ * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
++ * of the time interval during which bfqq has been in
++ * service.
++ * @bfqd: the device
++ * @bfqq: the queue that needs a service update.
++ * @time_ms: the amount of time during which the queue has received service
++ *
++ * If a queue does not consume its budget fast enough, then providing
++ * the queue with service fairness may impair throughput, more or less
++ * severely. For this reason, queues that consume their budget slowly
++ * are provided with time fairness instead of service fairness. This
++ * goal is achieved through the BFQ scheduling engine, even if such an
++ * engine works in the service, and not in the time domain. The trick
++ * is charging these queues with an inflated amount of service, equal
++ * to the amount of service that they would have received during their
++ * service slot if they had been fast, i.e., if their requests had
++ * been dispatched at a rate equal to the estimated peak rate.
++ *
++ * It is worth noting that time fairness can cause important
++ * distortions in terms of bandwidth distribution, on devices with
++ * internal queueing. The reason is that I/O requests dispatched
++ * during the service slot of a queue may be served after that service
++ * slot is finished, and may have a total processing time loosely
++ * correlated with the duration of the service slot. This is
++ * especially true for short service slots.
++ */
++static void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ unsigned long time_ms)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ int tot_serv_to_charge = entity->service;
++ unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout);
++
++ if (time_ms > 0 && time_ms < timeout_ms)
++ tot_serv_to_charge =
++ (bfqd->bfq_max_budget * time_ms) / timeout_ms;
++
++ if (tot_serv_to_charge < entity->service)
++ tot_serv_to_charge = entity->service;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "charge_time: %lu/%u ms, %d/%d/%d sectors",
++ time_ms, timeout_ms, entity->service,
++ tot_serv_to_charge, entity->budget);
++
++ /* Increase budget to avoid inconsistencies */
++ if (tot_serv_to_charge > entity->budget)
++ entity->budget = tot_serv_to_charge;
++
++ bfq_bfqq_served(bfqq,
++ max_t(int, 0, tot_serv_to_charge - entity->service));
++}
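A worked example of the charging rule above, with invented numbers (a maximum budget of 16384 sectors and the ~125 ms that bfq_timeout, i.e. HZ/8 jiffies, roughly corresponds to):

#include <stdio.h>

#define MAX_BUDGET	16384	/* sectors, illustrative */
#define TIMEOUT_MS	125	/* roughly bfq_timeout (HZ/8) in ms */

static int serv_to_charge(int service_received, unsigned long time_ms)
{
	int tot = service_received;

	if (time_ms > 0 && time_ms < TIMEOUT_MS)
		tot = (int)((MAX_BUDGET * time_ms) / TIMEOUT_MS);
	if (tot < service_received)
		tot = service_received;
	return tot;
}

int main(void)
{
	/*
	 * A slow queue held the device for 40 ms but moved only 256
	 * sectors: it is charged as if it had used 40/125 of the
	 * maximum budget, i.e. time fairness instead of service fairness.
	 */
	printf("charged service = %d sectors\n", serv_to_charge(256, 40));
	return 0;
}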
++
++static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
++ struct bfq_service_tree *st,
++ bool backshifted)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_sched_data *sd = entity->sched_data;
++
++ /*
++ * When this function is invoked, entity is not in any service
++ * tree, so it is safe to invoke the next function with the last
++ * parameter set (see the comments on the function).
++ */
++ st = __bfq_entity_update_weight_prio(st, entity, true);
++ bfq_calc_finish(entity, entity->budget);
++
++ /*
++ * If some queues enjoy backshifting for a while, then their
++ * (virtual) finish timestamps may happen to become lower and
++ * lower than the system virtual time. In particular, if
++ * these queues often happen to be idle for short time
++ * periods, and during such time periods other queues with
++ * higher timestamps happen to be busy, then the backshifted
++ * timestamps of the former queues can become much lower than
++ * the system virtual time. In fact, to serve the queues with
++ * higher timestamps while the ones with lower timestamps are
++ * idle, the system virtual time may be pushed-up to much
++ * higher values than the finish timestamps of the idle
++ * queues. As a consequence, the finish timestamps of all new
++ * or newly activated queues may end up being much larger than
++ * those of lucky queues with backshifted timestamps. The
++ * latter queues may then monopolize the device for a lot of
++ * time. This would simply break service guarantees.
++ *
++ * To reduce this problem, push up a little bit the
++ * backshifted timestamps of the queue associated with this
++ * entity (only a queue can happen to have the backshifted
++ * flag set): just enough to let the finish timestamp of the
++ * queue be equal to the current value of the system virtual
++ * time. This may introduce a little unfairness among queues
++ * with backshifted timestamps, but it does not break
++ * worst-case fairness guarantees.
++ *
++ * As a special case, if bfqq is weight-raised, push up
++ * timestamps much less, to keep very low the probability that
++ * this push up causes the backshifted finish timestamps of
++ * weight-raised queues to become higher than the backshifted
++ * finish timestamps of non weight-raised queues.
++ */
++ if (backshifted && bfq_gt(st->vtime, entity->finish)) {
++ unsigned long delta = st->vtime - entity->finish;
++
++ if (bfqq)
++ delta /= bfqq->wr_coeff;
++
++ entity->start += delta;
++ entity->finish += delta;
++
++ if (bfqq) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "__activate_entity: new queue finish %llu",
++ ((entity->finish>>10)*1000)>>12);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ } else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "__activate_entity: new group finish %llu",
++ ((entity->finish>>10)*1000)>>12);
++#endif
++ }
++ }
++
++ bfq_active_insert(st, entity);
++
++ if (bfqq) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "__activate_entity: queue %seligible in st %p",
++ entity->start <= st->vtime ? "" : "non ", st);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ } else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "__activate_entity: group %seligible in st %p",
++ entity->start <= st->vtime ? "" : "non ", st);
++#endif
++ }
++ BUG_ON(RB_EMPTY_ROOT(&st->active));
++ BUG_ON(&st->active != &sd->service_tree->active &&
++ &st->active != &(sd->service_tree+1)->active &&
++ &st->active != &(sd->service_tree+2)->active);
++}
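A toy illustration of the push-up described above (all numbers invented; wr_coeff > 1 marks a weight-raised queue, so its backshifted timestamps are moved up by a much smaller amount):

#include <stdio.h>

int main(void)
{
	unsigned long long vtime = 1000, start = 100, finish = 300;
	unsigned int wr_coeff = 30;			/* a raised queue */
	unsigned long long delta = vtime - finish;	/* 700 */

	delta /= wr_coeff;				/* 23 instead of 700 */
	start += delta;
	finish += delta;
	printf("start %llu, finish %llu (vtime %llu)\n", start, finish, vtime);
	return 0;
}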
++
++/**
++ * __bfq_activate_entity - handle activation of entity.
++ * @entity: the entity being activated.
++ * @non_blocking_wait_rq: true if entity was waiting for a request
++ *
++ * Called for a 'true' activation, i.e., if entity is not active and
++ * one of its children receives a new request.
++ *
++ * Basically, this function updates the timestamps of entity and
++ * inserts entity into its active tree, after possibly extracting it
++ * from its idle tree.
++ */
++static void __bfq_activate_entity(struct bfq_entity *entity,
++ bool non_blocking_wait_rq)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ bool backshifted = false;
++ unsigned long long min_vstart;
++
++ BUG_ON(!sd);
++ BUG_ON(!st);
++
++ /* See comments on bfq_bfqq_update_budg_for_activation */
++ if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
++ backshifted = true;
++ min_vstart = entity->finish;
++ } else
++ min_vstart = st->vtime;
++
++ if (entity->tree == &st->idle) {
++ /*
++ * Must be on the idle tree, bfq_idle_extract() will
++ * check for that.
++ */
++ bfq_idle_extract(st, entity);
++ entity->start = bfq_gt(min_vstart, entity->finish) ?
++ min_vstart : entity->finish;
++ } else {
++ /*
++ * The finish time of the entity may be invalid, and
++ * it is in the past for sure, otherwise the queue
++ * would have been on the idle tree.
++ */
++ entity->start = min_vstart;
++ st->wsum += entity->weight;
++ /*
++ * entity is about to be inserted into a service tree,
++ * and then set in service: get a reference to make
++ * sure entity does not disappear until it is no
++ * longer in service or scheduled for service.
++ */
++ bfq_get_entity(entity);
++
++ BUG_ON(entity->on_st && bfqq);
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ if (entity->on_st && !bfqq) {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group,
++ entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd,
++ bfqg,
++ "activate bug, class %d in_service %p",
++ bfq_class_idx(entity), sd->in_service_entity);
++ }
++#endif
++ BUG_ON(entity->on_st && !bfqq);
++ entity->on_st = true;
++ }
++
++ bfq_update_fin_time_enqueue(entity, st, backshifted);
++}
++
++/**
++ * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
++ * @entity: the entity being requeued or repositioned.
++ *
++ * Requeueing is needed if this entity stops being served, which
++ * happens if a leaf descendant entity has expired. On the other hand,
++ * repositioning is needed if the next_in_service entity for the child
++ * entity has changed. See the comments inside the function for
++ * details.
++ *
++ * Basically, this function: 1) removes entity from its active tree if
++ * present there, 2) updates the timestamps of entity and 3) inserts
++ * entity back into its active tree (in the new, right position for
++ * the new values of the timestamps).
++ */
++static void __bfq_requeue_entity(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++ BUG_ON(!sd);
++ BUG_ON(!st);
++
++ BUG_ON(entity != sd->in_service_entity &&
++ entity->tree != &st->active);
++
++ if (entity == sd->in_service_entity) {
++ /*
++ * We are requeueing the current in-service entity,
++ * which may have to be done for one of the following
++ * reasons:
++ * - entity represents the in-service queue, and the
++ * in-service queue is being requeued after an
++ * expiration;
++ * - entity represents a group, and its budget has
++ * changed because one of its child entities has
++ * just been either activated or requeued for some
++ * reason; the timestamps of the entity need then to
++ * be updated, and the entity needs to be enqueued
++ * or repositioned accordingly.
++ *
++ * In particular, before requeueing, the start time of
++ * the entity must be moved forward to account for the
++ * service that the entity has received while in
++ * service. This is done by the next instructions. The
++ * finish time will then be updated according to this
++ * new value of the start time, and to the budget of
++ * the entity.
++ */
++ bfq_calc_finish(entity, entity->service);
++ entity->start = entity->finish;
++ BUG_ON(entity->tree && entity->tree != &st->active);
++ /*
++ * In addition, if the entity had more than one child
++ * when set in service, then was not extracted from
++ * the active tree. This implies that the position of
++ * the entity in the active tree may need to be
++ * changed now, because we have just updated the start
++ * time of the entity, and we will update its finish
++ * time in a moment (the requeueing is then, more
++ * precisely, a repositioning in this case). To
++ * implement this repositioning, we: 1) dequeue the
++ * entity here, 2) update the finish time and
++ * requeue the entity according to the new
++ * timestamps below.
++ */
++ if (entity->tree)
++ bfq_active_extract(st, entity);
++ } else { /* The entity is already active, and not in service */
++ /*
++ * In this case, this function gets called only if the
++ * next_in_service entity below this entity has
++ * changed, and this change has caused the budget of
++ * this entity to change, which, finally implies that
++ * the finish time of this entity must be
++ * updated. Such an update may cause the scheduling,
++ * i.e., the position in the active tree, of this
++ * entity to change. We handle this change by: 1)
++ * dequeueing the entity here, 2) updating the finish
++ * time and requeueing the entity according to the new
++ * timestamps below. This is the same approach as the
++ * non-extracted-entity sub-case above.
++ */
++ bfq_active_extract(st, entity);
++ }
++
++ bfq_update_fin_time_enqueue(entity, st, false);
++}
++
++static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
++ struct bfq_sched_data *sd,
++ bool non_blocking_wait_rq)
++{
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++ if (sd->in_service_entity == entity || entity->tree == &st->active)
++ /*
++ * in service or already queued on the active tree,
++ * requeue or reposition
++ */
++ __bfq_requeue_entity(entity);
++ else
++ /*
++ * Not in service and not queued on its active tree:
++ * the entity is idle and this is a true activation.
++ */
++ __bfq_activate_entity(entity, non_blocking_wait_rq);
++}
++
++
++/**
++ * bfq_activate_requeue_entity - activate or requeue an entity representing a bfq_queue,
++ * and activate, requeue or reposition all ancestors
++ * for which such an update becomes necessary.
++ * @entity: the entity to activate.
++ * @non_blocking_wait_rq: true if this entity was waiting for a request
++ * @requeue: true if this is a requeue, which implies that bfqq is
++ * being expired; thus ALL its ancestors stop being served and must
++ * therefore be requeued
++ */
++static void bfq_activate_requeue_entity(struct bfq_entity *entity,
++ bool non_blocking_wait_rq,
++ bool requeue)
++{
++ struct bfq_sched_data *sd;
++
++ for_each_entity(entity) {
++ BUG_ON(!entity);
++ sd = entity->sched_data;
++ __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);
++
++ BUG_ON(RB_EMPTY_ROOT(&sd->service_tree->active) &&
++ RB_EMPTY_ROOT(&(sd->service_tree+1)->active) &&
++ RB_EMPTY_ROOT(&(sd->service_tree+2)->active));
++
++ if (!bfq_update_next_in_service(sd, entity) && !requeue) {
++ BUG_ON(!sd->next_in_service);
++ break;
++ }
++ BUG_ON(!sd->next_in_service);
++ }
++}
++
++/**
++ * __bfq_deactivate_entity - deactivate an entity from its service tree.
++ * @entity: the entity to deactivate.
++ * @ins_into_idle_tree: if false, the entity will not be put into the
++ * idle tree.
++ *
++ * Deactivates an entity, independently from its previous state. Must
++ * be invoked only if entity is on a service tree. Extracts the entity
++ * from that tree, and if necessary and allowed, puts it on the idle
++ * tree.
++ */
++static bool __bfq_deactivate_entity(struct bfq_entity *entity,
++ bool ins_into_idle_tree)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st;
++ bool is_in_service;
++
++ if (!entity->on_st) { /* entity never activated, or already inactive */
++ BUG_ON(sd && entity == sd->in_service_entity);
++ return false;
++ }
++
++ /*
++ * If we get here, then entity is active, which implies that
++ * bfq_group_set_parent has already been invoked for the group
++ * represented by entity. Therefore, the field
++ * entity->sched_data has been set, and we can safely use it.
++ */
++ st = bfq_entity_service_tree(entity);
++ is_in_service = entity == sd->in_service_entity;
++
++ BUG_ON(is_in_service && entity->tree && entity->tree != &st->active);
++
++ if (is_in_service)
++ bfq_calc_finish(entity, entity->service);
++
++ if (entity->tree == &st->active)
++ bfq_active_extract(st, entity);
++ else if (!is_in_service && entity->tree == &st->idle)
++ bfq_idle_extract(st, entity);
++ else if (entity->tree)
++ BUG();
++
++ if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
++ bfq_forget_entity(st, entity, is_in_service);
++ else
++ bfq_idle_insert(st, entity);
++
++ return true;
++}
++
++/**
++ * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
++ * @entity: the entity to deactivate.
++ * @ins_into_idle_tree: true if the entity can be put on the idle tree
++ * @expiration: true if this deactivation is due to the expiration of bfqq
++ */
++static void bfq_deactivate_entity(struct bfq_entity *entity,
++ bool ins_into_idle_tree,
++ bool expiration)
++{
++ struct bfq_sched_data *sd;
++ struct bfq_entity *parent = NULL;
++
++ for_each_entity_safe(entity, parent) {
++ sd = entity->sched_data;
++
++ BUG_ON(sd == NULL); /*
++ * It would mean that this is the
++ * root group.
++ */
++
++ BUG_ON(expiration && entity != sd->in_service_entity);
++
++ BUG_ON(entity != sd->in_service_entity &&
++ entity->tree ==
++ &bfq_entity_service_tree(entity)->active &&
++ !sd->next_in_service);
++
++ if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
++ /*
++ * entity is not in any tree any more, so
++ * this deactivation is a no-op, and there is
++ * nothing to change for upper-level entities
++ * (in case of expiration, this can never
++ * happen).
++ */
++ BUG_ON(expiration); /*
++ * entity cannot be already out of
++ * any tree
++ */
++ return;
++ }
++
++ if (sd->next_in_service == entity)
++ /*
++ * entity was the next_in_service entity,
++ * then, since entity has just been
++ * deactivated, a new one must be found.
++ */
++ bfq_update_next_in_service(sd, NULL);
++
++ if (sd->next_in_service) {
++ /*
++ * The parent entity is still backlogged,
++ * because next_in_service is not NULL. So, no
++ * further upwards deactivation must be
++ * performed. Yet, next_in_service has
++ * changed. Then the schedule does need to be
++ * updated upwards.
++ */
++ BUG_ON(sd->next_in_service == entity);
++ break;
++ }
++
++ /*
++ * If we get here, then the parent is no more
++ * backlogged and we need to propagate the
++ * deactivation upwards. Thus let the loop go on.
++ */
++
++ /*
++ * Also let parent be queued into the idle tree on
++ * deactivation, to preserve service guarantees, and
++ * assuming that the caller of this function does not
++ * need the parent entities to be removed completely as well.
++ */
++ ins_into_idle_tree = true;
++ }
++
++ /*
++ * If the deactivation loop is fully executed, then there are
++ * no more entities to touch and next loop is not executed at
++ * all. Otherwise, requeue remaining entities if they are
++ * about to stop receiving service, or reposition them if this
++ * is not the case.
++ */
++ entity = parent;
++ for_each_entity(entity) {
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ /*
++ * Invoke __bfq_requeue_entity on entity, even if
++ * already active, to requeue/reposition it in the
++ * active tree (because sd->next_in_service has
++ * changed)
++ */
++ __bfq_requeue_entity(entity);
++
++ sd = entity->sched_data;
++ BUG_ON(expiration && sd->in_service_entity != entity);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "invoking update_next for this queue");
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity,
++ struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "invoking update_next for this entity");
++ }
++#endif
++ if (!bfq_update_next_in_service(sd, entity) &&
++ !expiration)
++ /*
++ * next_in_service unchanged or not causing
++ * any change in entity->parent->sd, and no
++ * requeueing needed for expiration: stop
++ * here.
++ */
++ break;
++ }
++}
++
++/**
++ * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
++ * if needed, to have at least one entity eligible.
++ * @st: the service tree to act upon.
++ *
++ * Assumes that st is not empty.
++ */
++static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
++{
++ struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);
++
++ if (bfq_gt(root_entity->min_start, st->vtime)) {
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(root_entity);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_vtime_jump: new value %llu",
++ root_entity->min_start);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(root_entity, struct bfq_group,
++ entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "calc_vtime_jump: new value %llu",
++ root_entity->min_start);
++ }
++#endif
++ return root_entity->min_start;
++ }
++ return st->vtime;
++}
++
++static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
++{
++ if (new_value > st->vtime) {
++ st->vtime = new_value;
++ bfq_forget_idle(st);
++ }
++}
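The two helpers above implement a simple rule: the virtual time only ever moves forward, and it jumps to the smallest start time in the tree when no entity would otherwise be eligible. A sketch (plain comparisons instead of bfq_gt()):

#include <stdio.h>

/* Jump vtime forward just enough to make at least one entity eligible. */
static unsigned long long vtime_jump(unsigned long long vtime,
				     unsigned long long min_start)
{
	return min_start > vtime ? min_start : vtime;
}

int main(void)
{
	printf("new vtime = %llu\n", vtime_jump(500, 800));	/* jumps to 800 */
	printf("new vtime = %llu\n", vtime_jump(500, 300));	/* stays at 500 */
	return 0;
}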
++
++/**
++ * bfq_first_active_entity - find the eligible entity with
++ * the smallest finish time
++ * @st: the service tree to select from.
++ * @vtime: the system virtual time to use as a reference for eligibility
++ *
++ * This function searches the first schedulable entity, starting from the
++ * root of the tree and going on the left every time on this side there is
++ * a subtree with at least one eligible (start >= vtime) entity. The path on
++ * the right is followed only if a) the left subtree contains no eligible
++ * entities and b) no eligible entity has been found yet.
++ */
++static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
++ u64 vtime)
++{
++ struct bfq_entity *entry, *first = NULL;
++ struct rb_node *node = st->active.rb_node;
++
++ while (node) {
++ entry = rb_entry(node, struct bfq_entity, rb_node);
++left:
++ if (!bfq_gt(entry->start, vtime))
++ first = entry;
++
++ BUG_ON(bfq_gt(entry->min_start, vtime));
++
++ if (node->rb_left) {
++ entry = rb_entry(node->rb_left,
++ struct bfq_entity, rb_node);
++ if (!bfq_gt(entry->min_start, vtime)) {
++ node = node->rb_left;
++ goto left;
++ }
++ }
++ if (first)
++ break;
++ node = node->rb_right;
++ }
++
++ BUG_ON(!first && !RB_EMPTY_ROOT(&st->active));
++ return first;
++}
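A linear-scan reference model of the selection rule implemented above (the augmented tree returns the same answer in O(log N); the wraparound handling of bfq_gt() is ignored here):

#include <stdio.h>

struct ent { unsigned long long start, finish; };

/* Among eligible entities (start <= vtime), pick the smallest finish. */
static const struct ent *pick(const struct ent *e, int n,
			      unsigned long long vtime)
{
	const struct ent *best = NULL;
	int i;

	for (i = 0; i < n; i++)
		if (e[i].start <= vtime &&
		    (!best || e[i].finish < best->finish))
			best = &e[i];
	return best;
}

int main(void)
{
	struct ent e[] = { { 10, 400 }, { 50, 300 }, { 900, 120 } };
	const struct ent *b = pick(e, 3, 100);

	/* {900, 120} has the smallest finish but is not yet eligible. */
	if (b)
		printf("picked finish %llu\n", b->finish);	/* 300 */
	return 0;
}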
++
++/**
++ * __bfq_lookup_next_entity - return the first eligible entity in @st.
++ * @st: the service tree.
++ *
++ * If there is no in-service entity for the sched_data st belongs to,
++ * then return the entity that will be set in service if:
++ * 1) the parent entity this st belongs to is set in service;
++ * 2) no entity belonging to such parent entity undergoes a state change
++ * that would influence the timestamps of the entity (e.g., becomes idle,
++ * becomes backlogged, changes its budget, ...).
++ *
++ * In this first case, update the virtual time in @st too (see the
++ * comments on this update inside the function).
++ *
++ * In contrast, if there is an in-service entity, then return the
++ * entity that would be set in service if not only the above
++ * conditions, but also the next one held true: the currently
++ * in-service entity, on expiration,
++ * 1) gets a finish time equal to the current one, or
++ * 2) is not eligible any more, or
++ * 3) is idle.
++ */
++static struct bfq_entity *
++__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service
++#if 0
++ , bool force
++#endif
++ )
++{
++ struct bfq_entity *entity
++#if 0
++ , *new_next_in_service = NULL
++#endif
++ ;
++ u64 new_vtime;
++ struct bfq_queue *bfqq;
++
++ if (RB_EMPTY_ROOT(&st->active))
++ return NULL;
++
++ /*
++ * Get the value of the system virtual time for which at
++ * least one entity is eligible.
++ */
++ new_vtime = bfq_calc_vtime_jump(st);
++
++ /*
++ * If there is no in-service entity for the sched_data this
++ * active tree belongs to, then push the system virtual time
++ * up to the value that guarantees that at least one entity is
++ * eligible. If, instead, there is an in-service entity, then
++ * do not make any such update, because there is already an
++ * eligible entity, namely the in-service one (even if the
++ * entity is not on st, because it was extracted when set in
++ * service).
++ */
++ if (!in_service)
++ bfq_update_vtime(st, new_vtime);
++
++ entity = bfq_first_active_entity(st, new_vtime);
++ BUG_ON(bfq_gt(entity->start, new_vtime));
++
++ /* Log some information */
++ bfqq = bfq_entity_to_bfqq(entity);
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "__lookup_next: start %llu vtime %llu st %p",
++ ((entity->start>>10)*1000)>>12,
++ ((new_vtime>>10)*1000)>>12, st);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "__lookup_next: start %llu vtime %llu st %p",
++ ((entity->start>>10)*1000)>>12,
++ ((new_vtime>>10)*1000)>>12, st);
++ }
++#endif
++
++ BUG_ON(!entity);
++
++ return entity;
++}
++
++/**
++ * bfq_lookup_next_entity - return the first eligible entity in @sd.
++ * @sd: the sched_data.
++ *
++ * This function is invoked when there has been a change in the trees
++ * for sd, and we need to know what the new next entity is after this
++ * change.
++ */
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
++{
++ struct bfq_service_tree *st = sd->service_tree;
++ struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
++ struct bfq_entity *entity = NULL;
++ struct bfq_queue *bfqq;
++ int class_idx = 0;
++
++ BUG_ON(!sd);
++ BUG_ON(!st);
++ /*
++ * Choose from idle class, if needed to guarantee a minimum
++ * bandwidth to this class (and if there is some active entity
++ * in idle class). This should also mitigate
++ * priority-inversion problems in case a low priority task is
++ * holding file system resources.
++ */
++ if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
++ BFQ_CL_IDLE_TIMEOUT)) {
++ if (!RB_EMPTY_ROOT(&idle_class_st->active))
++ class_idx = BFQ_IOPRIO_CLASSES - 1;
++ /* About to be served if backlogged, or not yet backlogged */
++ sd->bfq_class_idle_last_service = jiffies;
++ }
++
++ /*
++ * Find the next entity to serve for the highest-priority
++ * class, unless the idle class needs to be served.
++ */
++ for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
++ entity = __bfq_lookup_next_entity(st + class_idx,
++ sd->in_service_entity);
++
++ if (entity)
++ break;
++ }
++
++ BUG_ON(!entity &&
++ (!RB_EMPTY_ROOT(&st->active) || !RB_EMPTY_ROOT(&(st+1)->active) ||
++ !RB_EMPTY_ROOT(&(st+2)->active)));
++
++ if (!entity)
++ return NULL;
++
++ /* Log some information */
++ bfqq = bfq_entity_to_bfqq(entity);
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "chosen from st %p %d",
++ st + class_idx, class_idx);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "chosen from st %p %d",
++ st + class_idx, class_idx);
++ }
++#endif
++
++ return entity;
++}
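The class-selection policy above can be summarised as: scan RT, then BE, then IDLE, and take the first class with a backlogged entity, unless the idle class has been starved past its timeout, in which case start from IDLE so it still receives a minimum share. A rough sketch (the class indices and starvation flag are illustrative, not the kernel types):

#include <stdio.h>
#include <stdbool.h>

#define NCLASSES 3	/* RT, BE, IDLE */

static int pick_class(const bool backlogged[NCLASSES], bool idle_starved)
{
	int c = (idle_starved && backlogged[NCLASSES - 1]) ? NCLASSES - 1 : 0;

	for (; c < NCLASSES; c++)
		if (backlogged[c])
			return c;
	return -1;
}

int main(void)
{
	bool backlogged[NCLASSES] = { true, true, true };

	printf("served class: %d\n", pick_class(backlogged, false)); /* 0, RT */
	printf("served class: %d\n", pick_class(backlogged, true));  /* 2, IDLE */
	return 0;
}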
++
++static bool next_queue_may_preempt(struct bfq_data *bfqd)
++{
++ struct bfq_sched_data *sd = &bfqd->root_group->sched_data;
++
++ return sd->next_in_service != sd->in_service_entity;
++}
++
++/*
++ * Get next queue for service.
++ */
++static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
++{
++ struct bfq_entity *entity = NULL;
++ struct bfq_sched_data *sd;
++ struct bfq_queue *bfqq;
++
++ BUG_ON(bfqd->in_service_queue);
++
++ if (bfqd->busy_queues == 0)
++ return NULL;
++
++ /*
++ * Traverse the path from the root to the leaf entity to
++ * serve. Set in service all the entities visited along the
++ * way.
++ */
++ sd = &bfqd->root_group->sched_data;
++ for (; sd ; sd = entity->my_sched_data) {
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ if (entity) {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg(bfqd, bfqg,
++ "get_next_queue: lookup in this group");
++ if (!sd->next_in_service)
++ pr_crit("get_next_queue: lookup in this group");
++ } else {
++ bfq_log_bfqg(bfqd, bfqd->root_group,
++ "get_next_queue: lookup in root group");
++ if (!sd->next_in_service)
++ pr_crit("get_next_queue: lookup in root group");
++ }
++#endif
++
++ BUG_ON(!sd->next_in_service);
++
++ /*
++ * WARNING. We are about to set the in-service entity
++ * to sd->next_in_service, i.e., to the (cached) value
++ * returned by bfq_lookup_next_entity(sd) the last
++ * time it was invoked, i.e., the last time when the
++ * service order in sd changed as a consequence of the
++ * activation or deactivation of an entity. In this
++ * respect, if we execute bfq_lookup_next_entity(sd)
++ * in this very moment, it may, although with low
++ * probability, yield a different entity than that
++ * pointed to by sd->next_in_service. This rare event
++ * happens in case there was no CLASS_IDLE entity to
++ * serve for sd when bfq_lookup_next_entity(sd) was
++ * invoked for the last time, while there is now one
++ * such entity.
++ *
++ * If the above event happens, then the scheduling of
++ * such entity in CLASS_IDLE is postponed until the
++ * service of the sd->next_in_service entity
++ * finishes. In fact, when the latter is expired,
++ * bfq_lookup_next_entity(sd) gets called again,
++ * exactly to update sd->next_in_service.
++ */
++
++ /* Make next_in_service entity become in_service_entity */
++ entity = sd->next_in_service;
++ sd->in_service_entity = entity;
++
++ /*
++ * Reset the accumulator of the amount of service that
++ * the entity is about to receive.
++ */
++ entity->service = 0;
++
++ /*
++ * If entity is no longer a candidate for next
++ * service, then we extract it from its active tree,
++ * for the following reason. To further boost the
++ * throughput in some special case, BFQ needs to know
++ * which is the next candidate entity to serve, while
++ * there is already an entity in service. In this
++ * respect, to make it easy to compute/update the next
++ * candidate entity to serve after the current
++ * candidate has been set in service, there is a case
++ * where it is necessary to extract the current
++ * candidate from its service tree. Such a case is
++ * when the entity just set in service cannot be also
++ * a candidate for next service. Details about when
++ * this condition holds are reported in the comments
++ * on the function bfq_no_longer_next_in_service()
++ * invoked below.
++ */
++ if (bfq_no_longer_next_in_service(entity))
++ bfq_active_extract(bfq_entity_service_tree(entity),
++ entity);
++
++ /*
++ * For the same reason why we may have just extracted
++ * entity from its active tree, we may need to update
++ * next_in_service for the sched_data of entity too,
++ * regardless of whether entity has been extracted.
++ * In fact, even if entity has not been extracted, a
++ * descendant entity may get extracted. Such an event
++ * would cause a change in next_in_service for the
++ * level of the descendant entity, and thus possibly
++ * back to upper levels.
++ *
++ * We cannot perform the resulting needed update
++ * before the end of this loop, because, to know which
++ * is the correct next-to-serve candidate entity for
++ * each level, we need first to find the leaf entity
++ * to set in service. In fact, only after we know
++ * which is the next-to-serve leaf entity, we can
++ * discover whether the parent entity of the leaf
++ * entity becomes the next-to-serve, and so on.
++ */
++
++ /* Log some information */
++ bfqq = bfq_entity_to_bfqq(entity);
++ if (bfqq)
++ bfq_log_bfqq(bfqd, bfqq,
++ "get_next_queue: this queue, finish %llu",
++ (((entity->finish>>10)*1000)>>10)>>2);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg(bfqd, bfqg,
++ "get_next_queue: this entity, finish %llu",
++ (((entity->finish>>10)*1000)>>10)>>2);
++ }
++#endif
++
++ }
++
++ BUG_ON(!entity);
++ bfqq = bfq_entity_to_bfqq(entity);
++ BUG_ON(!bfqq);
++
++ /*
++ * We can finally update all next-to-serve entities along the
++ * path from the leaf entity just set in service to the root.
++ */
++ for_each_entity(entity) {
++ struct bfq_sched_data *sd = entity->sched_data;
++
++ if (!bfq_update_next_in_service(sd, NULL))
++ break;
++ }
++
++ return bfqq;
++}
++
++static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
++{
++ struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
++ struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
++ struct bfq_entity *entity = in_serv_entity;
++
++ if (bfqd->in_service_bic) {
++ put_io_context(bfqd->in_service_bic->icq.ioc);
++ bfqd->in_service_bic = NULL;
++ }
++
++ bfq_clear_bfqq_wait_request(in_serv_bfqq);
++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
++ bfqd->in_service_queue = NULL;
++
++ /*
++ * When this function is called, all in-service entities have
++ * been properly deactivated or requeued, so we can safely
++ * execute the final step: reset in_service_entity along the
++ * path from entity to the root.
++ */
++ for_each_entity(entity)
++ entity->sched_data->in_service_entity = NULL;
++
++ /*
++ * in_serv_entity is no longer in service, so, if it is in no
++ * service tree either, then release the service reference to
++ * the queue it represents (taken with bfq_get_entity).
++ */
++ if (!in_serv_entity->on_st)
++ bfq_put_queue(in_serv_bfqq);
++}
++
++static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ bool ins_into_idle_tree, bool expiration)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
++}
++
++static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++ BUG_ON(bfqq == bfqd->in_service_queue);
++ BUG_ON(entity->tree != &st->active && entity->tree != &st->idle &&
++ entity->on_st);
++
++ bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
++ false);
++ bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
++}
++
++static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_activate_requeue_entity(entity, false,
++ bfqq == bfqd->in_service_queue);
++}
++
++static void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
++
++/*
++ * Called when the bfqq no longer has requests pending; removes it from
++ * the service tree. As a special case, it can be invoked during an
++ * expiration.
++ */
++static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ bool expiration)
++{
++ BUG_ON(!bfq_bfqq_busy(bfqq));
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ bfq_log_bfqq(bfqd, bfqq, "del from busy");
++
++ bfq_clear_bfqq_busy(bfqq);
++
++ BUG_ON(bfqd->busy_queues == 0);
++ bfqd->busy_queues--;
++
++ if (!bfqq->dispatched)
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++
++ if (bfqq->wr_coeff > 1) {
++ bfqd->wr_busy_queues--;
++ BUG_ON(bfqd->wr_busy_queues < 0);
++ }
++
++ bfqg_stats_update_dequeue(bfqq_group(bfqq));
++
++ BUG_ON(bfqq->entity.budget < 0);
++
++ bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
++}
++
++/*
++ * Called when an inactive queue receives a new request.
++ */
++static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfq_bfqq_busy(bfqq));
++ BUG_ON(bfqq == bfqd->in_service_queue);
++
++ bfq_log_bfqq(bfqd, bfqq, "add to busy");
++
++ bfq_activate_bfqq(bfqd, bfqq);
++
++ bfq_mark_bfqq_busy(bfqq);
++ bfqd->busy_queues++;
++
++ if (!bfqq->dispatched)
++ if (bfqq->wr_coeff == 1)
++ bfq_weights_tree_add(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++
++ if (bfqq->wr_coeff > 1) {
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ }
++
++}
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+new file mode 100644
+index 000000000000..65e7c7e77f3c
+--- /dev/null
++++ b/block/bfq-sq-iosched.c
+@@ -0,0 +1,5379 @@
++/*
++ * Budget Fair Queueing (BFQ) I/O scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ *
++ * BFQ is a proportional-share I/O scheduler, with some extra
++ * low-latency capabilities. BFQ also supports full hierarchical
++ * scheduling through cgroups. Next paragraphs provide an introduction
++ * on BFQ inner workings. Details on BFQ benefits and usage can be
++ * found in Documentation/block/bfq-iosched.txt.
++ *
++ * BFQ is a proportional-share storage-I/O scheduling algorithm based
++ * on the slice-by-slice service scheme of CFQ. But BFQ assigns
++ * budgets, measured in number of sectors, to processes instead of
++ * time slices. The device is not granted to the in-service process
++ * for a given time slice, but until it has exhausted its assigned
++ * budget. This change from the time to the service domain enables BFQ
++ * to distribute the device throughput among processes as desired,
++ * without any distortion due to throughput fluctuations, or to device
++ * internal queueing. BFQ uses an ad hoc internal scheduler, called
++ * B-WF2Q+, to schedule processes according to their budgets. More
++ * precisely, BFQ schedules queues associated with processes. Thanks to
++ * the accurate policy of B-WF2Q+, BFQ can afford to assign high
++ * budgets to I/O-bound processes issuing sequential requests (to
++ * boost the throughput), and yet guarantee a low latency to
++ * interactive and soft real-time applications.
++ *
++ * NOTE: if the main or only goal, with a given device, is to achieve
++ * the maximum-possible throughput at all times, then do switch off
++ * all low-latency heuristics for that device, by setting low_latency
++ * to 0.
++ *
++ * BFQ is described in [1], where also a reference to the initial, more
++ * theoretical paper on BFQ can be found. The interested reader can find
++ * in the latter paper full details on the main algorithm, as well as
++ * formulas of the guarantees and formal proofs of all the properties.
++ * With respect to the version of BFQ presented in these papers, this
++ * implementation adds a few more heuristics, such as the one that
++ * guarantees a low latency to soft real-time applications, and a
++ * hierarchical extension based on H-WF2Q+.
++ *
++ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
++ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
++ * complexity derives from the one introduced with EEVDF in [3].
++ *
++ * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
++ * Scheduler", Proceedings of the First Workshop on Mobile System
++ * Technologies (MST-2015), May 2015.
++ * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
++ *
++ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
++ *
++ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
++ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
++ * Oct 1997.
++ *
++ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
++ *
++ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
++ * First: A Flexible and Accurate Mechanism for Proportional Share
++ * Resource Allocation,'' technical report.
++ *
++ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/cgroup.h>
++#include <linux/elevator.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/ioprio.h>
++#include "blk.h"
++#include "bfq.h"
++
++/* Expiration time of sync (0) and async (1) requests, in ns. */
++static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
++
++/* Maximum backwards seek, in KiB. */
++static const int bfq_back_max = (16 * 1024);
++
++/* Penalty of a backwards seek, in number of sectors. */
++static const int bfq_back_penalty = 2;
++
++/* Idling period duration, in ns. */
++static u32 bfq_slice_idle = (NSEC_PER_SEC / 125);
++
++/* Minimum number of assigned budgets for which stats are safe to compute. */
++static const int bfq_stats_min_budgets = 194;
++
++/* Default maximum budget value, in sectors. */
++static const int bfq_default_max_budget = (16 * 1024);
++
++/*
++ * Async to sync throughput distribution is controlled as follows:
++ * when an async request is served, the entity is charged the number
++ * of sectors of the request, multiplied by the factor below
++ */
++static const int bfq_async_charge_factor = 10;
++
++/* Default timeout value, in jiffies, approximating the CFQ default. */
++static const int bfq_timeout = (HZ / 8);
++
++static struct kmem_cache *bfq_pool;
++
++/* Below this threshold (in ns), we consider thinktime immediate. */
++#define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
++
++/* hw_tag detection: parallel requests threshold and min samples needed. */
++#define BFQ_HW_QUEUE_THRESHOLD 4
++#define BFQ_HW_QUEUE_SAMPLES 32
++
++#define BFQQ_SEEK_THR (sector_t)(8 * 100)
++#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
++#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
++#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
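++
++/*
++ * For illustration: hweight32() returns the number of set bits, so,
++ * assuming the request-handling path shifts one bit per request into
++ * bfqq->seek_history (set when the request lies beyond the seek
++ * threshold), BFQQ_SEEKY() flags a queue as seeky as soon as more than
++ * 32/8 = 4 of its last 32 tracked requests were such long-distance seeks.
++ */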
++
++/* Min number of samples required to perform peak-rate update */
++#define BFQ_RATE_MIN_SAMPLES 32
++/* Min observation time interval required to perform a peak-rate update (ns) */
++#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC)
++/* Target observation time interval for a peak-rate update (ns) */
++#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
++
++/* Shift used for peak rate fixed precision calculations. */
++#define BFQ_RATE_SHIFT 16
++
++/*
++ * By default, BFQ computes the duration of the weight raising for
++ * interactive applications automatically, using the following formula:
++ * duration = (R / r) * T, where r is the peak rate of the device, and
++ * R and T are two reference parameters.
++ * In particular, R is the peak rate of the reference device (see below),
++ * and T is a reference time: given the systems that are likely to be
++ * installed on the reference device according to its speed class, T is
++ * about the maximum time needed, under BFQ and while reading two files in
++ * parallel, to load typical large applications on these systems.
++ * In practice, the slower/faster the device at hand is, the more/less it
++ * takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to interactive
++ * applications.
++ *
++ * BFQ uses four different reference pairs (R, T), depending on:
++ * . whether the device is rotational or non-rotational;
++ * . whether the device is slow, such as old or portable HDDs, as well as
++ * SD cards, or fast, such as newer HDDs and SSDs.
++ *
++ * The device's speed class is dynamically (re)detected in
++ * bfq_update_peak_rate() every time the estimated peak rate is updated.
++ *
++ * In the following definitions, R_slow[0]/R_fast[0] and
++ * T_slow[0]/T_fast[0] are the reference values for a slow/fast
++ * rotational device, whereas R_slow[1]/R_fast[1] and
++ * T_slow[1]/T_fast[1] are the reference values for a slow/fast
++ * non-rotational device. Finally, device_speed_thresh are the
++ * thresholds used to switch between speed classes. The reference
++ * rates are not the actual peak rates of the devices used as a
++ * reference, but slightly lower values. The reason for using these
++ * slightly lower values is that the peak-rate estimator tends to
++ * yield slightly lower values than the actual peak rate (it can yield
++ * the actual peak rate only if there is only one process doing I/O,
++ * and the process does sequential I/O).
++ *
++ * Both the reference peak rates and the thresholds are measured in
++ * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
++ */
++static int R_slow[2] = {1000, 10700};
++static int R_fast[2] = {14000, 33000};
++/*
++ * To improve readability, a conversion function is used to initialize the
++ * following arrays, which entails that they can be initialized only in a
++ * function.
++ */
++static int T_slow[2];
++static int T_fast[2];
++static int device_speed_thresh[2];
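++
++/*
++ * A worked example of the duration formula above, with assumed numbers:
++ * for a fast non-rotational device the reference rate is R_fast[1] =
++ * 33000 (sectors/usec << BFQ_RATE_SHIFT). If the estimated peak rate r
++ * of the device at hand is 16500, i.e., half the reference rate, then
++ * duration = (R / r) * T = 2 * T. Taking, purely for illustration,
++ * T = 5 seconds, the interactive weight-raising period would last 10
++ * seconds, which bfq_wr_duration() below then clamps to the [3, 13]
++ * second range.
++ */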
++
++#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
++ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
++
++#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
++
++static void bfq_schedule_dispatch(struct bfq_data *bfqd);
++
++#include "bfq-ioc.c"
++#include "bfq-sched.c"
++#include "bfq-cgroup-included.c"
++
++#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples) ((samples) > 80)
++
++/*
++ * Schedule a run of the queue if there are pending requests and none
++ * already in the driver that will restart queueing.
++ */
++static void bfq_schedule_dispatch(struct bfq_data *bfqd)
++{
++ if (bfqd->queued != 0) {
++ bfq_log(bfqd, "schedule dispatch");
++ kblockd_schedule_work(&bfqd->unplug_work);
++ }
++}
++
++/*
++ * Lifted from AS - choose which of rq1 and rq2 is best served now.
++ * We choose the request that is closest to the head right now. Distance
++ * behind the head is penalized and only allowed to a certain extent.
++ */
++static struct request *bfq_choose_req(struct bfq_data *bfqd,
++ struct request *rq1,
++ struct request *rq2,
++ sector_t last)
++{
++ sector_t s1, s2, d1 = 0, d2 = 0;
++ unsigned long back_max;
++#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
++#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
++ unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
++
++ if (!rq1 || rq1 == rq2)
++ return rq2;
++ if (!rq2)
++ return rq1;
++
++ if (rq_is_sync(rq1) && !rq_is_sync(rq2))
++ return rq1;
++ else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
++ return rq2;
++ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
++ return rq1;
++ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
++ return rq2;
++
++ s1 = blk_rq_pos(rq1);
++ s2 = blk_rq_pos(rq2);
++
++ /*
++ * By definition, 1KiB is 2 sectors.
++ */
++ back_max = bfqd->bfq_back_max * 2;
++
++ /*
++ * Strict one way elevator _except_ in the case where we allow
++ * short backward seeks which are biased as twice the cost of a
++ * similar forward seek.
++ */
++ if (s1 >= last)
++ d1 = s1 - last;
++ else if (s1 + back_max >= last)
++ d1 = (last - s1) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ1_WRAP;
++
++ if (s2 >= last)
++ d2 = s2 - last;
++ else if (s2 + back_max >= last)
++ d2 = (last - s2) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ2_WRAP;
++
++ /* Found required data */
++
++ /*
++ * By doing switch() on the bit mask "wrap" we avoid having to
++ * check two variables for all permutations: --> faster!
++ */
++ switch (wrap) {
++ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
++ if (d1 < d2)
++ return rq1;
++ else if (d2 < d1)
++ return rq2;
++
++ if (s1 >= s2)
++ return rq1;
++ else
++ return rq2;
++
++ case BFQ_RQ2_WRAP:
++ return rq1;
++ case BFQ_RQ1_WRAP:
++ return rq2;
++ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
++ default:
++ /*
++ * Since both rqs are wrapped,
++ * start with the one that's further behind head
++ * (--> only *one* back seek required),
++ * since back seek takes more time than forward.
++ */
++ if (s1 <= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++}
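++
++/*
++ * A small worked example of the policy above, with made-up positions:
++ * with the head at sector 1000, a forward request rq1 at sector 1200
++ * gives d1 = 200, while a request rq2 at sector 960, i.e., 40 sectors
++ * behind the head but within back_max, gives d2 = 40 * bfq_back_penalty
++ * = 80 with the default penalty of 2. Assuming both requests are sync
++ * and neither is REQ_META, neither wraps, so the switch takes the
++ * "case 0" branch and rq2 wins because d2 < d1, despite lying behind
++ * the head. Had rq2 been farther behind than back_max, it would have
++ * been marked as wrapped and rq1 would have been chosen instead.
++ */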
++
++static struct bfq_queue *
++bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
++ sector_t sector, struct rb_node **ret_parent,
++ struct rb_node ***rb_link)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *bfqq = NULL;
++
++ parent = NULL;
++ p = &root->rb_node;
++ while (*p) {
++ struct rb_node **n;
++
++ parent = *p;
++ bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++
++ /*
++ * Sort strictly based on sector. Smallest to the left,
++ * largest to the right.
++ */
++ if (sector > blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_right;
++ else if (sector < blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_left;
++ else
++ break;
++ p = n;
++ bfqq = NULL;
++ }
++
++ *ret_parent = parent;
++ if (rb_link)
++ *rb_link = p;
++
++ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ (unsigned long long) sector,
++ bfqq ? bfqq->pid : 0);
++
++ return bfqq;
++}
++
++static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *__bfqq;
++
++ if (bfqq->pos_root) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++
++ if (bfq_class_idle(bfqq))
++ return;
++ if (!bfqq->next_rq)
++ return;
++
++ bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
++ blk_rq_pos(bfqq->next_rq), &parent, &p);
++ if (!__bfqq) {
++ rb_link_node(&bfqq->pos_node, parent, p);
++ rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
++ } else
++ bfqq->pos_root = NULL;
++}
++
++/*
++ * Tell whether there are active queues or groups with differentiated weights.
++ */
++static bool bfq_differentiated_weights(struct bfq_data *bfqd)
++{
++ /*
++ * For weights to differ, at least one of the trees must contain
++ * at least two nodes.
++ */
++ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
++ (bfqd->queue_weights_tree.rb_node->rb_left ||
++ bfqd->queue_weights_tree.rb_node->rb_right)
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ ) ||
++ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
++ (bfqd->group_weights_tree.rb_node->rb_left ||
++ bfqd->group_weights_tree.rb_node->rb_right)
++#endif
++ );
++}
++
++/*
++ * The following function returns true if every queue must receive the
++ * same share of the throughput (this condition is used when deciding
++ * whether idling may be disabled, see the comments in the function
++ * bfq_bfqq_may_idle()).
++ *
++ * Such a scenario occurs when:
++ * 1) all active queues have the same weight,
++ * 2) all active groups at the same level in the groups tree have the same
++ * weight,
++ * 3) all active groups at the same level in the groups tree have the same
++ * number of children.
++ *
++ * Unfortunately, keeping the necessary state for evaluating exactly the
++ * above symmetry conditions would be quite complex and time-consuming.
++ * Therefore this function evaluates, instead, the following stronger
++ * sub-conditions, for which it is much easier to maintain the needed
++ * state:
++ * 1) all active queues have the same weight,
++ * 2) all active groups have the same weight,
++ * 3) all active groups have at most one active child each.
++ * In particular, the last two conditions are always true if hierarchical
++ * support and the cgroups interface are not enabled, thus no state needs
++ * to be maintained in this case.
++ */
++static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
++{
++ return !bfq_differentiated_weights(bfqd);
++}
++
++/*
++ * If the weight-counter tree passed as input contains no counter for
++ * the weight of the input entity, then add that counter; otherwise just
++ * increment the existing counter.
++ *
++ * Note that weight-counter trees contain few nodes in mostly symmetric
++ * scenarios. For example, if all queues have the same weight, then the
++ * weight-counter tree for the queues may contain at most one node.
++ * This holds even if low_latency is on, because weight-raised queues
++ * are not inserted in the tree.
++ * In most scenarios, the rate at which nodes are created/destroyed
++ * should be low too.
++ */
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ struct rb_node **new = &(root->rb_node), *parent = NULL;
++
++ /*
++ * Do not insert if the entity is already associated with a
++ * counter, which happens if:
++ * 1) the entity is associated with a queue,
++ * 2) a request arrival has caused the queue to become both
++ * non-weight-raised, and hence change its weight, and
++ * backlogged; in this respect, each of the two events
++ * causes an invocation of this function,
++ * 3) this is the invocation of this function caused by the
++ * second event. This second invocation is actually useless,
++ * and we handle this fact by exiting immediately. More
++ * efficient or clearer solutions might possibly be adopted.
++ */
++ if (entity->weight_counter)
++ return;
++
++ while (*new) {
++ struct bfq_weight_counter *__counter = container_of(*new,
++ struct bfq_weight_counter,
++ weights_node);
++ parent = *new;
++
++ if (entity->weight == __counter->weight) {
++ entity->weight_counter = __counter;
++ goto inc_counter;
++ }
++ if (entity->weight < __counter->weight)
++ new = &((*new)->rb_left);
++ else
++ new = &((*new)->rb_right);
++ }
++
++ entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
++ GFP_ATOMIC);
++
++ /*
++	 * In the unlucky event of an allocation failure, we just
++	 * exit. This causes the weight of entity not to be
++	 * considered in bfq_differentiated_weights, which, in
++	 * turn, causes the scenario to be wrongly deemed symmetric
++	 * in case entity's weight would have been the only weight
++	 * making the scenario asymmetric. On the bright side, no
++	 * imbalance will occur when entity becomes inactive again
++	 * (the invocation of this function is triggered by an
++	 * activation of entity). In fact, bfq_weights_tree_remove
++	 * does nothing if !entity->weight_counter.
++ */
++ if (unlikely(!entity->weight_counter))
++ return;
++
++ entity->weight_counter->weight = entity->weight;
++ rb_link_node(&entity->weight_counter->weights_node, parent, new);
++ rb_insert_color(&entity->weight_counter->weights_node, root);
++
++inc_counter:
++ entity->weight_counter->num_active++;
++}
++
++/*
++ * Decrement the weight counter associated with the entity, and, if the
++ * counter reaches 0, remove the counter from the tree.
++ * See the comments to the function bfq_weights_tree_add() for considerations
++ * about overhead.
++ */
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ if (!entity->weight_counter)
++ return;
++
++ BUG_ON(RB_EMPTY_ROOT(root));
++ BUG_ON(entity->weight_counter->weight != entity->weight);
++
++ BUG_ON(!entity->weight_counter->num_active);
++ entity->weight_counter->num_active--;
++ if (entity->weight_counter->num_active > 0)
++ goto reset_entity_pointer;
++
++ rb_erase(&entity->weight_counter->weights_node, root);
++ kfree(entity->weight_counter);
++
++reset_entity_pointer:
++ entity->weight_counter = NULL;
++}
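++
++/*
++ * A short scenario tying the two helpers above together (weights made
++ * up for illustration): if two busy queues both have weight 100, the
++ * queue weights tree holds a single counter node with num_active == 2,
++ * and bfq_differentiated_weights() stays false. Activating a third,
++ * non-weight-raised queue with weight 200 adds a second node, so the
++ * scenario is no longer deemed symmetric; once that queue becomes
++ * inactive again, bfq_weights_tree_remove() drops its counter and
++ * symmetry is restored.
++ */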
++
++/*
++ * Return expired entry, or NULL to just start from scratch in rbtree.
++ */
++static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct request *rq;
++
++ if (bfq_bfqq_fifo_expire(bfqq))
++ return NULL;
++
++ bfq_mark_bfqq_fifo_expire(bfqq);
++
++ rq = rq_entry_fifo(bfqq->fifo.next);
++
++ if (rq == last || ktime_get_ns() < rq->fifo_time)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
++ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
++ return rq;
++}
++
++static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct rb_node *rbnext = rb_next(&last->rb_node);
++ struct rb_node *rbprev = rb_prev(&last->rb_node);
++ struct request *next, *prev = NULL;
++
++ BUG_ON(list_empty(&bfqq->fifo));
++
++ /* Follow expired path, else get first next available. */
++ next = bfq_check_fifo(bfqq, last);
++ if (next) {
++ BUG_ON(next == last);
++ return next;
++ }
++
++ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
++
++ if (rbprev)
++ prev = rb_entry_rq(rbprev);
++
++ if (rbnext)
++ next = rb_entry_rq(rbnext);
++ else {
++ rbnext = rb_first(&bfqq->sort_list);
++ if (rbnext && rbnext != &last->rb_node)
++ next = rb_entry_rq(rbnext);
++ }
++
++ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
++}
++
++/* see the definition of bfq_async_charge_factor for details */
++static unsigned long bfq_serv_to_charge(struct request *rq,
++ struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
++ return blk_rq_sectors(rq);
++
++ /*
++ * If there are no weight-raised queues, then amplify service
++ * by just the async charge factor; otherwise amplify service
++ * by twice the async charge factor, to further reduce latency
++ * for weight-raised queues.
++ */
++ if (bfqq->bfqd->wr_busy_queues == 0)
++ return blk_rq_sectors(rq) * bfq_async_charge_factor;
++
++ return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
++}
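++
++/*
++ * For instance (numbers made up for illustration): an 8-sector request
++ * from a sync or weight-raised queue is charged exactly 8 sectors of
++ * service; the same 8-sector request from an async, non-weight-raised
++ * queue is charged 8 * 10 = 80 sectors when no weight-raised queues are
++ * busy, and 8 * 2 * 10 = 160 sectors otherwise, so that async I/O
++ * weighs more against its queue's budget whenever latency-sensitive
++ * queues are present.
++ */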
++
++/**
++ * bfq_updated_next_req - update the queue after a new next_rq selection.
++ * @bfqd: the device data the queue belongs to.
++ * @bfqq: the queue to update.
++ *
++ * If the first request of a queue changes we make sure that the queue
++ * has enough budget to serve at least its first request (if the
++ * request has grown). We do this because if the queue does not have
++ * enough budget for its first request, it has to go through two
++ * dispatch rounds to actually get the request dispatched.
++ */
++static void bfq_updated_next_req(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ struct request *next_rq = bfqq->next_rq;
++ unsigned long new_budget;
++
++ if (!next_rq)
++ return;
++
++ if (bfqq == bfqd->in_service_queue)
++ /*
++ * In order not to break guarantees, budgets cannot be
++ * changed after an entity has been selected.
++ */
++ return;
++
++ BUG_ON(entity->tree != &st->active);
++ BUG_ON(entity == entity->sched_data->in_service_entity);
++
++ new_budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ if (entity->budget != new_budget) {
++ entity->budget = new_budget;
++ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++ new_budget);
++ bfq_requeue_bfqq(bfqd, bfqq);
++ }
++}
++
++static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
++{
++ u64 dur;
++
++ if (bfqd->bfq_wr_max_time > 0)
++ return bfqd->bfq_wr_max_time;
++
++ dur = bfqd->RT_prod;
++ do_div(dur, bfqd->peak_rate);
++
++ /*
++	 * Limit duration between 3 and 13 seconds. Tests show that
++	 * values higher than 13 seconds often yield the opposite of
++	 * the desired result, i.e., worsen responsiveness by letting
++	 * non-interactive and non-soft-real-time applications
++	 * preserve weight raising for too long a time interval.
++	 *
++	 * On the other hand, values lower than 3 seconds make it
++	 * difficult for most interactive tasks to complete their jobs
++	 * before weight-raising finishes.
++ */
++ if (dur > msecs_to_jiffies(13000))
++ dur = msecs_to_jiffies(13000);
++ else if (dur < msecs_to_jiffies(3000))
++ dur = msecs_to_jiffies(3000);
++
++ return dur;
++}
++
++static void
++bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
++ struct bfq_io_cq *bic, bool bfq_already_existing)
++{
++ unsigned int old_wr_coeff;
++ bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
++
++ if (bic->saved_idle_window)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++
++ if (bic->saved_IO_bound)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ else
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ if (unlikely(busy))
++ old_wr_coeff = bfqq->wr_coeff;
++
++ bfqq->wr_coeff = bic->saved_wr_coeff;
++ bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
++ BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt));
++ bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
++ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
++ time_is_before_jiffies(bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time))) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "resume state: switching off wr (%lu + %lu < %lu)",
++ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
++ jiffies);
++
++ bfqq->wr_coeff = 1;
++ }
++
++	/* make sure weight will be updated, no matter how we got here */
++ bfqq->entity.prio_changed = 1;
++
++ if (likely(!busy))
++ return;
++
++ if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) {
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ } else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) {
++ bfqd->wr_busy_queues--;
++ BUG_ON(bfqd->wr_busy_queues < 0);
++ }
++}
++
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++ int process_refs, io_refs;
++
++ lockdep_assert_held(bfqq->bfqd->queue->queue_lock);
++
++ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st;
++ BUG_ON(process_refs < 0);
++ return process_refs;
++}
++
++/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
++static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_queue *item;
++ struct hlist_node *n;
++
++ hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
++ hlist_del_init(&item->burst_list_node);
++ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++ bfqd->burst_size = 1;
++ bfqd->burst_parent_entity = bfqq->entity.parent;
++}
++
++/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
++static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ /* Increment burst size to take into account also bfqq */
++ bfqd->burst_size++;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size);
++
++ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh);
++
++ if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
++ struct bfq_queue *pos, *bfqq_item;
++ struct hlist_node *n;
++
++ /*
++ * Enough queues have been activated shortly after each
++ * other to consider this burst as large.
++ */
++ bfqd->large_burst = true;
++ bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started");
++
++ /*
++ * We can now mark all queues in the burst list as
++ * belonging to a large burst.
++ */
++ hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
++ burst_list_node) {
++ bfq_mark_bfqq_in_large_burst(bfqq_item);
++ bfq_log_bfqq(bfqd, bfqq_item, "marked in large burst");
++ }
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ bfq_log_bfqq(bfqd, bfqq, "marked in large burst");
++
++ /*
++ * From now on, and until the current burst finishes, any
++ * new queue being activated shortly after the last queue
++ * was inserted in the burst can be immediately marked as
++ * belonging to a large burst. So the burst list is not
++ * needed any more. Remove it.
++ */
++ hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
++ burst_list_node)
++ hlist_del_init(&pos->burst_list_node);
++ } else /*
++ * Burst not yet large: add bfqq to the burst list. Do
++ * not increment the ref counter for bfqq, because bfqq
++ * is removed from the burst list before freeing bfqq
++ * in put_queue.
++ */
++ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++}
++
++/*
++ * If many queues belonging to the same group happen to be created
++ * shortly after each other, then the processes associated with these
++ * queues have typically a common goal. In particular, bursts of queue
++ * creations are usually caused by services or applications that spawn
++ * many parallel threads/processes. Examples are systemd during boot,
++ * or git grep. To help these processes get their job done as soon as
++ * possible, it is usually better to not grant either weight-raising
++ * or device idling to their queues.
++ *
++ * In this comment we describe, firstly, the reasons why this fact
++ * holds, and, secondly, the next function, which implements the main
++ * steps needed to properly mark these queues so that they can then be
++ * treated in a different way.
++ *
++ * The above services or applications benefit mostly from a high
++ * throughput: the quicker the requests of the activated queues are
++ * cumulatively served, the sooner the target job of these queues gets
++ * completed. As a consequence, weight-raising any of these queues,
++ * which also implies idling the device for it, is almost always
++ * counterproductive. In most cases it just lowers throughput.
++ *
++ * On the other hand, a burst of queue creations may be caused also by
++ * the start of an application that does not consist of a lot of
++ * parallel I/O-bound threads. In fact, with a complex application,
++ * several short processes may need to be executed to start up the
++ * application. In this respect, to start an application as quickly as
++ * possible, the best thing to do is in any case to privilege the I/O
++ * related to the application with respect to all other
++ * I/O. Therefore, the best strategy to start as quickly as possible
++ * an application that causes a burst of queue creations is to
++ * weight-raise all the queues created during the burst. This is the
++ * exact opposite of the best strategy for the other type of bursts.
++ *
++ * In the end, to take the best action for each of the two cases, the
++ * two types of bursts need to be distinguished. Fortunately, this
++ * seems relatively easy, by looking at the sizes of the bursts. In
++ * particular, we found a threshold such that only bursts with a
++ * larger size than that threshold are apparently caused by
++ * services or commands such as systemd or git grep. For brevity,
++ * hereafter we call just 'large' these bursts. BFQ *does not*
++ * weight-raise queues whose creation occurs in a large burst. In
++ * addition, for each of these queues BFQ performs or does not perform
++ * idling depending on which choice boosts the throughput more. The
++ * exact choice depends on the device and request pattern at
++ * hand.
++ *
++ * Unfortunately, false positives may occur while an interactive task
++ * is starting (e.g., an application is being started). The
++ * consequence is that the queues associated with the task do not
++ * enjoy weight raising as expected. Fortunately these false positives
++ * are very rare. They typically occur if some service happens to
++ * start doing I/O exactly when the interactive task starts.
++ *
++ * Turning back to the next function, it implements all the steps
++ * needed to detect the occurrence of a large burst and to properly
++ * mark all the queues belonging to it (so that they can then be
++ * treated in a different way). This goal is achieved by maintaining a
++ * "burst list" that holds, temporarily, the queues that belong to the
++ * burst in progress. The list is then used to mark these queues as
++ * belonging to a large burst if the burst does become large. The main
++ * steps are the following.
++ *
++ * . when the very first queue is created, the queue is inserted into the
++ * list (as it could be the first queue in a possible burst)
++ *
++ * . if the current burst has not yet become large, and a queue Q that does
++ * not yet belong to the burst is activated shortly after the last time
++ * at which a new queue entered the burst list, then the function appends
++ * Q to the burst list
++ *
++ * . if, as a consequence of the previous step, the burst size reaches
++ * the large-burst threshold, then
++ *
++ * . all the queues in the burst list are marked as belonging to a
++ * large burst
++ *
++ * . the burst list is deleted; in fact, the burst list already served
++ * its purpose (keeping temporarily track of the queues in a burst,
++ * so as to be able to mark them as belonging to a large burst in the
++ * previous sub-step), and now is not needed any more
++ *
++ * . the device enters a large-burst mode
++ *
++ * . if a queue Q that does not belong to the burst is created while
++ * the device is in large-burst mode and shortly after the last time
++ * at which a queue either entered the burst list or was marked as
++ * belonging to the current large burst, then Q is immediately marked
++ * as belonging to a large burst.
++ *
++ * . if a queue Q that does not belong to the burst is created a while
++ * later than (i.e., not shortly after) the last time at which a queue
++ * either entered the burst list or was marked as belonging to the
++ * current large burst, then the current burst is deemed finished and:
++ *
++ * . the large-burst mode is reset if set
++ *
++ * . the burst list is emptied
++ *
++ * . Q is inserted in the burst list, as Q may be the first queue
++ * in a possible new burst (then the burst list contains just Q
++ * after this step).
++ */
++static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq is already in the burst list or is part of a large
++ * burst, or finally has just been split, then there is
++ * nothing else to do.
++ */
++ if (!hlist_unhashed(&bfqq->burst_list_node) ||
++ bfq_bfqq_in_large_burst(bfqq) ||
++ time_is_after_eq_jiffies(bfqq->split_time +
++ msecs_to_jiffies(10)))
++ return;
++
++ /*
++ * If bfqq's creation happens late enough, or bfqq belongs to
++ * a different group than the burst group, then the current
++ * burst is finished, and related data structures must be
++ * reset.
++ *
++ * In this respect, consider the special case where bfqq is
++ * the very first queue created after BFQ is selected for this
++ * device. In this case, last_ins_in_burst and
++ * burst_parent_entity are not yet significant when we get
++ * here. But it is easy to verify that, whether or not the
++ * following condition is true, bfqq will end up being
++ * inserted into the burst list. In particular the list will
++ * happen to contain only bfqq. And this is exactly what has
++ * to happen, as bfqq may be the first queue of the first
++ * burst.
++ */
++ if (time_is_before_jiffies(bfqd->last_ins_in_burst +
++ bfqd->bfq_burst_interval) ||
++ bfqq->entity.parent != bfqd->burst_parent_entity) {
++ bfqd->large_burst = false;
++ bfq_reset_burst_list(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "handle_burst: late activation or different group");
++ goto end;
++ }
++
++ /*
++ * If we get here, then bfqq is being activated shortly after the
++ * last queue. So, if the current burst is also large, we can mark
++ * bfqq as belonging to this large burst immediately.
++ */
++ if (bfqd->large_burst) {
++ bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst");
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ goto end;
++ }
++
++ /*
++ * If we get here, then a large-burst state has not yet been
++ * reached, but bfqq is being activated shortly after the last
++ * queue. Then we add bfqq to the burst.
++ */
++ bfq_add_to_burst(bfqd, bfqq);
++end:
++ /*
++ * At this point, bfqq either has been added to the current
++ * burst or has caused the current burst to terminate and a
++ * possible new burst to start. In particular, in the second
++ * case, bfqq has become the first queue in the possible new
++ * burst. In both cases last_ins_in_burst needs to be moved
++ * forward.
++ */
++ bfqd->last_ins_in_burst = jiffies;
++
++}
++
++static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ return entity->budget - entity->service;
++}
++
++/*
++ * If enough samples have been computed, return the current max budget
++ * stored in bfqd, which is dynamically updated according to the
++ * estimated disk peak rate; otherwise return the default max budget
++ */
++static int bfq_max_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < bfq_stats_min_budgets)
++ return bfq_default_max_budget;
++ else
++ return bfqd->bfq_max_budget;
++}
++
++/*
++ * Return min budget, which is a fraction of the current or default
++ * max budget (trying with 1/32)
++ */
++static int bfq_min_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < bfq_stats_min_budgets)
++ return bfq_default_max_budget / 32;
++ else
++ return bfqd->bfq_max_budget / 32;
++}
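++
++/*
++ * Concretely, with the default values defined earlier in this file:
++ * until bfq_stats_min_budgets (194) budgets have been assigned,
++ * bfq_max_budget() returns bfq_default_max_budget = 16384 sectors
++ * (8 MiB with 512-byte sectors) and bfq_min_budget() returns
++ * 16384 / 32 = 512 sectors (256 KiB); afterwards both are derived from
++ * the dynamically estimated bfqd->bfq_max_budget instead.
++ */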
++
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool compensate,
++ enum bfqq_expiration reason);
++
++/*
++ * The next function, invoked after the input queue bfqq switches from
++ * idle to busy, updates the budget of bfqq. The function also tells
++ * whether the in-service queue should be expired, by returning
++ * true. The purpose of expiring the in-service queue is to give bfqq
++ * the chance to possibly preempt the in-service queue, and the reason
++ * for preempting the in-service queue is to achieve one of the two
++ * goals below.
++ *
++ * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
++ * expired because it has remained idle. In particular, bfqq may have
++ * expired for one of the following two reasons:
++ *
++ * - BFQ_BFQQ_NO_MORE_REQUEST bfqq did not enjoy any device idling and
++ * did not manage to issue a new request before its last request
++ * was served;
++ *
++ * - BFQ_BFQQ_TOO_IDLE bfqq did enjoy device idling, but did not issue
++ * a new request before the expiration of the idling-time.
++ *
++ * Even if bfqq has expired for one of the above reasons, the process
++ * associated with the queue may be however issuing requests greedily,
++ * and thus be sensitive to the bandwidth it receives (bfqq may have
++ * remained idle for other reasons: high CPU load, bfqq not enjoying
++ * idling, I/O throttling somewhere in the path from the process to
++ * the I/O scheduler, ...). But if, after every expiration for one of
++ * the above two reasons, bfqq has to wait for the service of at least
++ * one full budget of another queue before being served again, then
++ * bfqq is likely to get a much lower bandwidth or resource time than
++ * its reserved ones. To address this issue, two countermeasures need
++ * to be taken.
++ *
++ * First, the budget and the timestamps of bfqq need to be updated in
++ * a special way on bfqq reactivation: they need to be updated as if
++ * bfqq did not remain idle and did not expire. In fact, if they are
++ * computed as if bfqq expired and remained idle until reactivation,
++ * then the process associated with bfqq is treated as if, instead of
++ * being greedy, it stopped issuing requests when bfqq remained idle,
++ * and restarts issuing requests only on this reactivation. In other
++ * words, the scheduler does not help the process recover the "service
++ * hole" between bfqq expiration and reactivation. As a consequence,
++ * the process receives a lower bandwidth than its reserved one. In
++ * contrast, to recover this hole, the budget must be updated as if
++ * bfqq was not expired at all before this reactivation, i.e., it must
++ * be set to the value of the remaining budget when bfqq was
++ * expired. Along the same line, timestamps need to be assigned the
++ * value they had the last time bfqq was selected for service, i.e.,
++ * before last expiration. Thus timestamps need to be back-shifted
++ * with respect to their normal computation (see [1] for more details
++ * on this tricky aspect).
++ *
++ * Secondly, to allow the process to recover the hole, the in-service
++ * queue must be expired too, to give bfqq the chance to preempt it
++ * immediately. In fact, if bfqq has to wait for a full budget of the
++ * in-service queue to be completed, then it may become impossible to
++ * let the process recover the hole, even if the back-shifted
++ * timestamps of bfqq are lower than those of the in-service queue. If
++ * this happens for most or all of the holes, then the process may not
++ * receive its reserved bandwidth. In this respect, it is worth noting
++ * that, since the service of outstanding requests is not preemptible,
++ * a small fraction of the holes may nevertheless be unrecoverable,
++ * thereby causing a small loss of bandwidth.
++ *
++ * The last important point is detecting whether bfqq does need this
++ * bandwidth recovery. In this respect, the next function deems the
++ * process associated with bfqq greedy, and thus allows it to recover
++ * the hole, if: 1) the process is waiting for the arrival of a new
++ * request (which implies that bfqq expired for one of the above two
++ * reasons), and 2) such a request has arrived soon. The first
++ * condition is controlled through the flag non_blocking_wait_rq,
++ * while the second through the flag arrived_in_time. If both
++ * conditions hold, then the function computes the budget in the
++ * above-described special way, and signals that the in-service queue
++ * should be expired. Timestamp back-shifting is done later in
++ * __bfq_activate_entity.
++ *
++ * 2. Reduce latency. Even if timestamps are not backshifted to let
++ * the process associated with bfqq recover a service hole, bfqq may
++ * however happen to have, after being (re)activated, a lower finish
++ * timestamp than the in-service queue. That is, the next budget of
++ * bfqq may have to be completed before the one of the in-service
++ * queue. If this is the case, then preempting the in-service queue
++ * allows this goal to be achieved, apart from the unpreemptible,
++ * outstanding requests mentioned above.
++ *
++ * Unfortunately, regardless of which of the above two goals one wants
++ * to achieve, service trees need first to be updated to know whether
++ * the in-service queue must be preempted. To have service trees
++ * correctly updated, the in-service queue must be expired and
++ * rescheduled, and bfqq must be scheduled too. This is one of the
++ * most costly operations (in future versions, the scheduling
++ * mechanism may be re-designed in such a way to make it possible to
++ * know whether preemption is needed without needing to update service
++ * trees). In addition, queue preemptions almost always cause random
++ * I/O, and thus loss of throughput. Because of these facts, the next
++ * function adopts the following simple scheme to avoid both costly
++ * operations and too frequent preemptions: it requests the expiration
++ * of the in-service queue (unconditionally) only for queues that need
++ * to recover a hole, or that either are weight-raised or deserve to
++ * be weight-raised.
++ */
++static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool arrived_in_time,
++ bool wr_or_deserves_wr)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
++ /*
++ * We do not clear the flag non_blocking_wait_rq here, as
++ * the latter is used in bfq_activate_bfqq to signal
++ * that timestamps need to be back-shifted (and is
++ * cleared right after).
++ */
++
++ /*
++		 * In the next assignment we rely on the fact that
++		 * neither entity->service nor entity->budget is
++		 * updated on expiration if bfqq is empty (see
++		 * __bfq_bfqq_recalc_budget). Thus both quantities
++ * remain unchanged after such an expiration, and the
++ * following statement therefore assigns to
++ * entity->budget the remaining budget on such an
++ * expiration. For clarity, entity->service is not
++ * updated on expiration in any case, and, in normal
++ * operation, is reset only when bfqq is selected for
++ * service (see bfq_get_next_queue).
++ */
++ BUG_ON(bfqq->max_budget < 0);
++ entity->budget = min_t(unsigned long,
++ bfq_bfqq_budget_left(bfqq),
++ bfqq->max_budget);
++
++ BUG_ON(entity->budget < 0);
++ return true;
++ }
++
++ BUG_ON(bfqq->max_budget < 0);
++ entity->budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(bfqq->next_rq, bfqq));
++ BUG_ON(entity->budget < 0);
++
++ bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
++ return wr_or_deserves_wr;
++}
++
++static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ unsigned int old_wr_coeff,
++ bool wr_or_deserves_wr,
++ bool interactive,
++ bool in_burst,
++ bool soft_rt)
++{
++ if (old_wr_coeff == 1 && wr_or_deserves_wr) {
++ /* start a weight-raising period */
++ if (interactive) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ } else {
++ bfqq->wr_start_at_switch_to_srt = jiffies;
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
++ BFQ_SOFTRT_WEIGHT_FACTOR;
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ }
++ /*
++ * If needed, further reduce budget to make sure it is
++ * close to bfqq's backlog, so as to reduce the
++		 * scheduling-error component due to too large a
++		 * budget. Do not care about throughput consequences,
++		 * but only about latency. Finally, do not assign
++		 * too small a budget either, to avoid increasing
++ * latency by causing too frequent expirations.
++ */
++ bfqq->entity.budget = min_t(unsigned long,
++ bfqq->entity.budget,
++ 2 * bfq_min_budget(bfqd));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais starting at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ } else if (old_wr_coeff > 1) {
++ if (interactive) { /* update wr coeff and duration */
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ } else if (in_burst) {
++ bfqq->wr_coeff = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais ending at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->
++ wr_cur_max_time));
++ } else if (soft_rt) {
++ /*
++ * The application is now or still meeting the
++ * requirements for being deemed soft rt. We
++ * can then correctly and safely (re)charge
++ * the weight-raising duration for the
++ * application with the weight-raising
++ * duration for soft rt applications.
++ *
++ * In particular, doing this recharge now, i.e.,
++ * before the weight-raising period for the
++ * application finishes, reduces the probability
++ * of the following negative scenario:
++ * 1) the weight of a soft rt application is
++ * raised at startup (as for any newly
++ * created application),
++ * 2) since the application is not interactive,
++ * at a certain time weight-raising is
++ * stopped for the application,
++ * 3) at that time the application happens to
++ * still have pending requests, and hence
++ * is destined to not have a chance to be
++ * deemed soft rt before these requests are
++ * completed (see the comments to the
++ * function bfq_bfqq_softrt_next_start()
++ * for details on soft rt detection),
++ * 4) these pending requests experience a high
++ * latency because the application is not
++ * weight-raised while they are pending.
++ */
++ if (bfqq->wr_cur_max_time !=
++ bfqd->bfq_wr_rt_max_time) {
++ bfqq->wr_start_at_switch_to_srt =
++ bfqq->last_wr_start_finish;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
++ BFQ_SOFTRT_WEIGHT_FACTOR;
++ bfq_log_bfqq(bfqd, bfqq,
++ "switching to soft_rt wr");
++ } else
++ bfq_log_bfqq(bfqd, bfqq,
++ "moving forward soft_rt wr duration");
++ bfqq->last_wr_start_finish = jiffies;
++ }
++ }
++}
++
++static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ return bfqq->dispatched == 0 &&
++ time_is_before_jiffies(
++ bfqq->budget_timeout +
++ bfqd->bfq_wr_min_idle_time);
++}
++
++static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ int old_wr_coeff,
++ struct request *rq,
++ bool *interactive)
++{
++ bool soft_rt, in_burst, wr_or_deserves_wr,
++ bfqq_wants_to_preempt,
++ idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
++ /*
++ * See the comments on
++ * bfq_bfqq_update_budg_for_activation for
++ * details on the usage of the next variable.
++ */
++ arrived_in_time = ktime_get_ns() <=
++ RQ_BIC(rq)->ttime.last_end_request +
++ bfqd->bfq_slice_idle * 3;
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "bfq_add_request non-busy: "
++ "jiffies %lu, in_time %d, idle_long %d busyw %d "
++ "wr_coeff %u",
++ jiffies, arrived_in_time,
++ idle_for_long_time,
++ bfq_bfqq_non_blocking_wait_rq(bfqq),
++ old_wr_coeff);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ BUG_ON(bfqq == bfqd->in_service_queue);
++ bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
++
++ /*
++ * bfqq deserves to be weight-raised if:
++ * - it is sync,
++ * - it does not belong to a large burst,
++ * - it has been idle for enough time or is soft real-time,
++	 * - it is linked to a bfq_io_cq (it is not shared in any sense)
++ */
++ in_burst = bfq_bfqq_in_large_burst(bfqq);
++ soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
++ !in_burst &&
++ time_is_before_jiffies(bfqq->soft_rt_next_start);
++ *interactive =
++ !in_burst &&
++ idle_for_long_time;
++ wr_or_deserves_wr = bfqd->low_latency &&
++ (bfqq->wr_coeff > 1 ||
++ (bfq_bfqq_sync(bfqq) &&
++ bfqq->bic && (*interactive || soft_rt)));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "bfq_add_request: "
++ "in_burst %d, "
++ "soft_rt %d (next %lu), inter %d, bic %p",
++ bfq_bfqq_in_large_burst(bfqq), soft_rt,
++ bfqq->soft_rt_next_start,
++ *interactive,
++ bfqq->bic);
++
++ /*
++ * Using the last flag, update budget and check whether bfqq
++ * may want to preempt the in-service queue.
++ */
++ bfqq_wants_to_preempt =
++ bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
++ arrived_in_time,
++ wr_or_deserves_wr);
++
++ /*
++ * If bfqq happened to be activated in a burst, but has been
++ * idle for much more than an interactive queue, then we
++ * assume that, in the overall I/O initiated in the burst, the
++ * I/O associated with bfqq is finished. So bfqq does not need
++ * to be treated as a queue belonging to a burst
++ * anymore. Accordingly, we reset bfqq's in_large_burst flag
++ * if set, and remove bfqq from the burst list if it's
++ * there. We do not decrement burst_size, because the fact
++ * that bfqq does not need to belong to the burst list any
++ * more does not invalidate the fact that bfqq was created in
++ * a burst.
++ */
++ if (likely(!bfq_bfqq_just_created(bfqq)) &&
++ idle_for_long_time &&
++ time_is_before_jiffies(
++ bfqq->budget_timeout +
++ msecs_to_jiffies(10000))) {
++ hlist_del_init(&bfqq->burst_list_node);
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ }
++
++ bfq_clear_bfqq_just_created(bfqq);
++
++ if (!bfq_bfqq_IO_bound(bfqq)) {
++ if (arrived_in_time) {
++ bfqq->requests_within_timer++;
++ if (bfqq->requests_within_timer >=
++ bfqd->bfq_requests_within_timer)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ } else
++ bfqq->requests_within_timer = 0;
++ bfq_log_bfqq(bfqd, bfqq, "requests in time %d",
++ bfqq->requests_within_timer);
++ }
++
++ if (bfqd->low_latency) {
++ if (unlikely(time_is_after_jiffies(bfqq->split_time)))
++ /* wraparound */
++ bfqq->split_time =
++ jiffies - bfqd->bfq_wr_min_idle_time - 1;
++
++ if (time_is_before_jiffies(bfqq->split_time +
++ bfqd->bfq_wr_min_idle_time)) {
++ bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
++ old_wr_coeff,
++ wr_or_deserves_wr,
++ *interactive,
++ in_burst,
++ soft_rt);
++
++ if (old_wr_coeff != bfqq->wr_coeff)
++ bfqq->entity.prio_changed = 1;
++ }
++ }
++
++ bfqq->last_idle_bklogged = jiffies;
++ bfqq->service_from_backlogged = 0;
++ bfq_clear_bfqq_softrt_update(bfqq);
++
++ bfq_add_bfqq_busy(bfqd, bfqq);
++
++ /*
++ * Expire in-service queue only if preemption may be needed
++ * for guarantees. In this respect, the function
++ * next_queue_may_preempt just checks a simple, necessary
++ * condition, and not a sufficient condition based on
++ * timestamps. In fact, for the latter condition to be
++ * evaluated, timestamps would need first to be updated, and
++ * this operation is quite costly (see the comments on the
++ * function bfq_bfqq_update_budg_for_activation).
++ */
++ if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
++ bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
++ next_queue_may_preempt(bfqd)) {
++ struct bfq_queue *in_serv =
++ bfqd->in_service_queue;
++ BUG_ON(in_serv == bfqq);
++
++ bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
++ false, BFQ_BFQQ_PREEMPTED);
++ }
++}
++
++static void bfq_add_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *next_rq, *prev;
++ unsigned int old_wr_coeff = bfqq->wr_coeff;
++ bool interactive = false;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s",
++ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A");
++
++ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqq->wr_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ bfqq->queued[rq_is_sync(rq)]++;
++ bfqd->queued++;
++
++ elv_rb_add(&bfqq->sort_list, rq);
++
++ /*
++ * Check if this request is a better next-to-serve candidate.
++ */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
++ BUG_ON(!next_rq);
++ bfqq->next_rq = next_rq;
++
++ /*
++ * Adjust priority tree position, if next_rq changes.
++ */
++ if (prev != bfqq->next_rq)
++ bfq_pos_tree_add_move(bfqd, bfqq);
++
++ if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
++ bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
++ rq, &interactive);
++ else {
++ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
++ time_is_before_jiffies(
++ bfqq->last_wr_start_finish +
++ bfqd->bfq_wr_min_inter_arr_async)) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "non-idle wrais starting, "
++ "wr_max_time %u wr_busy %d",
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqd->wr_busy_queues);
++ }
++ if (prev != bfqq->next_rq)
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ /*
++ * Assign jiffies to last_wr_start_finish in the following
++ * cases:
++ *
++ * . if bfqq is not going to be weight-raised, because, for
++ * non weight-raised queues, last_wr_start_finish stores the
++ * arrival time of the last request; as of now, this piece
++ * of information is used only for deciding whether to
++ * weight-raise async queues
++ *
++ * . if bfqq is not weight-raised, because, if bfqq is now
++ * switching to weight-raised, then last_wr_start_finish
++ * stores the time when weight-raising starts
++ *
++ * . if bfqq is interactive, because, regardless of whether
++ * bfqq is currently weight-raised, the weight-raising
++ * period must start or restart (this case is considered
++ * separately because it is not detected by the above
++ * conditions, if bfqq is already weight-raised)
++ *
++ * last_wr_start_finish has to be updated also if bfqq is soft
++ * real-time, because the weight-raising period is constantly
++ * restarted on idle-to-busy transitions for these queues, but
++ * this is already done in bfq_bfqq_handle_idle_busy_switch if
++ * needed.
++ */
++ if (bfqd->low_latency &&
++ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
++ bfqq->last_wr_start_finish = jiffies;
++}
++
++static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
++ struct bio *bio)
++{
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (!bic)
++ return NULL;
++
++ bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
++ if (bfqq)
++ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
++
++ return NULL;
++}
++
++static sector_t get_sdist(sector_t last_pos, struct request *rq)
++{
++ sector_t sdist = 0;
++
++ if (last_pos) {
++ if (last_pos < blk_rq_pos(rq))
++ sdist = blk_rq_pos(rq) - last_pos;
++ else
++ sdist = last_pos - blk_rq_pos(rq);
++ }
++
++ return sdist;
++}
++
++static void bfq_activate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ bfqd->rq_in_driver++;
++}
++
++static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ BUG_ON(bfqd->rq_in_driver == 0);
++ bfqd->rq_in_driver--;
++}
++
++static void bfq_remove_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ const int sync = rq_is_sync(rq);
++
++ BUG_ON(bfqq->entity.service > bfqq->entity.budget &&
++ bfqq == bfqd->in_service_queue);
++
++ if (bfqq->next_rq == rq) {
++ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ if (rq->queuelist.prev != &rq->queuelist)
++ list_del_init(&rq->queuelist);
++ BUG_ON(bfqq->queued[sync] == 0);
++ bfqq->queued[sync]--;
++ bfqd->queued--;
++ elv_rb_del(&bfqq->sort_list, rq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ bfqq->next_rq = NULL;
++
++ BUG_ON(bfqq->entity.budget < 0);
++
++ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
++ BUG_ON(bfqq->ref < 2); /* referred by rq and on tree */
++ bfq_del_bfqq_busy(bfqd, bfqq, false);
++ /*
++ * bfqq emptied. In normal operation, when
++ * bfqq is empty, bfqq->entity.service and
++ * bfqq->entity.budget must contain,
++ * respectively, the service received and the
++ * budget used last time bfqq emptied. These
++ * facts do not hold in this case, as at least
++ * this last removal occurred while bfqq is
++ * not in service. To avoid inconsistencies,
++ * reset both bfqq->entity.service and
++ * bfqq->entity.budget, if bfqq has still a
++ * process that may issue I/O requests to it.
++ */
++ bfqq->entity.budget = bfqq->entity.service = 0;
++ }
++
++ /*
++ * Remove queue from request-position tree as it is empty.
++ */
++ if (bfqq->pos_root) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++ }
++
++ if (rq->cmd_flags & REQ_META) {
++ BUG_ON(bfqq->meta_pending == 0);
++ bfqq->meta_pending--;
++ }
++ bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
++}
++
++static enum elv_merge bfq_merge(struct request_queue *q, struct request **req,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct request *__rq;
++
++ __rq = bfq_find_rq_fmerge(bfqd, bio);
++ if (__rq && elv_bio_merge_ok(__rq, bio)) {
++ *req = __rq;
++ return ELEVATOR_FRONT_MERGE;
++ }
++
++ return ELEVATOR_NO_MERGE;
++}
++
++static void bfq_merged_request(struct request_queue *q, struct request *req,
++ enum elv_merge type)
++{
++ if (type == ELEVATOR_FRONT_MERGE &&
++ rb_prev(&req->rb_node) &&
++ blk_rq_pos(req) <
++ blk_rq_pos(container_of(rb_prev(&req->rb_node),
++ struct request, rb_node))) {
++ struct bfq_queue *bfqq = RQ_BFQQ(req);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *prev, *next_rq;
++
++ /* Reposition request in its sort_list */
++ elv_rb_del(&bfqq->sort_list, req);
++ elv_rb_add(&bfqq->sort_list, req);
++ /* Choose next request to be served for bfqq */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
++ bfqd->last_position);
++ BUG_ON(!next_rq);
++ bfqq->next_rq = next_rq;
++ /*
++ * If next_rq changes, update both the queue's budget to
++ * fit the new request and the queue's position in its
++ * rq_pos_tree.
++ */
++ if (prev != bfqq->next_rq) {
++ bfq_updated_next_req(bfqd, bfqq);
++ bfq_pos_tree_add_move(bfqd, bfqq);
++ }
++ }
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static void bfq_bio_merged(struct request_queue *q, struct request *req,
++ struct bio *bio)
++{
++ bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf);
++}
++#endif
++
++static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
++
++ /*
++ * If next and rq belong to the same bfq_queue and next is older
++ * than rq, then reposition rq in the fifo (by substituting next
++ * with rq). Otherwise, if next and rq belong to different
++ * bfq_queues, never reposition rq: in fact, we would have to
++ * reposition it with respect to next's position in its own fifo,
++ * which would most certainly be too expensive with respect to
++ * the benefits.
++ */
++ if (bfqq == next_bfqq &&
++ !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
++ next->fifo_time < rq->fifo_time) {
++ list_del_init(&rq->queuelist);
++ list_replace_init(&next->queuelist, &rq->queuelist);
++ rq->fifo_time = next->fifo_time;
++ }
++
++ if (bfqq->next_rq == next)
++ bfqq->next_rq = rq;
++
++ bfq_remove_request(next);
++ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
++}
++
++/* Must be called with bfqq != NULL */
++static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
++{
++ BUG_ON(!bfqq);
++
++ if (bfq_bfqq_busy(bfqq)) {
++ bfqq->bfqd->wr_busy_queues--;
++ BUG_ON(bfqq->bfqd->wr_busy_queues < 0);
++ }
++ bfqq->wr_coeff = 1;
++ bfqq->wr_cur_max_time = 0;
++ bfqq->last_wr_start_finish = jiffies;
++ /*
++ * Trigger a weight change on the next invocation of
++ * __bfq_entity_update_weight_prio.
++ */
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "end_wr: wrais ending at %lu, rais_max_time %u",
++ bfqq->last_wr_start_finish,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d",
++ bfqq->bfqd->wr_busy_queues);
++}
++
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ if (bfqg->async_bfqq[i][j])
++ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
++ if (bfqg->async_idle_bfqq)
++ bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
++}
++
++static void bfq_end_wr(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ bfq_end_wr_async(bfqd);
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++}
++
++static sector_t bfq_io_struct_pos(void *io_struct, bool request)
++{
++ if (request)
++ return blk_rq_pos(io_struct);
++ else
++ return ((struct bio *)io_struct)->bi_iter.bi_sector;
++}
++
++static int bfq_rq_close_to_sector(void *io_struct, bool request,
++ sector_t sector)
++{
++ return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
++ BFQQ_CLOSE_THR;
++}
++
++static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ sector_t sector)
++{
++ struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
++ struct rb_node *parent, *node;
++ struct bfq_queue *__bfqq;
++
++ if (RB_EMPTY_ROOT(root))
++ return NULL;
++
++ /*
++ * First, if we find a request starting at the end of the last
++ * request, choose it.
++ */
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
++ if (__bfqq)
++ return __bfqq;
++
++ /*
++ * If the exact sector wasn't found, the parent of the NULL leaf
++ * will contain the closest sector (rq_pos_tree sorted by
++ * next_request position).
++ */
++ __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ if (blk_rq_pos(__bfqq->next_rq) < sector)
++ node = rb_next(&__bfqq->pos_node);
++ else
++ node = rb_prev(&__bfqq->pos_node);
++ if (!node)
++ return NULL;
++
++ __bfqq = rb_entry(node, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ return NULL;
++}
++
++static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
++ struct bfq_queue *cur_bfqq,
++ sector_t sector)
++{
++ struct bfq_queue *bfqq;
++
++ /*
++ * We shall notice if some of the queues are cooperating,
++ * e.g., working closely on the same area of the device. In
++ * that case, we can group them together and: 1) don't waste
++ * time idling, and 2) serve the union of their requests in
++ * the best possible order for throughput.
++ */
++ bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
++ if (!bfqq || bfqq == cur_bfqq)
++ return NULL;
++
++ return bfqq;
++}
++
++static struct bfq_queue *
++bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ int process_refs, new_process_refs;
++ struct bfq_queue *__bfqq;
++
++ /*
++ * If there are no process references on the new_bfqq, then it is
++ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++ * may have dropped their last reference (not just their last process
++ * reference).
++ */
++ if (!bfqq_process_refs(new_bfqq))
++ return NULL;
++
++ /* Avoid a circular list and skip interim queue merges. */
++ while ((__bfqq = new_bfqq->new_bfqq)) {
++ if (__bfqq == bfqq)
++ return NULL;
++ new_bfqq = __bfqq;
++ }
++
++ process_refs = bfqq_process_refs(bfqq);
++ new_process_refs = bfqq_process_refs(new_bfqq);
++ /*
++ * If the process for the bfqq has gone away, there is no
++ * sense in merging the queues.
++ */
++ if (process_refs == 0 || new_process_refs == 0)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++ new_bfqq->pid);
++
++ /*
++ * Merging is just a redirection: the requests of the process
++ * owning one of the two queues are redirected to the other queue.
++ * The latter queue, in its turn, is set as shared if this is the
++ * first time that the requests of some process are redirected to
++ * it.
++ *
++ * We redirect bfqq to new_bfqq and not the opposite, because we
++ * are in the context of the process owning bfqq, hence we have
++ * the io_cq of this process. So we can immediately configure this
++ * io_cq to redirect the requests of the process to new_bfqq.
++ *
++ * NOTE, even if new_bfqq coincides with the in-service queue, the
++ * io_cq of new_bfqq is not available, because, if the in-service
++ * queue is shared, bfqd->in_service_bic may not point to the
++ * io_cq of the in-service queue.
++ * Redirecting the requests of the process owning bfqq to the
++ * currently in-service queue is in any case the best option, as
++ * we feed the in-service queue with new requests close to the
++ * last request served and, by doing so, hopefully increase the
++ * throughput.
++ */
++ bfqq->new_bfqq = new_bfqq;
++ new_bfqq->ref += process_refs;
++ return new_bfqq;
++}
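++
++/*
++ * Illustrative note (hypothetical scenario): suppose process P1 owns bfqq
++ * with one process reference and process P2 owns new_bfqq, also with one
++ * process reference. Scheduling the merge above leaves bfqq->new_bfqq
++ * pointing to new_bfqq and raises new_bfqq->ref by bfqq's process
++ * references (here, by one). If new_bfqq is later scheduled for a merge
++ * in its turn, the while loop above follows the ->new_bfqq chain to its
++ * current end, so requests always end up redirected to the final queue
++ * of the chain.
++ */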
++
++static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
++ struct bfq_queue *new_bfqq)
++{
++ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
++ (bfqq->ioprio_class != new_bfqq->ioprio_class))
++ return false;
++
++ /*
++ * If either of the queues has already been detected as seeky,
++ * then merging it with the other queue is unlikely to lead to
++ * sequential I/O.
++ */
++ if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
++ return false;
++
++ /*
++ * Interleaved I/O is known to be done by (some) applications
++ * only for reads, so it does not make sense to merge async
++ * queues.
++ */
++ if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
++ return false;
++
++ return true;
++}
++
++/*
++ * If this function returns true, then bfqq cannot be merged. The idea
++ * is that true cooperation happens very early after processes start
++ * to do I/O. Usually, late cooperations are just accidental false
++ * positives. In case bfqq is weight-raised, such false positives
++ * would evidently degrade latency guarantees for bfqq.
++ */
++static bool wr_from_too_long(struct bfq_queue *bfqq)
++{
++ return bfqq->wr_coeff > 1 &&
++ time_is_before_jiffies(bfqq->last_wr_start_finish +
++ msecs_to_jiffies(100));
++}
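++
++/*
++ * Example (hypothetical timings): wr_from_too_long() returns false for a
++ * queue whose weight-raising period started, say, 40 ms ago, so such a
++ * queue may still be merged; once the raising has lasted more than 100 ms,
++ * the function returns true and the callers below refuse to merge the
++ * queue, treating late cooperation as a likely false positive.
++ */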
++
++/*
++ * Attempt to schedule a merge of bfqq with the currently in-service
++ * queue or with a close queue among the scheduled queues. Return
++ * NULL if no merge was scheduled, a pointer to the shared bfq_queue
++ * structure otherwise.
++ *
++ * The OOM queue is not allowed to participate in cooperation: in fact, since
++ * the requests temporarily redirected to the OOM queue could be redirected
++ * again to dedicated queues at any time, the state needed to correctly
++ * handle merging with the OOM queue would be quite complex and expensive
++ * to maintain. Besides, in a condition as critical as an out-of-memory one,
++ * the benefits of queue merging may be of little relevance, or even negligible.
++ *
++ * Weight-raised queues can be merged only if their weight-raising
++ * period has just started. In fact cooperating processes are usually
++ * started together. Thus, with this filter we avoid false positives
++ * that would jeopardize low-latency guarantees.
++ *
++ * WARNING: queue merging may impair fairness among non-weight raised
++ * queues, for at least two reasons: 1) the original weight of a
++ * merged queue may change during the merged state, 2) even if the
++ * weight stays the same, a merged queue may be bloated with many more
++ * requests than those produced by its originally-associated
++ * process.
++ */
++static struct bfq_queue *
++bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ void *io_struct, bool request)
++{
++ struct bfq_queue *in_service_bfqq, *new_bfqq;
++
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
++ if (io_struct && wr_from_too_long(bfqq) &&
++ likely(bfqq != &bfqd->oom_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have looked for coop, but bfq%d wr",
++ bfqq->pid);
++
++ if (!io_struct ||
++ wr_from_too_long(bfqq) ||
++ unlikely(bfqq == &bfqd->oom_bfqq))
++ return NULL;
++
++ /* If there is only one backlogged queue, don't search. */
++ if (bfqd->busy_queues == 1)
++ return NULL;
++
++ in_service_bfqq = bfqd->in_service_queue;
++
++ if (in_service_bfqq && in_service_bfqq != bfqq &&
++ bfqd->in_service_bic && wr_from_too_long(in_service_bfqq)
++ && likely(in_service_bfqq != &bfqd->oom_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have tried merge with in-service-queue, but wr");
++
++ if (!in_service_bfqq || in_service_bfqq == bfqq ||
++ !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) ||
++ unlikely(in_service_bfqq == &bfqd->oom_bfqq))
++ goto check_scheduled;
++
++ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ bfqq->entity.parent == in_service_bfqq->entity.parent &&
++ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
++ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
++ if (new_bfqq)
++ return new_bfqq;
++ }
++ /*
++ * Check whether there is a cooperator among currently scheduled
++ * queues. The only thing we need is that the bio/request is not
++ * NULL, as we need it to establish whether a cooperator exists.
++ */
++check_scheduled:
++ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
++ bfq_io_struct_pos(io_struct, request));
++
++ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent);
++
++ if (new_bfqq && wr_from_too_long(new_bfqq) &&
++ likely(new_bfqq != &bfqd->oom_bfqq) &&
++ bfq_may_be_close_cooperator(bfqq, new_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have merged with bfq%d, but wr",
++ new_bfqq->pid);
++
++ if (new_bfqq && !wr_from_too_long(new_bfqq) &&
++ likely(new_bfqq != &bfqd->oom_bfqq) &&
++ bfq_may_be_close_cooperator(bfqq, new_bfqq))
++ return bfq_setup_merge(bfqq, new_bfqq);
++
++ return NULL;
++}
++
++static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
++{
++ struct bfq_io_cq *bic = bfqq->bic;
++
++ /*
++ * If !bfqq->bic, the queue is already shared or its requests
++ * have already been redirected to a shared queue; both idle window
++ * and weight raising state have already been saved. Do nothing.
++ */
++ if (!bic)
++ return;
++
++ bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
++ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
++ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
++ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
++ bic->saved_wr_coeff = bfqq->wr_coeff;
++ bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
++ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
++ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++}
++
++static void bfq_get_bic_reference(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic has a non-NULL value, the bic to which it belongs
++ * is about to begin using a shared bfq_queue.
++ */
++ if (bfqq->bic)
++ atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
++}
++
++static void
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++ (unsigned long) new_bfqq->pid);
++ /* Save weight raising and idle window of the merged queues */
++ bfq_bfqq_save_state(bfqq);
++ bfq_bfqq_save_state(new_bfqq);
++ if (bfq_bfqq_IO_bound(bfqq))
++ bfq_mark_bfqq_IO_bound(new_bfqq);
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ /*
++ * If bfqq is weight-raised, then let new_bfqq inherit
++ * weight-raising. To reduce false positives, neglect the case
++ * where bfqq has just been created, but has not yet made it
++ * to be weight-raised (which may happen because EQM may merge
++ * bfqq even before bfq_add_request is executed for the first
++ * time for bfqq). Handling this case would however be very
++ * easy, thanks to the flag just_created.
++ */
++ if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
++ new_bfqq->wr_coeff = bfqq->wr_coeff;
++ new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
++ new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
++ new_bfqq->wr_start_at_switch_to_srt =
++ bfqq->wr_start_at_switch_to_srt;
++ if (bfq_bfqq_busy(new_bfqq)) {
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ }
++
++ new_bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, new_bfqq,
++ "wr start after merge with %d, rais_max_time %u",
++ bfqq->pid,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
++ bfqq->wr_coeff = 1;
++ bfqq->entity.prio_changed = 1;
++ if (bfq_bfqq_busy(bfqq)) {
++ bfqd->wr_busy_queues--;
++ BUG_ON(bfqd->wr_busy_queues < 0);
++ }
++
++ }
++
++ bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
++ bfqd->wr_busy_queues);
++
++ /*
++ * Grab a reference to the bic, to prevent it from being destroyed
++ * before being possibly touched by a bfq_split_bfqq().
++ */
++ bfq_get_bic_reference(bfqq);
++ bfq_get_bic_reference(new_bfqq);
++ /*
++ * Merge queues (that is, let bic redirect its requests to new_bfqq)
++ */
++ bic_set_bfqq(bic, new_bfqq, 1);
++ bfq_mark_bfqq_coop(new_bfqq);
++ /*
++ * new_bfqq now belongs to at least two bics (it is a shared queue):
++ * set new_bfqq->bic to NULL. bfqq either:
++ * - does not belong to any bic any more, and hence bfqq->bic must
++ * be set to NULL, or
++ * - is a queue whose owning bics have already been redirected to a
++ * different queue, hence the queue is destined to not belong to
++ * any bic soon and bfqq->bic is already NULL (therefore the next
++ * assignment causes no harm).
++ */
++ new_bfqq->bic = NULL;
++ bfqq->bic = NULL;
++ /* release process reference to bfqq */
++ bfq_put_queue(bfqq);
++}
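++
++/*
++ * Illustrative note (hypothetical coefficients): if bfqq is being
++ * weight-raised with, say, wr_coeff == 30 while new_bfqq has wr_coeff == 1,
++ * the code above transfers the raising: new_bfqq inherits coefficient 30
++ * together with the remaining raising period, while bfqq falls back to
++ * coefficient 1. wr_busy_queues is incremented or decremented only for
++ * whichever of the two queues is currently busy, so the global counter
++ * stays consistent.
++ */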
++
++static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ bool is_sync = op_is_sync(bio->bi_opf);
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq, *new_bfqq;
++
++ /*
++ * Disallow merge of a sync bio into an async request.
++ */
++ if (is_sync && !rq_is_sync(rq))
++ return false;
++
++ /*
++ * Lookup the bfqq that this bio will be queued with. Allow
++ * merge only if rq is queued there.
++ * Queue lock is held here.
++ */
++ bic = bfq_bic_lookup(bfqd, current->io_context);
++ if (!bic)
++ return false;
++
++ bfqq = bic_to_bfqq(bic, is_sync);
++ /*
++ * We take advantage of this function to perform an early merge
++ * of the queues of possible cooperating processes.
++ */
++ if (bfqq) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++ if (new_bfqq) {
++ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
++ /*
++ * If we get here, the bio will be queued in the
++ * shared queue, i.e., new_bfqq, so use new_bfqq
++ * to decide whether bio and rq can be merged.
++ */
++ bfqq = new_bfqq;
++ }
++ }
++
++ return bfqq == RQ_BFQQ(rq);
++}
++
++static int bfq_allow_rq_merge(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ return RQ_BFQQ(rq) == RQ_BFQQ(next);
++}
++
++/*
++ * Set the maximum time for the in-service queue to consume its
++ * budget. This prevents seeky processes from lowering the throughput.
++ * In practice, a time-slice service scheme is used with seeky
++ * processes.
++ */
++static void bfq_set_budget_timeout(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ unsigned int timeout_coeff;
++
++ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
++ timeout_coeff = 1;
++ else
++ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
++
++ bfqd->last_budget_start = ktime_get();
++
++ bfqq->budget_timeout = jiffies +
++ bfqd->bfq_timeout * timeout_coeff;
++
++ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff));
++}
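++
++/*
++ * Worked example (hypothetical values): timeout_coeff is the ratio
++ * entity.weight / entity.orig_weight, so a non-soft-real-time queue whose
++ * weight is currently twice its original weight gets coefficient 2 and,
++ * with a bfq_timeout of, say, 125 ms, up to ~250 ms to consume its budget;
++ * a queue at its original weight keeps the plain 125 ms slice, and soft
++ * real-time queues always use coefficient 1.
++ */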
++
++static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ if (bfqq) {
++ bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
++ bfq_mark_bfqq_must_alloc(bfqq);
++ bfq_clear_bfqq_fifo_expire(bfqq);
++
++ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++ BUG_ON(bfqq == bfqd->in_service_queue);
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
++ bfqq->wr_coeff > 1 &&
++ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ time_is_before_jiffies(bfqq->budget_timeout)) {
++ /*
++ * For soft real-time queues, move the start
++ * of the weight-raising period forward by the
++ * time the queue has not received any
++ * service. Otherwise, a relatively long
++ * service delay is likely to cause the
++ * weight-raising period of the queue to end,
++ * because of the short duration of the
++ * weight-raising period of a soft real-time
++ * queue. It is worth noting that this move
++ * is not so dangerous for the other queues,
++ * because soft real-time queues are not
++ * greedy.
++ *
++ * To not add a further variable, we use the
++ * overloaded field budget_timeout to
++ * determine for how long the queue has not
++ * received service, i.e., how much time has
++ * elapsed since the queue expired. However,
++ * this is a little imprecise, because
++ * budget_timeout is set to jiffies only when
++ * bfqq both expires and remains with no
++ * pending request.
++ */
++ if (time_after(bfqq->budget_timeout,
++ bfqq->last_wr_start_finish))
++ bfqq->last_wr_start_finish +=
++ jiffies - bfqq->budget_timeout;
++ else
++ bfqq->last_wr_start_finish = jiffies;
++
++ if (time_is_after_jiffies(bfqq->last_wr_start_finish)) {
++ pr_crit(
++ "BFQ WARNING:last %lu budget %lu jiffies %lu",
++ bfqq->last_wr_start_finish,
++ bfqq->budget_timeout,
++ jiffies);
++ pr_crit("diff %lu", jiffies -
++ max_t(unsigned long,
++ bfqq->last_wr_start_finish,
++ bfqq->budget_timeout));
++ bfqq->last_wr_start_finish = jiffies;
++ }
++ }
++
++ bfq_set_budget_timeout(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_in_service_queue, cur-budget = %d",
++ bfqq->entity.budget);
++ } else
++ bfq_log(bfqd, "set_in_service_queue: NULL");
++
++ bfqd->in_service_queue = bfqq;
++}
++
++/*
++ * Get and set a new queue for service.
++ */
++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
++
++ __bfq_set_in_service_queue(bfqd, bfqq);
++ return bfqq;
++}
++
++static void bfq_arm_slice_timer(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
++ struct bfq_io_cq *bic;
++ u32 sl;
++
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Processes have exited, don't wait. */
++ bic = bfqd->in_service_bic;
++ if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0)
++ return;
++
++ bfq_mark_bfqq_wait_request(bfqq);
++
++ /*
++ * We don't want to idle for seeks, but we do want to allow
++ * fair distribution of slice time for a process doing back-to-back
++ * seeks. So allow a little bit of time for it to submit a new rq.
++ *
++ * To prevent processes with (partly) seeky workloads from
++ * being too ill-treated, grant them a small fraction of the
++ * assigned budget before reducing the waiting time to
++ * BFQ_MIN_TT. This happened to help reduce latency.
++ */
++ sl = bfqd->bfq_slice_idle;
++ /*
++ * Unless the queue is being weight-raised or the scenario is
++ * asymmetric, grant only minimum idle time if the queue
++ * is seeky. A long idling is preserved for a weight-raised
++ * queue, or, more generally, in an asymmetric scenario,
++ * because a long idling is needed for guaranteeing to a queue
++ * its reserved share of the throughput (in particular, it is
++ * needed if the queue has a higher weight than some other
++ * queue).
++ */
++ if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
++ bfq_symmetric_scenario(bfqd))
++ sl = min_t(u32, sl, BFQ_MIN_TT);
++
++ bfqd->last_idling_start = ktime_get();
++ hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
++ HRTIMER_MODE_REL);
++ bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
++ bfq_log(bfqd, "arm idle: %ld/%ld ms",
++ sl / NSEC_PER_MSEC, bfqd->bfq_slice_idle / NSEC_PER_MSEC);
++}
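++
++/*
++ * Example (assuming the usual defaults): with bfq_slice_idle around 8 ms,
++ * a weight-raised queue, or any queue in an asymmetric scenario, is
++ * granted the full 8 ms of idling, whereas a seeky, non-raised queue in a
++ * symmetric scenario only gets the much shorter BFQ_MIN_TT wait before
++ * idle_slice_timer fires and the queue may be expired.
++ */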
++
++/*
++ * In autotuning mode, max_budget is dynamically recomputed as the
++ * number of sectors transferred in one timeout at the estimated peak
++ * rate. This enables BFQ to utilize a full timeslice with a full
++ * budget, even if the in-service queue is served at peak rate. And
++ * this maximises throughput with sequential workloads.
++ */
++static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
++{
++ return (u64)bfqd->peak_rate * USEC_PER_MSEC *
++ jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
++}
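++
++/*
++ * Worked example (hypothetical rate): peak_rate is kept in sectors/usec,
++ * left-shifted by BFQ_RATE_SHIFT. For a device sustaining about 0.2
++ * sectors/usec (roughly 100 MB/s with 512-byte sectors) and a timeout of
++ * about 125 ms, the formula above yields
++ *
++ *   0.2 sectors/usec * 1000 usec/ms * 125 ms ~= 25000 sectors,
++ *
++ * i.e. the amount of data the device can transfer in one full timeslice
++ * at its estimated peak rate; the final shift only removes the
++ * fixed-point scaling.
++ */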
++
++/*
++ * Update parameters related to throughput and responsiveness, as a
++ * function of the estimated peak rate. See comments on
++ * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
++ */
++static void update_thr_responsiveness_params(struct bfq_data *bfqd)
++{
++ int dev_type = blk_queue_nonrot(bfqd->queue);
++
++ if (bfqd->bfq_user_max_budget == 0) {
++ bfqd->bfq_max_budget =
++ bfq_calc_max_budget(bfqd);
++ BUG_ON(bfqd->bfq_max_budget < 0);
++ bfq_log(bfqd, "new max_budget = %d",
++ bfqd->bfq_max_budget);
++ }
++
++ if (bfqd->device_speed == BFQ_BFQD_FAST &&
++ bfqd->peak_rate < device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_SLOW;
++ bfqd->RT_prod = R_slow[dev_type] *
++ T_slow[dev_type];
++ } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
++ bfqd->peak_rate > device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_FAST;
++ bfqd->RT_prod = R_fast[dev_type] *
++ T_fast[dev_type];
++ }
++
++ bfq_log(bfqd,
++"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec",
++ dev_type == 0 ? "ROT" : "NONROT",
++ bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
++ bfqd->device_speed == BFQ_BFQD_FAST ?
++ (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
++ (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
++ (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
++ BFQ_RATE_SHIFT);
++}
++
++static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq)
++{
++ if (rq != NULL) { /* new rq dispatch now, reset accordingly */
++ bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
++ bfqd->peak_rate_samples = 1;
++ bfqd->sequential_samples = 0;
++ bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
++ blk_rq_sectors(rq);
++ } else /* no new rq dispatched, just reset the number of samples */
++ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
++
++ bfq_log(bfqd,
++ "reset_rate_computation at end, sample %u/%u tot_sects %llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ bfqd->tot_sectors_dispatched);
++}
++
++static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
++{
++ u32 rate, weight, divisor;
++
++ /*
++ * For the convergence property to hold (see comments on
++ * bfq_update_peak_rate()) and for the assessment to be
++ * reliable, a minimum number of samples must be present, and
++ * a minimum amount of time must have elapsed. If not so, do
++ * not compute new rate. Just reset parameters, to get ready
++ * for a new evaluation attempt.
++ */
++ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
++ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) {
++ bfq_log(bfqd,
++ "update_rate_reset: only resetting, delta_first %lluus samples %d",
++ bfqd->delta_from_first>>10, bfqd->peak_rate_samples);
++ goto reset_computation;
++ }
++
++ /*
++ * If a new request completion has occurred after last
++ * dispatch, then, to approximate the rate at which requests
++ * have been served by the device, it is more precise to
++ * extend the observation interval to the last completion.
++ */
++ bfqd->delta_from_first =
++ max_t(u64, bfqd->delta_from_first,
++ bfqd->last_completion - bfqd->first_dispatch);
++
++ BUG_ON(bfqd->delta_from_first == 0);
++ /*
++ * Rate computed in sects/usec, and not sects/nsec, for
++ * precision issues.
++ */
++ rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
++ div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
++
++ bfq_log(bfqd,
++"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
++ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ rate > 20<<BFQ_RATE_SHIFT);
++
++ /*
++ * Peak rate not updated if:
++ * - the fraction of sequential dispatches is below 3/4 of the
++ * total, and the rate is below the current estimated peak rate
++ * - rate is unreasonably high (> 20M sectors/sec)
++ */
++ if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
++ rate <= bfqd->peak_rate) ||
++ rate > 20<<BFQ_RATE_SHIFT) {
++ bfq_log(bfqd,
++ "update_rate_reset: goto reset, samples %u/%u rate/peak %llu/%llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ goto reset_computation;
++ } else {
++ bfq_log(bfqd,
++ "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ }
++
++ /*
++ * We have to update the peak rate, at last! To this purpose,
++ * we use a low-pass filter. We compute the smoothing constant
++ * of the filter as a function of the 'weight' of the new
++ * measured rate.
++ *
++ * As can be seen in the formulas below, we define this weight as a
++ * quantity proportional to how sequential the workload is,
++ * and to how long the observation time interval is.
++ *
++ * The weight runs from 0 to 8. The maximum value of the
++ * weight, 8, yields the minimum value for the smoothing
++ * constant. At this minimum value for the smoothing constant,
++ * the measured rate contributes for half of the next value of
++ * the estimated peak rate.
++ *
++ * So, the first step is to compute the weight as a function
++ * of how sequential the workload is. Note that the weight
++ * cannot reach 9, because bfqd->sequential_samples cannot
++ * become equal to bfqd->peak_rate_samples, which, in its
++ * turn, holds true because bfqd->sequential_samples is not
++ * incremented for the first sample.
++ */
++ weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
++
++ /*
++ * Second step: further refine the weight as a function of the
++ * duration of the observation interval.
++ */
++ weight = min_t(u32, 8,
++ div_u64(weight * bfqd->delta_from_first,
++ BFQ_RATE_REF_INTERVAL));
++
++ /*
++ * Divisor ranging from 10, for minimum weight, to 2, for
++ * maximum weight.
++ */
++ divisor = 10 - weight;
++ BUG_ON(divisor == 0);
++
++ /*
++ * Finally, update peak rate:
++ *
++ * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
++ */
++ bfqd->peak_rate *= divisor-1;
++ bfqd->peak_rate /= divisor;
++ rate /= divisor; /* smoothing constant alpha = 1/divisor */
++
++ bfq_log(bfqd,
++ "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u",
++ divisor,
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT),
++ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT));
++
++ BUG_ON(bfqd->peak_rate == 0);
++ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
++
++ bfqd->peak_rate += rate;
++ update_thr_responsiveness_params(bfqd);
++ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
++
++reset_computation:
++ bfq_reset_rate_computation(bfqd, rq);
++}
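++
++/*
++ * Worked example of the filter above (hypothetical samples): with
++ * peak_rate_samples == 80, of which sequential_samples == 60, the first
++ * step gives weight = (9 * 60) / 80 = 6. If the observation interval is
++ * about as long as BFQ_RATE_REF_INTERVAL, the second step leaves the
++ * weight at 6, hence divisor = 10 - 6 = 4 and the update becomes
++ *
++ *   peak_rate = peak_rate * 3/4 + rate * 1/4,
++ *
++ * i.e. the new measurement contributes one quarter of the next estimate;
++ * a fully sequential workload observed over a long interval (weight 8,
++ * divisor 2) would let it contribute one half.
++ */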
++
++/*
++ * Update the read/write peak rate (the main quantity used for
++ * auto-tuning, see update_thr_responsiveness_params()).
++ *
++ * It is not trivial to estimate the peak rate (correctly): because of
++ * the presence of sw and hw queues between the scheduler and the
++ * device components that finally serve I/O requests, it is hard to
++ * say exactly when a given dispatched request is served inside the
++ * device, and for how long. As a consequence, it is hard to know
++ * precisely at what rate a given set of requests is actually served
++ * by the device.
++ *
++ * On the opposite end, the dispatch time of any request is trivially
++ * available, and, from this piece of information, the "dispatch rate"
++ * of requests can be immediately computed. So, the idea in the next
++ * function is to use what is known, namely request dispatch times
++ * (plus, when useful, request completion times), to estimate what is
++ * unknown, namely in-device request service rate.
++ *
++ * The main issue is that, because of the above facts, the rate at
++ * which a certain set of requests is dispatched over a certain time
++ * interval can vary greatly with respect to the rate at which the
++ * same requests are then served. But, since the size of any
++ * intermediate queue is limited, and the service scheme is lossless
++ * (no request is silently dropped), the following obvious convergence
++ * property holds: the number of requests dispatched MUST become
++ * closer and closer to the number of requests completed as the
++ * observation interval grows. This is the key property used in
++ * the next function to estimate the peak service rate as a function
++ * of the observed dispatch rate. The function assumes to be invoked
++ * on every request dispatch.
++ */
++static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
++{
++ u64 now_ns = ktime_get_ns();
++
++ if (bfqd->peak_rate_samples == 0) { /* first dispatch */
++ bfq_log(bfqd,
++ "update_peak_rate: goto reset, samples %d",
++ bfqd->peak_rate_samples);
++ bfq_reset_rate_computation(bfqd, rq);
++ goto update_last_values; /* will add one sample */
++ }
++
++ /*
++ * Device idle for very long: the observation interval lasting
++ * up to this dispatch cannot be a valid observation interval
++ * for computing a new peak rate (similarly to the late-
++ * completion event in bfq_completed_request()). Go to
++ * update_rate_and_reset to have the following three steps
++ * taken:
++ * - close the observation interval at the last (previous)
++ * request dispatch or completion
++ * - compute rate, if possible, for that observation interval
++ * - start a new observation interval with this dispatch
++ */
++ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
++ bfqd->rq_in_driver == 0) {
++ bfq_log(bfqd,
++"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d",
++ (now_ns - bfqd->last_dispatch)>>10,
++ bfqd->peak_rate_samples);
++ goto update_rate_and_reset;
++ }
++
++ /* Update sampling information */
++ bfqd->peak_rate_samples++;
++
++ if ((bfqd->rq_in_driver > 0 ||
++ now_ns - bfqd->last_completion < BFQ_MIN_TT)
++ && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
++ bfqd->sequential_samples++;
++
++ bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
++
++ /* Reset max observed rq size every 32 dispatches */
++ if (likely(bfqd->peak_rate_samples % 32))
++ bfqd->last_rq_max_size =
++ max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
++ else
++ bfqd->last_rq_max_size = blk_rq_sectors(rq);
++
++ bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
++
++ bfq_log(bfqd,
++ "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ bfqd->tot_sectors_dispatched,
++ bfqd->delta_from_first>>10);
++
++ /* Target observation interval not yet reached, go on sampling */
++ if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
++ goto update_last_values;
++
++update_rate_and_reset:
++ bfq_update_rate_reset(bfqd, rq);
++update_last_values:
++ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
++ bfqd->last_dispatch = now_ns;
++
++ bfq_log(bfqd,
++ "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu",
++ (now_ns - bfqd->first_dispatch)>>10,
++ (unsigned long long) bfqd->last_position,
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ bfq_log(bfqd,
++ "update_peak_rate: samples at end %d", bfqd->peak_rate_samples);
++}
++
++/*
++ * Move request from internal lists to the dispatch list of the request queue
++ */
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ /*
++ * For consistency, the next instruction should have been executed
++ * after removing the request from the queue and dispatching it.
++ * We execute this instruction instead before bfq_remove_request()
++ * (and hence introduce a temporary inconsistency), for efficiency.
++ * In fact, in a forced_dispatch, this prevents two counters related
++ * to bfqq->dispatched from being uselessly decremented if bfqq
++ * is not in service, and then incremented again after
++ * bfqq->dispatched itself is incremented.
++ */
++ bfqq->dispatched++;
++ bfq_update_peak_rate(q->elevator->elevator_data, rq);
++
++ bfq_remove_request(rq);
++ elv_dispatch_sort(q, rq);
++}
++
++static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ /*
++ * If this bfqq is shared between multiple processes, check
++ * to make sure that those processes are still issuing I/Os
++ * within the mean seek distance. If not, it may be time to
++ * break the queues apart again.
++ */
++ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
++ bfq_mark_bfqq_split_coop(bfqq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ if (bfqq->dispatched == 0)
++ /*
++ * Overloading budget_timeout field to store
++ * the time at which the queue remains with no
++ * backlog and no outstanding request; used by
++ * the weight-raising mechanism.
++ */
++ bfqq->budget_timeout = jiffies;
++
++ bfq_del_bfqq_busy(bfqd, bfqq, true);
++ } else {
++ bfq_requeue_bfqq(bfqd, bfqq);
++ /*
++ * Resort priority tree of potential close cooperators.
++ */
++ bfq_pos_tree_add_move(bfqd, bfqq);
++ }
++
++ /*
++ * All in-service entities must have been properly deactivated
++ * or requeued before executing the next function, which
++ * resets all in-service entities as no longer in service.
++ */
++ __bfq_bfqd_reset_in_service(bfqd);
++}
++
++/**
++ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
++ * @bfqd: device data.
++ * @bfqq: queue to update.
++ * @reason: reason for expiration.
++ *
++ * Handle the feedback on @bfqq budget at queue expiration.
++ * See the body for detailed comments.
++ */
++static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ enum bfqq_expiration reason)
++{
++ struct request *next_rq;
++ int budget, min_budget;
++
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ min_budget = bfq_min_budget(bfqd);
++
++ if (bfqq->wr_coeff == 1)
++ budget = bfqq->max_budget;
++ else /*
++ * Use a constant, low budget for weight-raised queues,
++ * to help achieve a low latency. Keep it slightly higher
++ * than the minimum possible budget, to cause slightly
++ * fewer expirations.
++ */
++ budget = 2 * min_budget;
++
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
++ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
++ budget, bfq_min_budget(bfqd));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
++
++ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
++ switch (reason) {
++ /*
++ * Caveat: in all the following cases we trade latency
++ * for throughput.
++ */
++ case BFQ_BFQQ_TOO_IDLE:
++ /*
++ * This is the only case where we may reduce
++ * the budget: if there is no request of the
++ * process still waiting for completion, then
++ * we assume (tentatively) that the timer has
++ * expired because the batch of requests of
++ * the process could have been served with a
++ * smaller budget. Hence, betting that the
++ * process will behave in the same way when it
++ * becomes backlogged again, we reduce its
++ * next budget. As long as we guess right,
++ * this budget cut reduces the latency
++ * experienced by the process.
++ *
++ * However, if there are still outstanding
++ * requests, then the process may have not yet
++ * issued its next request just because it is
++ * still waiting for the completion of some of
++ * the still outstanding ones. So in this
++ * subcase we do not reduce its budget, on the
++ * contrary we increase it to possibly boost
++ * the throughput, as discussed in the
++ * comments to the BUDGET_TIMEOUT case.
++ */
++ if (bfqq->dispatched > 0) /* still outstanding reqs */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ else {
++ if (budget > 5 * min_budget)
++ budget -= 4 * min_budget;
++ else
++ budget = min_budget;
++ }
++ break;
++ case BFQ_BFQQ_BUDGET_TIMEOUT:
++ /*
++ * We double the budget here because it gives
++ * the chance to boost the throughput if this
++ * is not a seeky process (and has bumped into
++ * this timeout because of, e.g., ZBR).
++ */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_BUDGET_EXHAUSTED:
++ /*
++ * The process still has backlog, and did not
++ * let either the budget timeout or the disk
++ * idling timeout expire. Hence it is not
++ * seeky, has a short thinktime and may be
++ * happy with a higher budget too. So
++ * definitely increase the budget of this good
++ * candidate to boost the disk throughput.
++ */
++ budget = min(budget * 4, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_NO_MORE_REQUESTS:
++ /*
++ * For queues that expire for this reason, it
++ * is particularly important to keep the
++ * budget close to the actual service they
++ * need. Doing so reduces the timestamp
++ * misalignment problem described in the
++ * comments in the body of
++ * __bfq_activate_entity. In fact, suppose
++ * that a queue systematically expires for
++ * BFQ_BFQQ_NO_MORE_REQUESTS and presents a
++ * new request in time to enjoy timestamp
++ * back-shifting. The larger the budget of the
++ * queue is with respect to the service the
++ * queue actually requests in each service
++ * slot, the more times the queue can be
++ * reactivated with the same virtual finish
++ * time. It follows that, even if this finish
++ * time is pushed to the system virtual time
++ * to reduce the consequent timestamp
++ * misalignment, the queue unjustly enjoys for
++ * many re-activations a lower finish time
++ * than all newly activated queues.
++ *
++ * The service needed by bfqq is measured
++ * quite precisely by bfqq->entity.service.
++ * Since bfqq does not enjoy device idling,
++ * bfqq->entity.service is equal to the number
++ * of sectors that the process associated with
++ * bfqq requested to read/write before waiting
++ * for request completions, or blocking for
++ * other reasons.
++ */
++ budget = max_t(int, bfqq->entity.service, min_budget);
++ break;
++ default:
++ return;
++ }
++ } else if (!bfq_bfqq_sync(bfqq))
++ /*
++ * Async queues get always the maximum possible
++ * budget, as for them we do not care about latency
++ * (in addition, their ability to dispatch is limited
++ * by the charging factor).
++ */
++ budget = bfqd->bfq_max_budget;
++
++ bfqq->max_budget = budget;
++
++ if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
++ !bfqd->bfq_user_max_budget)
++ bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
++
++ /*
++ * If there is still backlog, then assign a new budget, making
++ * sure that it is large enough for the next request. Since
++ * the finish time of bfqq must be kept in sync with the
++ * budget, be sure to call __bfq_bfqq_expire() *after* this
++ * update.
++ *
++ * If there is no backlog, then no need to update the budget;
++ * it will be updated on the arrival of a new request.
++ */
++ next_rq = bfqq->next_rq;
++ if (next_rq) {
++ BUG_ON(reason == BFQ_BFQQ_TOO_IDLE ||
++ reason == BFQ_BFQQ_NO_MORE_REQUESTS);
++ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ BUG_ON(!bfq_bfqq_busy(bfqq));
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
++ next_rq ? blk_rq_sectors(next_rq) : 0,
++ bfqq->entity.budget);
++}
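++
++/*
++ * Compact example of the feedback above (hypothetical budgets, for a sync
++ * queue that is not weight-raised): let min_budget be B and let the queue
++ * currently have max_budget == 10*B. Expiring as TOO_IDLE with no request
++ * in flight shrinks the budget to 10*B - 4*B = 6*B; expiring for
++ * BUDGET_TIMEOUT doubles it to 20*B (capped by bfq_max_budget); expiring
++ * for BUDGET_EXHAUSTED quadruples it to 40*B under the same cap; and on
++ * NO_MORE_REQUESTS the budget is set to the service actually received,
++ * but never below B.
++ */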
++
++/*
++ * Return true if the process associated with bfqq is "slow". The slow
++ * flag is used, in addition to the budget timeout, to reduce the
++ * amount of service provided to seeky processes, and thus reduce
++ * their chances to lower the throughput. More details in the comments
++ * on the function bfq_bfqq_expire().
++ *
++ * An important observation is in order: as discussed in the comments
++ * on the function bfq_update_peak_rate(), with devices with internal
++ * queues, it is hard, if possible at all, to know when and for how long
++ * an I/O request is processed by the device (apart from the trivial
++ * I/O pattern where a new request is dispatched only after the
++ * previous one has been completed). This makes it hard to evaluate
++ * the real rate at which the I/O requests of each bfq_queue are
++ * served. In fact, for an I/O scheduler like BFQ, serving a
++ * bfq_queue means just dispatching its requests during its service
++ * slot (i.e., until the budget of the queue is exhausted, or the
++ * queue remains idle, or, finally, a timeout fires). But, during the
++ * service slot of a bfq_queue, around 100 ms at most, the device may
++ * be even still processing requests of bfq_queues served in previous
++ * service slots. On the opposite end, the requests of the in-service
++ * bfq_queue may be completed after the service slot of the queue
++ * finishes.
++ *
++ * Anyway, unless more sophisticated solutions are used
++ * (where possible), the sum of the sizes of the requests dispatched
++ * during the service slot of a bfq_queue is probably the only
++ * approximation available for the service received by the bfq_queue
++ * during its service slot. And this sum is the quantity used in this
++ * function to evaluate the I/O speed of a process.
++ */
++static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ bool compensate, enum bfqq_expiration reason,
++ unsigned long *delta_ms)
++{
++ ktime_t delta_ktime;
++ u32 delta_usecs;
++ bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekiness */
++
++ if (!bfq_bfqq_sync(bfqq))
++ return false;
++
++ if (compensate)
++ delta_ktime = bfqd->last_idling_start;
++ else
++ delta_ktime = ktime_get();
++ delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
++ delta_usecs = ktime_to_us(delta_ktime);
++
++ /* don't use too short time intervals */
++ if (delta_usecs < 1000) {
++ if (blk_queue_nonrot(bfqd->queue))
++ /*
++ * give same worst-case guarantees as idling
++ * for seeky
++ */
++ *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
++ else /* charge at least one seek */
++ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
++
++ bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs);
++
++ return slow;
++ }
++
++ *delta_ms = delta_usecs / USEC_PER_MSEC;
++
++ /*
++ * Use only long (> 20ms) intervals to filter out excessive
++ * spikes in service rate estimation.
++ */
++ if (delta_usecs > 20000) {
++ /*
++ * Caveat for rotational devices: processes doing I/O
++ * in the slower disk zones tend to be slow(er) even
++ * if not seeky. In this respect, the estimated peak
++ * rate is likely to be an average over the disk
++ * surface. Accordingly, to not be too harsh with
++ * unlucky processes, a process is deemed slow only if
++ * its rate has been lower than half of the estimated
++ * peak rate.
++ */
++ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
++ bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d",
++ bfqq->entity.service, bfqd->bfq_max_budget);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
++
++ return slow;
++}
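++
++/*
++ * Example (hypothetical numbers): suppose bfq_max_budget corresponds to
++ * 25000 sectors and the queue received 8000 sectors of service over a
++ * 60 ms interval. The interval exceeds 20 ms, so the rate check applies
++ * and, since 8000 < 25000/2, the queue is deemed slow and will be charged
++ * with the time it occupied the device rather than with the service it
++ * received. Intervals of 20 ms or less are considered too noisy for a
++ * rate check, and the initial seekiness-based guess is returned instead.
++ */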
++
++/*
++ * To be deemed as soft real-time, an application must meet two
++ * requirements. First, the application must not require an average
++ * bandwidth higher than the approximate bandwidth required to play back or
++ * record a compressed high-definition video.
++ * The next function is invoked on the completion of the last request of a
++ * batch, to compute the next-start time instant, soft_rt_next_start, such
++ * that, if the next request of the application does not arrive before
++ * soft_rt_next_start, then the above requirement on the bandwidth is met.
++ *
++ * The second requirement is that the request pattern of the application is
++ * isochronous, i.e., that, after issuing a request or a batch of requests,
++ * the application stops issuing new requests until all its pending requests
++ * have been completed. After that, the application may issue a new batch,
++ * and so on.
++ * For this reason the next function is invoked to compute
++ * soft_rt_next_start only for applications that meet this requirement,
++ * whereas soft_rt_next_start is set to infinity for applications that do
++ * not.
++ *
++ * Unfortunately, even a greedy application may happen to behave in an
++ * isochronous way if the CPU load is high. In fact, the application may
++ * stop issuing requests while the CPUs are busy serving other processes,
++ * then restart, then stop again for a while, and so on. In addition, if
++ * the disk achieves a low enough throughput with the request pattern
++ * issued by the application (e.g., because the request pattern is random
++ * and/or the device is slow), then the application may meet the above
++ * bandwidth requirement too. To prevent such a greedy application to be
++ * deemed as soft real-time, a further rule is used in the computation of
++ * soft_rt_next_start: soft_rt_next_start must be higher than the current
++ * time plus the maximum time for which the arrival of a request is waited
++ * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
++ * This filters out greedy applications, as the latter issue instead their
++ * next request as soon as possible after the last one has been completed
++ * (in contrast, when a batch of requests is completed, a soft real-time
++ * application spends some time processing data).
++ *
++ * Unfortunately, the last filter may easily generate false positives if
++ * only bfqd->bfq_slice_idle is used as a reference time interval and one
++ * or both the following cases occur:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or higher
++ * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
++ * HZ=100.
++ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
++ * for a while, then suddenly 'jump' by several units to recover the lost
++ * increments. This seems to happen, e.g., inside virtual machines.
++ * To address this issue, we do not use as a reference time interval just
++ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
++ * particular we add the minimum number of jiffies for which the filter
++ * seems to be quite precise also in embedded systems and KVM/QEMU virtual
++ * machines.
++ */
++static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq,
++"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u",
++ bfqq->service_from_backlogged,
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate));
++
++ return max(bfqq->last_idle_bklogged +
++ HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
++}
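++
++/*
++ * Worked example (hypothetical values): if the queue accumulated
++ * service_from_backlogged == 7000 sectors and bfq_wr_max_softrt_rate is
++ * 7000 sectors/sec, the bandwidth term above evaluates to
++ * last_idle_bklogged + HZ jiffies, i.e. the next request must not arrive
++ * earlier than one second past that reference instant for the application
++ * to stay within the allowed soft real-time bandwidth. The max() with
++ * "jiffies + slice_idle + 4 jiffies" is what filters out greedy
++ * applications that issue their next request immediately.
++ */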
++
++/*
++ * Return the farthest future time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_greatest_from_now(void)
++{
++ return jiffies + MAX_JIFFY_OFFSET;
++}
++
++/*
++ * Return the farthest past time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_smallest_from_now(void)
++{
++ return jiffies - MAX_JIFFY_OFFSET;
++}
++
++/**
++ * bfq_bfqq_expire - expire a queue.
++ * @bfqd: device owning the queue.
++ * @bfqq: the queue to expire.
++ * @compensate: if true, compensate for the time spent idling.
++ * @reason: the reason causing the expiration.
++ *
++ * If the process associated with bfqq does slow I/O (e.g., because it
++ * issues random requests), we charge bfqq with the time it has been
++ * in service instead of the service it has received (see
++ * bfq_bfqq_charge_time for details on how this goal is achieved). As
++ * a consequence, bfqq will typically get higher timestamps upon
++ * reactivation, and hence it will be rescheduled as if it had
++ * received more service than what it has actually received. In the
++ * end, bfqq receives less service in proportion to how slowly its
++ * associated process consumes its budgets (and hence how seriously it
++ * tends to lower the throughput). In addition, this time-charging
++ * strategy guarantees time fairness among slow processes. In
++ * contrast, if the process associated with bfqq is not slow, we
++ * charge bfqq exactly with the service it has received.
++ *
++ * Charging time to the first type of queues and the exact service to
++ * the other has the effect of using the WF2Q+ policy to schedule the
++ * former on a timeslice basis, without violating service domain
++ * guarantees among the latter.
++ */
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool compensate,
++ enum bfqq_expiration reason)
++{
++ bool slow;
++ unsigned long delta = 0;
++ struct bfq_entity *entity = &bfqq->entity;
++ int ref;
++
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ /*
++ * Check whether the process is slow (see bfq_bfqq_is_slow).
++ */
++ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
++
++ /*
++ * Increase service_from_backlogged before next statement,
++ * because the possible next invocation of
++ * bfq_bfqq_charge_time would likely inflate
++ * entity->service. In contrast, service_from_backlogged must
++ * contain real service, to enable the soft real-time
++ * heuristic to correctly compute the bandwidth consumed by
++ * bfqq.
++ */
++ bfqq->service_from_backlogged += entity->service;
++
++ /*
++ * As above explained, charge slow (typically seeky) and
++ * timed-out queues with the time and not the service
++ * received, to favor sequential workloads.
++ *
++ * Processes doing I/O in the slower disk zones will tend to
++ * be slow(er) even if not seeky. Therefore, since the
++ * estimated peak rate is actually an average over the disk
++ * surface, these processes may timeout just for bad luck. To
++ * avoid punishing them, do not charge time to processes that
++ * succeeded in consuming at least 2/3 of their budget. This
++ * allows BFQ to preserve enough elasticity to still perform
++ * bandwidth, and not time, distribution for processes that are only
++ * slightly unlucky or quasi-sequential.
++ */
++ if (bfqq->wr_coeff == 1 &&
++ (slow ||
++ (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++ bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
++ bfq_bfqq_charge_time(bfqd, bfqq, delta);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ if (reason == BFQ_BFQQ_TOO_IDLE &&
++ entity->service <= 2 * entity->budget / 10)
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ if (bfqd->low_latency && bfqq->wr_coeff == 1)
++ bfqq->last_wr_start_finish = jiffies;
++
++ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ /*
++ * If we get here, and there are no outstanding
++ * requests, then the request pattern is isochronous
++ * (see the comments on the function
++ * bfq_bfqq_softrt_next_start()). Thus we can compute
++ * soft_rt_next_start. If, instead, the queue still
++ * has outstanding requests, then we have to wait for
++ * the completion of all the outstanding requests to
++ * discover whether the request pattern is actually
++ * isochronous.
++ */
++ BUG_ON(bfqd->busy_queues < 1);
++ if (bfqq->dispatched == 0) {
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq, "new soft_rt_next %lu",
++ bfqq->soft_rt_next_start);
++ } else {
++ /*
++ * The application is still waiting for the
++ * completion of one or more requests:
++ * prevent it from possibly being incorrectly
++ * deemed as soft real-time by setting its
++ * soft_rt_next_start to infinity. In fact,
++ * without this assignment, the application
++ * would be incorrectly deemed as soft
++ * real-time if:
++ * 1) it issued a new request before the
++ * completion of all its in-flight
++ * requests, and
++ * 2) at that time, its soft_rt_next_start
++ * happened to be in the past.
++ */
++ bfqq->soft_rt_next_start =
++ bfq_greatest_from_now();
++ /*
++ * Schedule an update of soft_rt_next_start to when
++ * the task may be discovered to be isochronous.
++ */
++ bfq_mark_bfqq_softrt_update(bfqq);
++ }
++ }
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "expire (%d, slow %d, num_disp %d, idle_win %d, weight %d)",
++ reason, slow, bfqq->dispatched,
++ bfq_bfqq_idle_window(bfqq), entity->weight);
++
++ /*
++ * Increase, decrease or leave budget unchanged according to
++ * reason.
++ */
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
++ BUG_ON(bfqq->next_rq == NULL &&
++ bfqq->entity.budget < bfqq->entity.service);
++ ref = bfqq->ref;
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ BUG_ON(ref > 1 &&
++ !bfq_bfqq_busy(bfqq) && reason == BFQ_BFQQ_BUDGET_EXHAUSTED &&
++ !bfq_class_idle(bfqq));
++
++ /* mark bfqq as waiting a request only if a bic still points to it */
++ if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
++ reason != BFQ_BFQQ_BUDGET_TIMEOUT &&
++ reason != BFQ_BFQQ_BUDGET_EXHAUSTED)
++ bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
++}
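++
++/*
++ * Illustrative note (hypothetical case): a seeky, non-weight-raised queue
++ * that transferred only a quarter of its budget during its whole timeslice
++ * is detected as slow above and charged, through bfq_bfqq_charge_time(),
++ * with the time it kept the device busy rather than with the few sectors
++ * it moved. Its timestamps upon reactivation therefore grow as for a
++ * heavier consumer, which is how the time fairness described in the header
++ * comment is enforced, while fast, sequential queues keep being charged
++ * only for the service they actually received.
++ */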
++
++/*
++ * Budget timeout is not implemented through a dedicated timer, but
++ * just checked on request arrivals and completions, as well as on
++ * idle timer expirations.
++ */
++static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
++{
++ return time_is_before_eq_jiffies(bfqq->budget_timeout);
++}
++
++/*
++ * If we expire a queue that is actively waiting (i.e., with the
++ * device idled) for the arrival of a new request, then we may incur
++ * the timestamp misalignment problem described in the body of the
++ * function __bfq_activate_entity. Hence we return true only if this
++ * condition does not hold, or if the queue is slow enough to deserve
++ * only to be kicked off for preserving a high throughput.
++ */
++static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "may_budget_timeout: wait_request %d left %d timeout %d",
++ bfq_bfqq_wait_request(bfqq),
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
++ bfq_bfqq_budget_timeout(bfqq));
++
++ return (!bfq_bfqq_wait_request(bfqq) ||
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
++ &&
++ bfq_bfqq_budget_timeout(bfqq);
++}
++
++/*
++ * For a queue that becomes empty, device idling is allowed only if
++ * this function returns true for that queue. As a consequence, since
++ * device idling plays a critical role for both throughput boosting
++ * and service guarantees, the return value of this function plays a
++ * critical role as well.
++ *
++ * In a nutshell, this function returns true only if idling is
++ * beneficial for throughput or, even if detrimental for throughput,
++ * idling is however necessary to preserve service guarantees (low
++ * latency, desired throughput distribution, ...). In particular, on
++ * NCQ-capable devices, this function tries to return false, so as to
++ * help keep the drives' internal queues full, whenever this helps the
++ * device boost the throughput without causing any service-guarantee
++ * issue.
++ *
++ * In more detail, the return value of this function is obtained by,
++ * first, computing a number of boolean variables that take into
++ * account throughput and service-guarantee issues, and, then,
++ * combining these variables in a logical expression. Most of the
++ * issues taken into account are not trivial. We discuss these issues
++ * while introducing the variables.
++ */
++static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++ bool idling_boosts_thr, idling_boosts_thr_without_issues,
++ idling_needed_for_service_guarantees,
++ asymmetric_scenario;
++
++ if (bfqd->strict_guarantees)
++ return true;
++
++ /*
++ * The next variable takes into account the cases where idling
++ * boosts the throughput.
++ *
++ * The value of the variable is computed considering, first, that
++ * idling is virtually always beneficial for the throughput if:
++ * (a) the device is not NCQ-capable, or
++ * (b) regardless of the presence of NCQ, the device is rotational
++ * and the request pattern for bfqq is I/O-bound and sequential.
++ *
++ * Secondly, and in contrast to the above item (b), idling an
++ * NCQ-capable flash-based device would not boost the
++ * throughput even with sequential I/O; rather it would lower
++ * the throughput in proportion to how fast the device
++	 * is. Accordingly, the next variable is true if either of the
++	 * above conditions (a) or (b) holds, and, in particular,
++ * happens to be false if bfqd is an NCQ-capable flash-based
++ * device.
++ */
++ idling_boosts_thr = !bfqd->hw_tag ||
++ (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
++ bfq_bfqq_idle_window(bfqq));
++
++ /*
++ * The value of the next variable,
++ * idling_boosts_thr_without_issues, is equal to that of
++ * idling_boosts_thr, unless a special case holds. In this
++ * special case, described below, idling may cause problems to
++ * weight-raised queues.
++ *
++ * When the request pool is saturated (e.g., in the presence
++ * of write hogs), if the processes associated with
++ * non-weight-raised queues ask for requests at a lower rate,
++ * then processes associated with weight-raised queues have a
++ * higher probability to get a request from the pool
++ * immediately (or at least soon) when they need one. Thus
++ * they have a higher probability to actually get a fraction
++ * of the device throughput proportional to their high
++ * weight. This is especially true with NCQ-capable drives,
++ * which enqueue several requests in advance, and further
++ * reorder internally-queued requests.
++ *
++ * For this reason, we force to false the value of
++ * idling_boosts_thr_without_issues if there are weight-raised
++ * busy queues. In this case, and if bfqq is not weight-raised,
++ * this guarantees that the device is not idled for bfqq (if,
++ * instead, bfqq is weight-raised, then idling will be
++ * guaranteed by another variable, see below). Combined with
++ * the timestamping rules of BFQ (see [1] for details), this
++ * behavior causes bfqq, and hence any sync non-weight-raised
++ * queue, to get a lower number of requests served, and thus
++ * to ask for a lower number of requests from the request
++ * pool, before the busy weight-raised queues get served
++ * again. This often mitigates starvation problems in the
++ * presence of heavy write workloads and NCQ, thereby
++ * guaranteeing a higher application and system responsiveness
++ * in these hostile scenarios.
++ */
++ idling_boosts_thr_without_issues = idling_boosts_thr &&
++ bfqd->wr_busy_queues == 0;
++
++ /*
++ * There is then a case where idling must be performed not
++ * for throughput concerns, but to preserve service
++ * guarantees.
++ *
++ * To introduce this case, we can note that allowing the drive
++ * to enqueue more than one request at a time, and hence
++ * delegating de facto final scheduling decisions to the
++ * drive's internal scheduler, entails loss of control on the
++ * actual request service order. In particular, the critical
++ * situation is when requests from different processes happen
++ * to be present, at the same time, in the internal queue(s)
++ * of the drive. In such a situation, the drive, by deciding
++ * the service order of the internally-queued requests, does
++ * determine also the actual throughput distribution among
++ * these processes. But the drive typically has no notion or
++ * concern about per-process throughput distribution, and
++ * makes its decisions only on a per-request basis. Therefore,
++ * the service distribution enforced by the drive's internal
++ * scheduler is likely to coincide with the desired
++ * device-throughput distribution only in a completely
++ * symmetric scenario where:
++ * (i) each of these processes must get the same throughput as
++ * the others;
++ * (ii) all these processes have the same I/O pattern
++ * (either sequential or random).
++ * In fact, in such a scenario, the drive will tend to treat
++ * the requests of each of these processes in about the same
++ * way as the requests of the others, and thus to provide
++ * each of these processes with about the same throughput
++ * (which is exactly the desired throughput distribution). In
++ * contrast, in any asymmetric scenario, device idling is
++ * certainly needed to guarantee that bfqq receives its
++ * assigned fraction of the device throughput (see [1] for
++ * details).
++ *
++ * We address this issue by controlling, actually, only the
++ * symmetry sub-condition (i), i.e., provided that
++ * sub-condition (i) holds, idling is not performed,
++ * regardless of whether sub-condition (ii) holds. In other
++	 * words, idling is allowed only if sub-condition (i) does not
++	 * hold, and the device then tends to be prevented from queueing
++ * many requests, possibly of several processes. The reason
++ * for not controlling also sub-condition (ii) is that we
++ * exploit preemption to preserve guarantees in case of
++ * symmetric scenarios, even if (ii) does not hold, as
++ * explained in the next two paragraphs.
++ *
++ * Even if a queue, say Q, is expired when it remains idle, Q
++ * can still preempt the new in-service queue if the next
++ * request of Q arrives soon (see the comments on
++ * bfq_bfqq_update_budg_for_activation). If all queues and
++ * groups have the same weight, this form of preemption,
++ * combined with the hole-recovery heuristic described in the
++ * comments on function bfq_bfqq_update_budg_for_activation,
++ * are enough to preserve a correct bandwidth distribution in
++	 * the mid term, even without idling. In fact, even though not
++	 * idling allows the internal queues of the device to contain
++	 * many requests, and thus to reorder them, we can rather
++ * safely assume that the internal scheduler still preserves a
++ * minimum of mid-term fairness. The motivation for using
++ * preemption instead of idling is that, by not idling,
++	 * service guarantees are preserved with little or no loss of
++	 * throughput. In other words, both a high
++ * throughput and its desired distribution are obtained.
++ *
++ * More precisely, this preemption-based, idleless approach
++ * provides fairness in terms of IOPS, and not sectors per
++ * second. This can be seen with a simple example. Suppose
++ * that there are two queues with the same weight, but that
++ * the first queue receives requests of 8 sectors, while the
++ * second queue receives requests of 1024 sectors. In
++ * addition, suppose that each of the two queues contains at
++ * most one request at a time, which implies that each queue
++ * always remains idle after it is served. Finally, after
++ * remaining idle, each queue receives very quickly a new
++ * request. It follows that the two queues are served
++ * alternatively, preempting each other if needed. This
++ * implies that, although both queues have the same weight,
++ * the queue with large requests receives a service that is
++ * 1024/8 times as high as the service received by the other
++ * queue.
++ *
++ * On the other hand, device idling is performed, and thus
++ * pure sector-domain guarantees are provided, for the
++ * following queues, which are likely to need stronger
++ * throughput guarantees: weight-raised queues, and queues
++ * with a higher weight than other queues. When such queues
++ * are active, sub-condition (i) is false, which triggers
++ * device idling.
++ *
++ * According to the above considerations, the next variable is
++	 * true (only) if sub-condition (i) does not hold. To compute the
++ * value of this variable, we not only use the return value of
++ * the function bfq_symmetric_scenario(), but also check
++ * whether bfqq is being weight-raised, because
++ * bfq_symmetric_scenario() does not take into account also
++ * weight-raised queues (see comments on
++ * bfq_weights_tree_add()).
++ *
++ * As a side note, it is worth considering that the above
++ * device-idling countermeasures may however fail in the
++ * following unlucky scenario: if idling is (correctly)
++ * disabled in a time period during which all symmetry
++ * sub-conditions hold, and hence the device is allowed to
++ * enqueue many requests, but at some later point in time some
++	 * sub-condition ceases to hold, then it may become impossible
++ * to let requests be served in the desired order until all
++ * the requests already queued in the device have been served.
++ */
++ asymmetric_scenario = bfqq->wr_coeff > 1 ||
++ !bfq_symmetric_scenario(bfqd);
++
++ /*
++ * Finally, there is a case where maximizing throughput is the
++ * best choice even if it may cause unfairness toward
++ * bfqq. Such a case is when bfqq became active in a burst of
++ * queue activations. Queues that became active during a large
++ * burst benefit only from throughput, as discussed in the
++ * comments on bfq_handle_burst. Thus, if bfqq became active
++ * in a burst and not idling the device maximizes throughput,
++	 * then the device must not be idled, because not idling the
++ * device provides bfqq and all other queues in the burst with
++ * maximum benefit. Combining this and the above case, we can
++ * now establish when idling is actually needed to preserve
++ * service guarantees.
++ */
++ idling_needed_for_service_guarantees =
++ asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
++
++ /*
++ * We have now all the components we need to compute the return
++ * value of the function, which is true only if both the following
++ * conditions hold:
++	 * 1) bfqq is sync, because idling makes sense only for sync queues;
++ * 2) idling either boosts the throughput (without issues), or
++ * is necessary to preserve service guarantees.
++ */
++ bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d",
++ bfq_bfqq_sync(bfqq), idling_boosts_thr);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "may_idle: wr_busy %d boosts %d IO-bound %d guar %d",
++ bfqd->wr_busy_queues,
++ idling_boosts_thr_without_issues,
++ bfq_bfqq_IO_bound(bfqq),
++ idling_needed_for_service_guarantees);
++
++ return bfq_bfqq_sync(bfqq) &&
++ (idling_boosts_thr_without_issues ||
++ idling_needed_for_service_guarantees);
++}
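++
++/*
++ * Putting the pieces above together (a compact restatement, not a
++ * separate rule): apart from the early strict_guarantees exit, the
++ * value returned by bfq_bfqq_may_idle() boils down to
++ *
++ *	bfq_bfqq_sync(bfqq) &&
++ *	((idling_boosts_thr && bfqd->wr_busy_queues == 0) ||
++ *	 (asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq)))
++ *
++ * where asymmetric_scenario is true iff bfqq is weight-raised or
++ * bfq_symmetric_scenario() returns false.
++ */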
++
++/*
++ * If the in-service queue is empty but the function bfq_bfqq_may_idle
++ * returns true, then:
++ * 1) the queue must remain in service and cannot be expired, and
++ * 2) the device must be idled to wait for the possible arrival of a new
++ * request for the queue.
++ * See the comments on the function bfq_bfqq_may_idle for the reasons
++ * why performing device idling is the best choice to boost the throughput
++ * and preserve service guarantees when bfq_bfqq_may_idle itself
++ * returns true.
++ */
++static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
++ bfq_bfqq_may_idle(bfqq);
++}
++
++/*
++ * Select a queue for service. If we have a current queue in service,
++ * check whether to continue servicing it, or retrieve and set a new one.
++ */
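++/*
++ * Rough outline of the flow below, derived from the code itself rather
++ * than from any separate documentation: an in-service queue that hits
++ * its budget timeout is normally expired right away and a new queue is
++ * picked. Otherwise, if the queue still has a request that fits in the
++ * remaining budget, it is kept (cancelling a pending idle timer),
++ * while a request that does not fit expires it for budget exhaustion.
++ * With no queued requests, NULL is returned as long as the idle timer
++ * is armed or the queue may still idle after its in-flight requests
++ * complete; only then is the queue expired for lack of requests and a
++ * new one chosen.
++ */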
++static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++ struct request *next_rq;
++ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++
++ bfqq = bfqd->in_service_queue;
++ if (!bfqq)
++ goto new_queue;
++
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++
++ if (bfq_may_expire_for_budg_timeout(bfqq) &&
++ !hrtimer_active(&bfqd->idle_slice_timer) &&
++ !bfq_bfqq_must_idle(bfqq))
++ goto expire;
++
++check_queue:
++ /*
++ * This loop is rarely executed more than once. Even when it
++ * happens, it is much more convenient to re-execute this loop
++ * than to return NULL and trigger a new dispatch to get a
++ * request served.
++ */
++ next_rq = bfqq->next_rq;
++ /*
++ * If bfqq has requests queued and it has enough budget left to
++ * serve them, keep the queue, otherwise expire it.
++ */
++ if (next_rq) {
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ if (bfq_serv_to_charge(next_rq, bfqq) >
++ bfq_bfqq_budget_left(bfqq)) {
++ /*
++ * Expire the queue for budget exhaustion,
++ * which makes sure that the next budget is
++ * enough to serve the next request, even if
++ * it comes from the fifo expired path.
++ */
++ reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
++ goto expire;
++ } else {
++ /*
++ * The idle timer may be pending because we may
++ * not disable disk idling even when a new request
++ * arrives.
++ */
++ if (bfq_bfqq_wait_request(bfqq)) {
++ BUG_ON(!hrtimer_active(&bfqd->idle_slice_timer));
++ /*
++				 * If we get here: 1) at least one new
++				 * request has arrived but we have not
++				 * disabled the timer because the request
++				 * was too small, and 2) the block layer
++				 * has then unplugged the device, causing
++				 * the dispatch to be invoked.
++ *
++ * Since the device is unplugged, now the
++ * requests are probably large enough to
++ * provide a reasonable throughput.
++ * So we disable idling.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++ }
++ goto keep_queue;
++ }
++ }
++
++ /*
++ * No requests pending. However, if the in-service queue is idling
++ * for a new request, or has requests waiting for a completion and
++ * may idle after their completion, then keep it anyway.
++ */
++ if (hrtimer_active(&bfqd->idle_slice_timer) ||
++ (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
++ bfqq = NULL;
++ goto keep_queue;
++ }
++
++ reason = BFQ_BFQQ_NO_MORE_REQUESTS;
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, false, reason);
++new_queue:
++ bfqq = bfq_set_in_service_queue(bfqd);
++ if (bfqq) {
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
++ goto check_queue;
++ }
++keep_queue:
++ if (bfqq)
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
++ else
++ bfq_log(bfqd, "select_queue: no queue returned");
++
++ return bfqq;
++}
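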
++
++static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
++ BUG_ON(bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqq->wr_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
++ entity->orig_weight * bfqq->wr_coeff);
++ if (entity->prio_changed)
++ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
++
++ /*
++ * If the queue was activated in a burst, or too much
++ * time has elapsed from the beginning of this
++ * weight-raising period, then end weight raising.
++ */
++ if (bfq_bfqq_in_large_burst(bfqq))
++ bfq_bfqq_end_wr(bfqq);
++ else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time)) {
++ if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
++ time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
++ bfq_wr_duration(bfqd)))
++ bfq_bfqq_end_wr(bfqq);
++ else {
++ /* switch back to interactive wr */
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ bfqq->last_wr_start_finish =
++ bfqq->wr_start_at_switch_to_srt;
++ BUG_ON(time_is_after_jiffies(
++ bfqq->last_wr_start_finish));
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "back to interactive wr");
++ }
++ }
++ }
++ /*
++ * To improve latency (for this or other queues), immediately
++ * update weight both if it must be raised and if it must be
++	 * lowered. Since the entity may be on some active tree here,
++	 * and might have a pending change of its ioprio class, invoke
++	 * the next function with the last parameter unset (see the
++ * comments on the function).
++ */
++ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
++ __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
++ entity, false);
++}
++
++/*
++ * Dispatch one request from bfqq, moving it to the request queue
++ * dispatch list.
++ */
++static int bfq_dispatch_request(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++ struct request *rq = bfqq->next_rq;
++ unsigned long service_to_charge;
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++ BUG_ON(!rq);
++ service_to_charge = bfq_serv_to_charge(rq, bfqq);
++
++ BUG_ON(service_to_charge > bfq_bfqq_budget_left(bfqq));
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ bfq_bfqq_served(bfqq, service_to_charge);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ bfq_dispatch_insert(bfqd->queue, rq);
++
++ /*
++	 * If weight raising has to terminate for bfqq, then the next
++ * function causes an immediate update of bfqq's weight,
++ * without waiting for next activation. As a consequence, on
++	 * expiration, bfqq will be timestamped as if it had never been
++ * weight-raised during this service slot, even if it has
++ * received part or even most of the service as a
++ * weight-raised queue. This inflates bfqq's timestamps, which
++ * is beneficial, as bfqq is then more willing to leave the
++	 * device immediately to other, possibly weight-raised, queues.
++ */
++ bfq_update_wr_data(bfqd, bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "dispatched %u sec req (%llu), budg left %d",
++ blk_rq_sectors(rq),
++ (unsigned long long) blk_rq_pos(rq),
++ bfq_bfqq_budget_left(bfqq));
++
++ dispatched++;
++
++ if (!bfqd->in_service_bic) {
++ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
++ bfqd->in_service_bic = RQ_BIC(rq);
++ }
++
++ if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
++ goto expire;
++
++ return dispatched;
++
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, false, BFQ_BFQQ_BUDGET_EXHAUSTED);
++ return dispatched;
++}
++
++static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++
++ while (bfqq->next_rq) {
++ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
++ dispatched++;
++ }
++
++ BUG_ON(!list_empty(&bfqq->fifo));
++ return dispatched;
++}
++
++/*
++ * Drain our current requests.
++ * Used for barriers and when switching io schedulers on-the-fly.
++ */
++static int bfq_forced_dispatch(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq, *n;
++ struct bfq_service_tree *st;
++ int dispatched = 0;
++
++ bfqq = bfqd->in_service_queue;
++ if (bfqq)
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ /*
++ * Loop through classes, and be careful to leave the scheduler
++ * in a consistent state, as feedback mechanisms and vtime
++ * updates cannot be disabled during the process.
++ */
++ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
++ st = bfq_entity_service_tree(&bfqq->entity);
++
++ dispatched += __bfq_forced_dispatch_bfqq(bfqq);
++
++ bfqq->max_budget = bfq_max_budget(bfqd);
++ bfq_forget_idle(st);
++ }
++
++ BUG_ON(bfqd->busy_queues != 0);
++
++ return dispatched;
++}
++
++static int bfq_dispatch_requests(struct request_queue *q, int force)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq;
++
++ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++
++ if (bfqd->busy_queues == 0)
++ return 0;
++
++ if (unlikely(force))
++ return bfq_forced_dispatch(bfqd);
++
++ /*
++ * Force device to serve one request at a time if
++ * strict_guarantees is true. Forcing this service scheme is
++ * currently the ONLY way to guarantee that the request
++ * service order enforced by the scheduler is respected by a
++ * queueing device. Otherwise the device is free even to make
++ * some unlucky request wait for as long as the device
++ * wishes.
++ *
++	 * Of course, serving one request at a time may cause loss of
++ * throughput.
++ */
++ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
++ return 0;
++
++ bfqq = bfq_select_queue(bfqd);
++ if (!bfqq)
++ return 0;
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ BUG_ON(bfq_bfqq_wait_request(bfqq));
++
++ if (!bfq_dispatch_request(bfqd, bfqq))
++ return 0;
++
++ bfq_log_bfqq(bfqd, bfqq, "dispatched %s request",
++ bfq_bfqq_sync(bfqq) ? "sync" : "async");
++
++ BUG_ON(bfqq->next_rq == NULL &&
++ bfqq->entity.budget < bfqq->entity.service);
++ return 1;
++}
++
++/*
++ * Task holds one reference to the queue, dropped when task exits. Each rq
++ * in-flight on this queue also holds a reference, dropped when rq is freed.
++ *
++ * Queue lock must be held here. Recall not to use bfqq after calling
++ * this function on it.
++ */
++static void bfq_put_queue(struct bfq_queue *bfqq)
++{
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ struct bfq_group *bfqg = bfqq_group(bfqq);
++#endif
++
++ BUG_ON(bfqq->ref <= 0);
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++ bfqq->ref--;
++ if (bfqq->ref)
++ return;
++
++ BUG_ON(rb_first(&bfqq->sort_list));
++ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
++ BUG_ON(bfqq->entity.tree);
++ BUG_ON(bfq_bfqq_busy(bfqq));
++
++ if (bfq_bfqq_sync(bfqq))
++ /*
++ * The fact that this queue is being destroyed does not
++ * invalidate the fact that this queue may have been
++ * activated during the current burst. As a consequence,
++ * although the queue does not exist anymore, and hence
++		 * needs to be removed from the burst list if it is there,
++		 * the burst size must not be decremented.
++ */
++ hlist_del_init(&bfqq->burst_list_node);
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
++
++ kmem_cache_free(bfq_pool, bfqq);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ bfqg_put(bfqg);
++#endif
++}
++
++static void bfq_put_cooperator(struct bfq_queue *bfqq)
++{
++ struct bfq_queue *__bfqq, *next;
++
++ /*
++ * If this queue was scheduled to merge with another queue, be
++ * sure to drop the reference taken on that queue (and others in
++ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
++ */
++ __bfqq = bfqq->new_bfqq;
++ while (__bfqq) {
++ if (__bfqq == bfqq)
++ break;
++ next = __bfqq->new_bfqq;
++ bfq_put_queue(__bfqq);
++ __bfqq = next;
++ }
++}
++
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ if (bfqq == bfqd->in_service_queue) {
++ __bfq_bfqq_expire(bfqd, bfqq);
++ bfq_schedule_dispatch(bfqd);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq); /* release process reference */
++}
++
++static void bfq_init_icq(struct io_cq *icq)
++{
++ icq_to_bic(icq)->ttime.last_end_request = ktime_get_ns() - (1ULL<<32);
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++
++ if (bic_to_bfqq(bic, false)) {
++ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, false));
++ bic_set_bfqq(bic, NULL, false);
++ }
++
++ if (bic_to_bfqq(bic, true)) {
++ /*
++ * If the bic is using a shared queue, put the reference
++ * taken on the io_context when the bic started using a
++ * shared bfq_queue.
++ */
++ if (bfq_bfqq_coop(bic_to_bfqq(bic, true)))
++ put_io_context(icq->ioc);
++ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, true));
++ bic_set_bfqq(bic, NULL, true);
++ }
++}
++
++/*
++ * Update the entity prio values; note that the new values will not
++ * be used until the next (re)activation.
++ */
++static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ struct task_struct *tsk = current;
++ int ioprio_class;
++
++ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ switch (ioprio_class) {
++ default:
++ dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
++ "bfq: bad prio class %d\n", ioprio_class);
++ case IOPRIO_CLASS_NONE:
++ /*
++ * No prio set, inherit CPU scheduling settings.
++ */
++ bfqq->new_ioprio = task_nice_ioprio(tsk);
++ bfqq->new_ioprio_class = task_nice_ioclass(tsk);
++ break;
++ case IOPRIO_CLASS_RT:
++ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
++ break;
++ case IOPRIO_CLASS_BE:
++ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
++ break;
++ case IOPRIO_CLASS_IDLE:
++ bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
++ bfqq->new_ioprio = 7;
++ bfq_clear_bfqq_idle_window(bfqq);
++ break;
++ }
++
++ if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
++ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
++ bfqq->new_ioprio);
++ BUG();
++ }
++
++ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "set_next_ioprio_data: bic_class %d prio %d class %d",
++ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class);
++}
++
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_queue *bfqq;
++ unsigned long uninitialized_var(flags);
++ int ioprio = bic->icq.ioc->ioprio;
++
++ /*
++	 * This condition may trigger on a newly created bic; be sure to
++ * drop the lock before returning.
++ */
++ if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
++ return;
++
++ bic->ioprio = ioprio;
++
++ bfqq = bic_to_bfqq(bic, false);
++ if (bfqq) {
++ /* release process reference on this queue */
++ bfq_put_queue(bfqq);
++ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
++ bic_set_bfqq(bic, bfqq, false);
++ bfq_log_bfqq(bfqd, bfqq,
++ "check_ioprio_change: bfqq %p %d",
++ bfqq, bfqq->ref);
++ }
++
++ bfqq = bic_to_bfqq(bic, true);
++ if (bfqq)
++ bfq_set_next_ioprio_data(bfqq, bic);
++}
++
++static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic, pid_t pid, int is_sync)
++{
++ RB_CLEAR_NODE(&bfqq->entity.rb_node);
++ INIT_LIST_HEAD(&bfqq->fifo);
++ INIT_HLIST_NODE(&bfqq->burst_list_node);
++ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
++
++ bfqq->ref = 0;
++ bfqq->bfqd = bfqd;
++
++ if (bic)
++ bfq_set_next_ioprio_data(bfqq, bic);
++
++ if (is_sync) {
++ if (!bfq_class_idle(bfqq))
++ bfq_mark_bfqq_idle_window(bfqq);
++ bfq_mark_bfqq_sync(bfqq);
++ bfq_mark_bfqq_just_created(bfqq);
++ } else
++ bfq_clear_bfqq_sync(bfqq);
++ bfq_mark_bfqq_IO_bound(bfqq);
++
++	/* Tentative initial value to trade off between throughput and latency */
++ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
++ bfqq->pid = pid;
++
++ bfqq->wr_coeff = 1;
++ bfqq->last_wr_start_finish = jiffies;
++ bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
++ bfqq->budget_timeout = bfq_smallest_from_now();
++ bfqq->split_time = bfq_smallest_from_now();
++
++ /*
++ * Set to the value for which bfqq will not be deemed as
++ * soft rt when it becomes backlogged.
++ */
++ bfqq->soft_rt_next_start = bfq_greatest_from_now();
++
++ /* first request is almost certainly seeky */
++ bfqq->seek_history = 1;
++}
++
++static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int ioprio_class, int ioprio)
++{
++ switch (ioprio_class) {
++ case IOPRIO_CLASS_RT:
++ return &bfqg->async_bfqq[0][ioprio];
++ case IOPRIO_CLASS_NONE:
++ ioprio = IOPRIO_NORM;
++ /* fall through */
++ case IOPRIO_CLASS_BE:
++ return &bfqg->async_bfqq[1][ioprio];
++ case IOPRIO_CLASS_IDLE:
++ return &bfqg->async_idle_bfqq;
++ default:
++ BUG();
++ }
++}
++
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bio *bio, bool is_sync,
++ struct bfq_io_cq *bic)
++{
++ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ struct bfq_queue **async_bfqq = NULL;
++ struct bfq_queue *bfqq;
++ struct bfq_group *bfqg;
++
++ rcu_read_lock();
++
++ bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
++ if (!bfqg) {
++ bfqq = &bfqd->oom_bfqq;
++ goto out;
++ }
++
++ if (!is_sync) {
++ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
++ ioprio);
++ bfqq = *async_bfqq;
++ if (bfqq)
++ goto out;
++ }
++
++ bfqq = kmem_cache_alloc_node(bfq_pool,
++ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
++ bfqd->queue->node);
++
++ if (bfqq) {
++ bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
++ is_sync);
++ bfq_init_entity(&bfqq->entity, bfqg);
++ bfq_log_bfqq(bfqd, bfqq, "allocated");
++ } else {
++ bfqq = &bfqd->oom_bfqq;
++ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
++ goto out;
++ }
++
++ /*
++	 * Pin the queue now that it's allocated; scheduler exit will
++ * prune it.
++ */
++ if (async_bfqq) {
++ bfqq->ref++; /*
++ * Extra group reference, w.r.t. sync
++ * queue. This extra reference is removed
++ * only if bfqq->bfqg disappears, to
++ * guarantee that this queue is not freed
++ * until its group goes away.
++ */
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfqq, bfqq->ref);
++ *async_bfqq = bfqq;
++ }
++
++out:
++ bfqq->ref++; /* get a process reference to this queue */
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
++ rcu_read_unlock();
++ return bfqq;
++}
++
++static void bfq_update_io_thinktime(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic)
++{
++ struct bfq_ttime *ttime = &bic->ttime;
++ u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request;
++
++ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle);
++
++ ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++ ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
++ ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
++ ttime->ttime_samples);
++}
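++
++/*
++ * Worked example (illustrative figures, not taken from the patch): the
++ * two updates above form a fixed-point exponential average with weight
++ * 7/8 and a scale factor of 256 on the sample count. Starting from
++ * zero, a single completion with elapsed = 1000 ns gives
++ * ttime_samples = 32, ttime_total = 32000 and ttime_mean ~= 1004 ns;
++ * with a constant elapsed time E the state converges to
++ * ttime_samples = 256, ttime_total = 256 * E and ttime_mean ~= E.
++ * Since elapsed is clamped to 2 * bfq_slice_idle, one very long gap
++ * cannot dominate the mean.
++ */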
++
++static void
++bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ bfqq->seek_history <<= 1;
++ bfqq->seek_history |=
++ get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
++ (!blk_queue_nonrot(bfqd->queue) ||
++ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
++}
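++
++/*
++ * In other words (a rough reading of the code above, with the
++ * thresholds defined elsewhere in this patch): seek_history is a shift
++ * register of the most recent requests, where a set bit marks a
++ * request whose distance from the previous one exceeded BFQQ_SEEK_THR;
++ * on non-rotational devices a large request (at least
++ * BFQQ_SECT_THR_NONROT sectors) never counts as seeky. For instance, a
++ * run of eight strictly sequential requests leaves the low eight bits
++ * clear, which presumably is what BFQQ_SEEKY() relies on to classify
++ * the queue as non-seeky.
++ */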
++
++/*
++ * Disable idle window if the process thinks too long or seeks so much that
++ * it doesn't matter.
++ */
++static void bfq_update_idle_window(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ int enable_idle;
++
++ /* Don't idle for async or idle io prio class. */
++ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
++ return;
++
++ /* Idle window just restored, statistics are meaningless. */
++ if (time_is_after_eq_jiffies(bfqq->split_time +
++ bfqd->bfq_wr_min_idle_time))
++ return;
++
++ enable_idle = bfq_bfqq_idle_window(bfqq);
++
++ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
++ bfqd->bfq_slice_idle == 0 ||
++ (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
++ bfqq->wr_coeff == 1))
++ enable_idle = 0;
++ else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
++ if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
++ bfqq->wr_coeff == 1)
++ enable_idle = 0;
++ else
++ enable_idle = 1;
++ }
++ bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
++ enable_idle);
++
++ if (enable_idle)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++}
++
++/*
++ * Called when a new fs request (rq) is added to bfqq. Check if there's
++ * something we should do about it.
++ */
++static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ struct bfq_io_cq *bic = RQ_BIC(rq);
++
++ if (rq->cmd_flags & REQ_META)
++ bfqq->meta_pending++;
++
++ bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_io_seektime(bfqd, bfqq, rq);
++ if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
++ !BFQQ_SEEKY(bfqq))
++ bfq_update_idle_window(bfqd, bfqq, bic);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "rq_enqueued: idle_window=%d (seeky %d)",
++ bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq));
++
++ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
++
++ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
++ bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
++ blk_rq_sectors(rq) < 32;
++ bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
++
++ /*
++ * There is just this request queued: if the request
++ * is small and the queue is not to be expired, then
++ * just exit.
++ *
++ * In this way, if the device is being idled to wait
++ * for a new request from the in-service queue, we
++ * avoid unplugging the device and committing the
++ * device to serve just a small request. On the
++ * contrary, we wait for the block layer to decide
++ * when to unplug the device: hopefully, new requests
++ * will be merged to this one quickly, then the device
++ * will be unplugged and larger requests will be
++ * dispatched.
++ */
++ if (small_req && !budget_timeout)
++ return;
++
++ /*
++ * A large enough request arrived, or the queue is to
++ * be expired: in both cases disk idling is to be
++ * stopped, so clear wait_request flag and reset
++ * timer.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++
++ /*
++ * The queue is not empty, because a new request just
++ * arrived. Hence we can safely expire the queue, in
++ * case of budget timeout, without risking that the
++ * timestamps of the queue are not updated correctly.
++ * See [1] for more details.
++ */
++ if (budget_timeout)
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_BUDGET_TIMEOUT);
++
++ /*
++ * Let the request rip immediately, or let a new queue be
++ * selected if bfqq has just been expired.
++ */
++ __blk_run_queue(bfqd->queue);
++ }
++}
++
++static void bfq_insert_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ /*
++ * An unplug may trigger a requeue of a request from the device
++ * driver: make sure we are in process context while trying to
++ * merge two bfq_queues.
++ */
++ if (!in_interrupt()) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
++ if (new_bfqq) {
++ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
++ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
++ /*
++ * Release the request's reference to the old bfqq
++ * and make sure one is taken to the shared queue.
++ */
++ new_bfqq->allocated[rq_data_dir(rq)]++;
++ bfqq->allocated[rq_data_dir(rq)]--;
++ new_bfqq->ref++;
++ bfq_clear_bfqq_just_created(bfqq);
++ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
++ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
++ bfqq, new_bfqq);
++ /*
++ * rq is about to be enqueued into new_bfqq,
++ * release rq reference on bfqq
++ */
++ bfq_put_queue(bfqq);
++ rq->elv.priv[1] = new_bfqq;
++ bfqq = new_bfqq;
++ }
++ }
++
++ bfq_add_request(rq);
++
++ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
++ list_add_tail(&rq->queuelist, &bfqq->fifo);
++
++ bfq_rq_enqueued(bfqd, bfqq, rq);
++}
++
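++/*
++ * Rough summary of the heuristic below (the thresholds are defined
++ * elsewhere in this patch): completions are sampled only while enough
++ * requests are outstanding (rq_in_driver + queued >=
++ * BFQ_HW_QUEUE_THRESHOLD); after BFQ_HW_QUEUE_SAMPLES such samples,
++ * hw_tag becomes 1 iff more than BFQ_HW_QUEUE_THRESHOLD requests were
++ * ever observed in the driver at once. hw_tag starts at -1 (see
++ * bfq_init_queue below); once it becomes 1 it is never re-evaluated,
++ * whereas a value of 0 can still be revised by later sampling rounds.
++ */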
++static void bfq_update_hw_tag(struct bfq_data *bfqd)
++{
++ bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
++ bfqd->rq_in_driver);
++
++ if (bfqd->hw_tag == 1)
++ return;
++
++ /*
++ * This sample is valid if the number of outstanding requests
++ * is large enough to allow a queueing behavior. Note that the
++ * sum is not exact, as it's not taking into account deactivated
++ * requests.
++ */
++ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
++ return;
++
++ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
++ return;
++
++ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
++ bfqd->max_rq_in_driver = 0;
++ bfqd->hw_tag_samples = 0;
++}
++
++static void bfq_completed_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ u64 now_ns;
++ u32 delta_us;
++
++ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left",
++ blk_rq_sectors(rq));
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++ bfq_update_hw_tag(bfqd);
++
++ BUG_ON(!bfqd->rq_in_driver);
++ BUG_ON(!bfqq->dispatched);
++ bfqd->rq_in_driver--;
++ bfqq->dispatched--;
++ bfqg_stats_update_completion(bfqq_group(bfqq),
++ rq_start_time_ns(rq),
++ rq_io_start_time_ns(rq),
++ rq->cmd_flags);
++
++ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++ /*
++ * Set budget_timeout (which we overload to store the
++ * time at which the queue remains with no backlog and
++ * no outstanding request; used by the weight-raising
++ * mechanism).
++ */
++ bfqq->budget_timeout = jiffies;
++
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ }
++
++ now_ns = ktime_get_ns();
++
++ RQ_BIC(rq)->ttime.last_end_request = now_ns;
++
++ /*
++ * Using us instead of ns, to get a reasonable precision in
++ * computing rate in next check.
++ */
++ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
++
++ bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
++ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
++ (USEC_PER_SEC*
++ (u64)((bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us))
++ >>BFQ_RATE_SHIFT,
++ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT);
++
++ /*
++ * If the request took rather long to complete, and, according
++ * to the maximum request size recorded, this completion latency
++ * implies that the request was certainly served at a very low
++ * rate (less than 1M sectors/sec), then the whole observation
++ * interval that lasts up to this time instant cannot be a
++ * valid time interval for computing a new peak rate. Invoke
++ * bfq_update_rate_reset to have the following three steps
++ * taken:
++ * - close the observation interval at the last (previous)
++ * request dispatch or completion
++ * - compute rate, if possible, for that observation interval
++ * - reset to zero samples, which will trigger a proper
++ * re-initialization of the observation interval on next
++ * dispatch
++ */
++ if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
++ (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
++ 1UL<<(BFQ_RATE_SHIFT - 10))
++ bfq_update_rate_reset(bfqd, NULL);
++ bfqd->last_completion = now_ns;
++
++ /*
++ * If we are waiting to discover whether the request pattern
++ * of the task associated with the queue is actually
++ * isochronous, and both requisites for this condition to hold
++ * are now satisfied, then compute soft_rt_next_start (see the
++ * comments on the function bfq_bfqq_softrt_next_start()). We
++ * schedule this delayed check when bfqq expires, if it still
++ * has in-flight requests.
++ */
++ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list))
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++
++ /*
++ * If this is the in-service queue, check if it needs to be expired,
++ * or if we want to idle in case it has no pending requests.
++ */
++ if (bfqd->in_service_queue == bfqq) {
++ if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
++ bfq_arm_slice_timer(bfqd);
++ goto out;
++ } else if (bfq_may_expire_for_budg_timeout(bfqq))
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_BUDGET_TIMEOUT);
++ else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
++ (bfqq->dispatched == 0 ||
++ !bfq_bfqq_may_idle(bfqq)))
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_NO_MORE_REQUESTS);
++ }
++
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++
++out:
++ return;
++}
++
++static int __bfq_may_queue(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
++ bfq_clear_bfqq_must_alloc(bfqq);
++ return ELV_MQUEUE_MUST;
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++static int bfq_may_queue(struct request_queue *q, unsigned int op)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ /*
++ * Don't force setup of a queue from here, as a call to may_queue
++ * does not necessarily imply that a request actually will be
++	 * queued. So just look up a possibly existing queue, or return
++ * 'may queue' if that fails.
++ */
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (!bic)
++ return ELV_MQUEUE_MAY;
++
++ bfqq = bic_to_bfqq(bic, op_is_sync(op));
++ if (bfqq)
++ return __bfq_may_queue(bfqq);
++
++ return ELV_MQUEUE_MAY;
++}
++
++/*
++ * Queue lock held here.
++ */
++static void bfq_put_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ if (bfqq) {
++ const int rw = rq_data_dir(rq);
++
++ BUG_ON(!bfqq->allocated[rw]);
++ bfqq->allocated[rw]--;
++
++ rq->elv.priv[0] = NULL;
++ rq->elv.priv[1] = NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++ bfqq, bfqq->ref);
++ bfq_put_queue(bfqq);
++ }
++}
++
++/*
++ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
++ * was the last process referring to that bfqq.
++ */
++static struct bfq_queue *
++bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++
++ put_io_context(bic->icq.ioc);
++
++ if (bfqq_process_refs(bfqq) == 1) {
++ bfqq->pid = current->pid;
++ bfq_clear_bfqq_coop(bfqq);
++ bfq_clear_bfqq_split_coop(bfqq);
++ return bfqq;
++ }
++
++ bic_set_bfqq(bic, NULL, 1);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++ return NULL;
++}
++
++/*
++ * Allocate bfq data structures associated with this request.
++ */
++static int bfq_set_request(struct request_queue *q, struct request *rq,
++ struct bio *bio, gfp_t gfp_mask)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++ const int rw = rq_data_dir(rq);
++ const int is_sync = rq_is_sync(rq);
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ bool bfqq_already_existing = false, split = false;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++
++ if (!bic)
++ goto queue_fail;
++
++ bfq_check_ioprio_change(bic, bio);
++
++ bfq_bic_update_cgroup(bic, bio);
++
++new_queue:
++ bfqq = bic_to_bfqq(bic, is_sync);
++ if (!bfqq || bfqq == &bfqd->oom_bfqq) {
++ if (bfqq)
++ bfq_put_queue(bfqq);
++ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
++ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
++
++ bic_set_bfqq(bic, bfqq, is_sync);
++ if (split && is_sync) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: was_in_list %d "
++ "was_in_large_burst %d "
++ "large burst in progress %d",
++ bic->was_in_burst_list,
++ bic->saved_in_large_burst,
++ bfqd->large_burst);
++
++ if ((bic->was_in_burst_list && bfqd->large_burst) ||
++ bic->saved_in_large_burst) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: marking in "
++ "large burst");
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ } else {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: clearing in "
++ "large burst");
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ if (bic->was_in_burst_list)
++ hlist_add_head(&bfqq->burst_list_node,
++ &bfqd->burst_list);
++ }
++ bfqq->split_time = jiffies;
++ }
++ } else {
++ /* If the queue was seeky for too long, break it apart. */
++ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
++
++ /* Update bic before losing reference to bfqq */
++ if (bfq_bfqq_in_large_burst(bfqq))
++ bic->saved_in_large_burst = true;
++
++ bfqq = bfq_split_bfqq(bic, bfqq);
++ split = true;
++ if (!bfqq)
++ goto new_queue;
++ else
++ bfqq_already_existing = true;
++ }
++ }
++
++ bfqq->allocated[rw]++;
++ bfqq->ref++;
++ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref);
++
++ rq->elv.priv[0] = bic;
++ rq->elv.priv[1] = bfqq;
++
++ /*
++ * If a bfq_queue has only one process reference, it is owned
++ * by only one bfq_io_cq: we can set the bic field of the
++ * bfq_queue to the address of that structure. Also, if the
++ * queue has just been split, mark a flag so that the
++ * information is available to the other scheduler hooks.
++ */
++ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
++ bfqq->bic = bic;
++ if (split) {
++ /*
++ * If the queue has just been split from a shared
++ * queue, restore the idle window and the possible
++ * weight raising period.
++ */
++ bfq_bfqq_resume_state(bfqq, bfqd, bic,
++ bfqq_already_existing);
++ }
++ }
++
++ if (unlikely(bfq_bfqq_just_created(bfqq)))
++ bfq_handle_burst(bfqd, bfqq);
++
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 0;
++
++queue_fail:
++ bfq_schedule_dispatch(bfqd);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 1;
++}
++
++static void bfq_kick_queue(struct work_struct *work)
++{
++ struct bfq_data *bfqd =
++ container_of(work, struct bfq_data, unplug_work);
++ struct request_queue *q = bfqd->queue;
++
++ spin_lock_irq(q->queue_lock);
++ __blk_run_queue(q);
++ spin_unlock_irq(q->queue_lock);
++}
++
++/*
++ * Handler of the expiration of the timer running if the in-service queue
++ * is idling inside its time slice.
++ */
++static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
++{
++ struct bfq_data *bfqd = container_of(timer, struct bfq_data,
++ idle_slice_timer);
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ enum bfqq_expiration reason;
++
++ spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++
++ bfqq = bfqd->in_service_queue;
++ /*
++ * Theoretical race here: the in-service queue can be NULL or
++	 * different from the queue that was idling if the timer handler
++	 * spins on the queue_lock while a new request arrives for the
++	 * current queue and a full dispatch cycle changes the
++	 * in-service queue. This can hardly happen, but in the worst
++ * case we just expire a queue too early.
++ */
++ if (bfqq) {
++ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++ bfq_clear_bfqq_wait_request(bfqq);
++
++ if (bfq_bfqq_budget_timeout(bfqq))
++ /*
++ * Also here the queue can be safely expired
++ * for budget timeout without wasting
++ * guarantees
++ */
++ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++ /*
++ * The queue may not be empty upon timer expiration,
++ * because we may not disable the timer when the
++ * first request of the in-service queue arrives
++ * during disk idling.
++ */
++ reason = BFQ_BFQQ_TOO_IDLE;
++ else
++ goto schedule_dispatch;
++
++ bfq_bfqq_expire(bfqd, bfqq, true, reason);
++ }
++
++schedule_dispatch:
++ bfq_schedule_dispatch(bfqd);
++
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
++ return HRTIMER_NORESTART;
++}
++
++static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
++{
++ hrtimer_cancel(&bfqd->idle_slice_timer);
++ cancel_work_sync(&bfqd->unplug_work);
++}
++
++static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
++ struct bfq_queue **bfqq_ptr)
++{
++ struct bfq_group *root_group = bfqd->root_group;
++ struct bfq_queue *bfqq = *bfqq_ptr;
++
++ bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ if (bfqq) {
++ bfq_bfqq_move(bfqd, bfqq, root_group);
++ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfqq, bfqq->ref);
++ bfq_put_queue(bfqq);
++ *bfqq_ptr = NULL;
++ }
++}
++
++/*
++ * Release all the bfqg references to its async queues. If we are
++ * deallocating the group, these queues may still contain requests, so
++ * we reparent them to the root cgroup (i.e., the only one that will
++ * exist for sure until all the requests on a device are gone).
++ */
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
++
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
++}
++
++static void bfq_exit_queue(struct elevator_queue *e)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ struct request_queue *q = bfqd->queue;
++ struct bfq_queue *bfqq, *n;
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ spin_lock_irq(q->queue_lock);
++
++ BUG_ON(bfqd->in_service_queue);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
++ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
++
++ spin_unlock_irq(q->queue_lock);
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
++#else
++ bfq_put_async_queues(bfqd, bfqd->root_group);
++ kfree(bfqd->root_group);
++#endif
++
++ kfree(bfqd);
++}
++
++static void bfq_init_root_group(struct bfq_group *root_group,
++ struct bfq_data *bfqd)
++{
++ int i;
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ root_group->entity.parent = NULL;
++ root_group->my_entity = NULL;
++ root_group->bfqd = bfqd;
++#endif
++ root_group->rq_pos_tree = RB_ROOT;
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++ root_group->sched_data.bfq_class_idle_last_service = jiffies;
++}
++
++static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
++{
++ struct bfq_data *bfqd;
++ struct elevator_queue *eq;
++
++ eq = elevator_alloc(q, e);
++ if (!eq)
++ return -ENOMEM;
++
++ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
++ if (!bfqd) {
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++ eq->elevator_data = bfqd;
++
++ /*
++ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
++ * Grab a permanent reference to it, so that the normal code flow
++ * will not attempt to free it.
++ */
++ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
++ bfqd->oom_bfqq.ref++;
++ bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
++ bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
++ bfqd->oom_bfqq.entity.new_weight =
++ bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
++
++	/* oom_bfqq does not participate in bursts */
++ bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
++ /*
++ * Trigger weight initialization, according to ioprio, at the
++ * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
++ * class won't be changed any more.
++ */
++ bfqd->oom_bfqq.entity.prio_changed = 1;
++
++ bfqd->queue = q;
++
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
++
++ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
++ if (!bfqd->root_group)
++ goto out_free;
++ bfq_init_root_group(bfqd->root_group, bfqd);
++ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
++
++ hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
++ HRTIMER_MODE_REL);
++ bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
++
++ bfqd->queue_weights_tree = RB_ROOT;
++ bfqd->group_weights_tree = RB_ROOT;
++
++ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
++
++ INIT_LIST_HEAD(&bfqd->active_list);
++ INIT_LIST_HEAD(&bfqd->idle_list);
++ INIT_HLIST_HEAD(&bfqd->burst_list);
++
++ bfqd->hw_tag = -1;
++
++ bfqd->bfq_max_budget = bfq_default_max_budget;
++
++ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
++ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
++ bfqd->bfq_back_max = bfq_back_max;
++ bfqd->bfq_back_penalty = bfq_back_penalty;
++ bfqd->bfq_slice_idle = bfq_slice_idle;
++ bfqd->bfq_timeout = bfq_timeout;
++
++ bfqd->bfq_requests_within_timer = 120;
++
++ bfqd->bfq_large_burst_thresh = 8;
++ bfqd->bfq_burst_interval = msecs_to_jiffies(180);
++
++ bfqd->low_latency = true;
++
++ /*
++ * Trade-off between responsiveness and fairness.
++ */
++ bfqd->bfq_wr_coeff = 30;
++ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
++ bfqd->bfq_wr_max_time = 0;
++ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
++ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
++ bfqd->bfq_wr_max_softrt_rate = 7000; /*
++ * Approximate rate required
++ * to playback or record a
++ * high-definition compressed
++ * video.
++ */
++ bfqd->wr_busy_queues = 0;
++
++ /*
++ * Begin by assuming, optimistically, that the device is a
++ * high-speed one, and that its peak rate is equal to 2/3 of
++ * the highest reference rate.
++ */
++ bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
++ T_fast[blk_queue_nonrot(bfqd->queue)];
++ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
++ bfqd->device_speed = BFQ_BFQD_FAST;
++
++ return 0;
++
++out_free:
++ kfree(bfqd);
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++}
++
++static void bfq_slab_kill(void)
++{
++ kmem_cache_destroy(bfq_pool);
++}
++
++static int __init bfq_slab_setup(void)
++{
++ bfq_pool = KMEM_CACHE(bfq_queue, 0);
++ if (!bfq_pool)
++ return -ENOMEM;
++ return 0;
++}
++
++static ssize_t bfq_var_show(unsigned int var, char *page)
++{
++ return sprintf(page, "%u\n", var);
++}
++
++static ssize_t bfq_var_store(unsigned long *var, const char *page,
++ size_t count)
++{
++ unsigned long new_val;
++ int ret = kstrtoul(page, 10, &new_val);
++
++ if (ret == 0)
++ *var = new_val;
++
++ return count;
++}
++
++static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++
++ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
++ jiffies_to_msecs(bfqd->bfq_wr_max_time) :
++ jiffies_to_msecs(bfq_wr_duration(bfqd)));
++}
++
++static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_queue *bfqq;
++ struct bfq_data *bfqd = e->elevator_data;
++ ssize_t num_char = 0;
++
++ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
++ bfqd->queued);
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ num_char += sprintf(page + num_char, "Active:\n");
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, nr_queued %d %d, ",
++ bfqq->pid,
++ bfqq->entity.weight,
++ bfqq->queued[0],
++ bfqq->queued[1]);
++ num_char += sprintf(page + num_char,
++ "dur %d/%u\n",
++ jiffies_to_msecs(
++ jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ num_char += sprintf(page + num_char, "Idle:\n");
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ jiffies_to_msecs(jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++
++ return num_char;
++}
++
++#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ u64 __data = __VAR; \
++ if (__CONV == 1) \
++ __data = jiffies_to_msecs(__data); \
++ else if (__CONV == 2) \
++ __data = div_u64(__data, NSEC_PER_MSEC); \
++ return bfq_var_show(__data, (page)); \
++}
++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
++SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
++SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
++SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
++SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
++SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
++SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
++SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
++SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
++SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
++SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
++ 1);
++SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
++#undef SHOW_FUNCTION
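++
++/*
++ * As a reading aid, SHOW_FUNCTION(bfq_slice_idle_show,
++ * bfqd->bfq_slice_idle, 2) above expands, roughly, to:
++ *
++ *	static ssize_t bfq_slice_idle_show(struct elevator_queue *e,
++ *					   char *page)
++ *	{
++ *		struct bfq_data *bfqd = e->elevator_data;
++ *		u64 __data = bfqd->bfq_slice_idle;
++ *
++ *		__data = div_u64(__data, NSEC_PER_MSEC); // __CONV == 2
++ *		return bfq_var_show(__data, page);
++ *	}
++ *
++ * i.e. the sysfs file reports milliseconds while the field is stored
++ * in nanoseconds; __CONV == 1 converts jiffies to milliseconds and
++ * __CONV == 0 prints the raw value.
++ */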
++
++#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ u64 __data = __VAR; \
++ __data = div_u64(__data, NSEC_PER_USEC); \
++ return bfq_var_show(__data, (page)); \
++}
++USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
++#undef USEC_SHOW_FUNCTION
++
++#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
++static ssize_t \
++__FUNC(struct elevator_queue *e, const char *page, size_t count) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ if (__CONV == 1) \
++ *(__PTR) = msecs_to_jiffies(__data); \
++ else if (__CONV == 2) \
++ *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
++ else \
++ *(__PTR) = __data; \
++ return ret; \
++}
++STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
++ INT_MAX, 2);
++STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
++ INT_MAX, 2);
++STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
++STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
++ INT_MAX, 0);
++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
++STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
++ 1);
++STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
++ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
++ INT_MAX, 0);
++#undef STORE_FUNCTION
++
++#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
++static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ *(__PTR) = (u64)__data * NSEC_PER_USEC; \
++ return ret; \
++}
++USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
++ UINT_MAX);
++#undef USEC_STORE_FUNCTION
++
++/* do nothing for the moment */
++static ssize_t bfq_weights_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ return count;
++}
++
++static ssize_t bfq_max_budget_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data == 0)
++ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
++ else {
++ if (__data > INT_MAX)
++ __data = INT_MAX;
++ bfqd->bfq_max_budget = __data;
++ }
++
++ bfqd->bfq_user_max_budget = __data;
++
++ return ret;
++}
++
++/*
++ * Leaving this name to preserve name compatibility with cfq
++ * parameters, but this timeout is used for both sync and async.
++ */
++static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data < 1)
++ __data = 1;
++ else if (__data > INT_MAX)
++ __data = INT_MAX;
++
++ bfqd->bfq_timeout = msecs_to_jiffies(__data);
++ if (bfqd->bfq_user_max_budget == 0)
++ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
++
++ return ret;
++}
++
++static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (!bfqd->strict_guarantees && __data == 1
++ && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
++ bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
++
++ bfqd->strict_guarantees = __data;
++
++ return ret;
++}
++
++static ssize_t bfq_low_latency_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (__data == 0 && bfqd->low_latency != 0)
++ bfq_end_wr(bfqd);
++ bfqd->low_latency = __data;
++
++ return ret;
++}
++
++#define BFQ_ATTR(name) \
++ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
++
++static struct elv_fs_entry bfq_attrs[] = {
++ BFQ_ATTR(fifo_expire_sync),
++ BFQ_ATTR(fifo_expire_async),
++ BFQ_ATTR(back_seek_max),
++ BFQ_ATTR(back_seek_penalty),
++ BFQ_ATTR(slice_idle),
++ BFQ_ATTR(slice_idle_us),
++ BFQ_ATTR(max_budget),
++ BFQ_ATTR(timeout_sync),
++ BFQ_ATTR(strict_guarantees),
++ BFQ_ATTR(low_latency),
++ BFQ_ATTR(wr_coeff),
++ BFQ_ATTR(wr_max_time),
++ BFQ_ATTR(wr_rt_max_time),
++ BFQ_ATTR(wr_min_idle_time),
++ BFQ_ATTR(wr_min_inter_arr_async),
++ BFQ_ATTR(wr_max_softrt_rate),
++ BFQ_ATTR(weights),
++ __ATTR_NULL
++};
++
++static struct elevator_type iosched_bfq = {
++ .ops.sq = {
++ .elevator_merge_fn = bfq_merge,
++ .elevator_merged_fn = bfq_merged_request,
++ .elevator_merge_req_fn = bfq_merged_requests,
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ .elevator_bio_merged_fn = bfq_bio_merged,
++#endif
++ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge,
++ .elevator_allow_rq_merge_fn = bfq_allow_rq_merge,
++ .elevator_dispatch_fn = bfq_dispatch_requests,
++ .elevator_add_req_fn = bfq_insert_request,
++ .elevator_activate_req_fn = bfq_activate_request,
++ .elevator_deactivate_req_fn = bfq_deactivate_request,
++ .elevator_completed_req_fn = bfq_completed_request,
++ .elevator_former_req_fn = elv_rb_former_request,
++ .elevator_latter_req_fn = elv_rb_latter_request,
++ .elevator_init_icq_fn = bfq_init_icq,
++ .elevator_exit_icq_fn = bfq_exit_icq,
++ .elevator_set_req_fn = bfq_set_request,
++ .elevator_put_req_fn = bfq_put_request,
++ .elevator_may_queue_fn = bfq_may_queue,
++ .elevator_init_fn = bfq_init_queue,
++ .elevator_exit_fn = bfq_exit_queue,
++ },
++ .icq_size = sizeof(struct bfq_io_cq),
++ .icq_align = __alignof__(struct bfq_io_cq),
++ .elevator_attrs = bfq_attrs,
++ .elevator_name = "bfq-sq",
++ .elevator_owner = THIS_MODULE,
++};
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static struct blkcg_policy blkcg_policy_bfq = {
++ .dfl_cftypes = bfq_blkg_files,
++ .legacy_cftypes = bfq_blkcg_legacy_files,
++
++ .cpd_alloc_fn = bfq_cpd_alloc,
++ .cpd_init_fn = bfq_cpd_init,
++ .cpd_bind_fn = bfq_cpd_init,
++ .cpd_free_fn = bfq_cpd_free,
++
++ .pd_alloc_fn = bfq_pd_alloc,
++ .pd_init_fn = bfq_pd_init,
++ .pd_offline_fn = bfq_pd_offline,
++ .pd_free_fn = bfq_pd_free,
++ .pd_reset_stats_fn = bfq_pd_reset_stats,
++};
++#endif
++
++static int __init bfq_init(void)
++{
++ int ret;
++ char msg[60] = "BFQ I/O-scheduler: v8r12";
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ ret = blkcg_policy_register(&blkcg_policy_bfq);
++ if (ret)
++ return ret;
++#endif
++
++ ret = -ENOMEM;
++ if (bfq_slab_setup())
++ goto err_pol_unreg;
++
++ /*
++ * Times to load large popular applications for the typical
++ * systems installed on the reference devices (see the
++ * comments before the definitions of the next two
++ * arrays). Actually, we use slightly slower values, as the
++ * estimated peak rate tends to be smaller than the actual
++ * peak rate. The reason for this last fact is that estimates
++ * are computed over much shorter time intervals than the long
++ * intervals typically used for benchmarking. Why? First, to
++ * adapt more quickly to variations. Second, because an I/O
++ * scheduler cannot rely on a peak-rate-evaluation workload to
++ * be run for a long time.
++ */
++ T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
++ T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
++ T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
++ T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */
++
++ /*
++ * Thresholds that determine the switch between speed classes
++ * (see the comments before the definition of the array
++ * device_speed_thresh). These thresholds are biased towards
++ * transitions to the fast class. This is safer than the
++ * opposite bias. In fact, a wrong transition to the slow
++ * class results in short weight-raising periods, because the
++ * speed of the device then tends to be higher than the
++ * reference peak rate. On the opposite end, a wrong
++ * transition to the fast class tends to increase
++ * weight-raising periods, because of the opposite reason.
++ */
++ device_speed_thresh[0] = (4 * R_slow[0]) / 3;
++ device_speed_thresh[1] = (4 * R_slow[1]) / 3;
++
++ ret = elv_register(&iosched_bfq);
++ if (ret)
++ goto err_pol_unreg;
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ strcat(msg, " (with cgroups support)");
++#endif
++ pr_info("%s", msg);
++
++ return 0;
++
++err_pol_unreg:
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_policy_unregister(&blkcg_policy_bfq);
++#endif
++ return ret;
++}
++
++static void __exit bfq_exit(void)
++{
++ elv_unregister(&iosched_bfq);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_policy_unregister(&blkcg_policy_bfq);
++#endif
++ bfq_slab_kill();
++}
++
++module_init(bfq_init);
++module_exit(bfq_exit);
++
++MODULE_AUTHOR("Arianna Avanzini, Fabio Checconi, Paolo Valente");
++MODULE_LICENSE("GPL");
+diff --git a/block/bfq.h b/block/bfq.h
+new file mode 100644
+index 000000000000..f5751ea59d98
+--- /dev/null
++++ b/block/bfq.h
+@@ -0,0 +1,948 @@
++/*
++ * BFQ v8r12 for 4.11.0: data structures and common functions prototypes.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
++ */
++
++#ifndef _BFQ_H
++#define _BFQ_H
++
++#include <linux/blktrace_api.h>
++#include <linux/hrtimer.h>
++#include <linux/blk-cgroup.h>
++
++#define BFQ_IOPRIO_CLASSES 3
++#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
++
++#define BFQ_MIN_WEIGHT 1
++#define BFQ_MAX_WEIGHT 1000
++#define BFQ_WEIGHT_CONVERSION_COEFF 10
++
++#define BFQ_DEFAULT_QUEUE_IOPRIO 4
++
++#define BFQ_WEIGHT_LEGACY_DFL 100
++#define BFQ_DEFAULT_GRP_IOPRIO 0
++#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
++
++/*
++ * Soft real-time applications are much more latency-sensitive
++ * than interactive ones. Over-raise the weight of the former to
++ * privilege them against the latter.
++ */
++#define BFQ_SOFTRT_WEIGHT_FACTOR 100
++
++struct bfq_entity;
++
++/**
++ * struct bfq_service_tree - per ioprio_class service tree.
++ *
++ * Each service tree represents a B-WF2Q+ scheduler on its own. Each
++ * ioprio_class has its own independent scheduler, and so its own
++ * bfq_service_tree. All the fields are protected by the queue lock
++ * of the containing bfqd.
++ */
++struct bfq_service_tree {
++ /* tree for active entities (i.e., those backlogged) */
++ struct rb_root active;
++ /* tree for idle entities (i.e., not backlogged, with V <= F_i)*/
++ struct rb_root idle;
++
++ struct bfq_entity *first_idle; /* idle entity with minimum F_i */
++ struct bfq_entity *last_idle; /* idle entity with maximum F_i */
++
++ u64 vtime; /* scheduler virtual time */
++ /* scheduler weight sum; active and idle entities contribute to it */
++ unsigned long wsum;
++};
++
++/**
++ * struct bfq_sched_data - multi-class scheduler.
++ *
++ * bfq_sched_data is the basic scheduler queue. It supports three
++ * ioprio_classes, and can be used either as a toplevel queue or as an
++ * intermediate queue on a hierarchical setup. @next_in_service
++ * points to the active entity of the sched_data service trees that
++ * will be scheduled next. It is used to reduce the number of steps
++ * needed for each hierarchical-schedule update.
++ *
++ * The supported ioprio_classes are the same as in CFQ, in descending
++ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
++ * Requests from higher priority queues are served before all the
++ * requests from lower priority queues; among requests of the same
++ * queue requests are served according to B-WF2Q+.
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_sched_data {
++ struct bfq_entity *in_service_entity; /* entity in service */
++ /* head-of-the-line entity in the scheduler (see comments above) */
++ struct bfq_entity *next_in_service;
++ /* array of service trees, one per ioprio_class */
++ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
++ /* last time CLASS_IDLE was served */
++ unsigned long bfq_class_idle_last_service;
++
++};
++
++/**
++ * struct bfq_weight_counter - counter of the number of all active entities
++ * with a given weight.
++ */
++struct bfq_weight_counter {
++ unsigned int weight; /* weight of the entities this counter refers to */
++ unsigned int num_active; /* nr of active entities with this weight */
++ /*
++ * Weights tree member (see bfq_data's @queue_weights_tree and
++ * @group_weights_tree)
++ */
++ struct rb_node weights_node;
++};
++
++/**
++ * struct bfq_entity - schedulable entity.
++ *
++ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
++ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
++ * entity belongs to the sched_data of the parent group in the cgroup
++ * hierarchy. Non-leaf entities have also their own sched_data, stored
++ * in @my_sched_data.
++ *
++ * Each entity stores independently its priority values; this would
++ * allow different weights on different devices, but this
++ * functionality is not exported to userspace by now. Priorities and
++ * weights are updated lazily, first storing the new values into the
++ * new_* fields, then setting the @prio_changed flag. As soon as
++ * there is a transition in the entity state that allows the priority
++ * update to take place the effective and the requested priority
++ * values are synchronized.
++ *
++ * Unless cgroups are used, the weight value is calculated from the
++ * ioprio to export the same interface as CFQ. When dealing with
++ * ``well-behaved'' queues (i.e., queues that do not spend too much
++ * time to consume their budget and have true sequential behavior, and
++ * when there are no external factors breaking anticipation) the
++ * relative weights at each level of the cgroups hierarchy should be
++ * guaranteed. All the fields are protected by the queue lock of the
++ * containing bfqd.
++ */
++struct bfq_entity {
++ struct rb_node rb_node; /* service_tree member */
++ /* pointer to the weight counter associated with this entity */
++ struct bfq_weight_counter *weight_counter;
++
++ /*
++ * Flag, true if the entity is on a tree (either the active or
++ * the idle one of its service_tree) or is in service.
++ */
++ bool on_st;
++
++ u64 finish; /* B-WF2Q+ finish timestamp (aka F_i) */
++ u64 start; /* B-WF2Q+ start timestamp (aka S_i) */
++
++ /* tree the entity is enqueued into; %NULL if not on a tree */
++ struct rb_root *tree;
++
++ /*
++ * minimum start time of the (active) subtree rooted at this
++ * entity; used for O(log N) lookups into active trees
++ */
++ u64 min_start;
++
++ /* amount of service received during the last service slot */
++ int service;
++
++ /* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
++ int budget;
++
++ unsigned int weight; /* weight of the queue */
++ unsigned int new_weight; /* next weight if a change is in progress */
++
++ /* original weight, used to implement weight boosting */
++ unsigned int orig_weight;
++
++ /* parent entity, for hierarchical scheduling */
++ struct bfq_entity *parent;
++
++ /*
++ * For non-leaf nodes in the hierarchy, the associated
++ * scheduler queue, %NULL on leaf nodes.
++ */
++ struct bfq_sched_data *my_sched_data;
++ /* the scheduler queue this entity belongs to */
++ struct bfq_sched_data *sched_data;
++
++ /* flag, set to request a weight, ioprio or ioprio_class change */
++ int prio_changed;
++};
++
++struct bfq_group;
++
++/**
++ * struct bfq_queue - leaf schedulable entity.
++ *
++ * A bfq_queue is a leaf request queue; it can be associated with an
++ * io_context or more, if it is async or shared between cooperating
++ * processes. @cgroup holds a reference to the cgroup, to be sure that it
++ * does not disappear while a bfqq still references it (mostly to avoid
++ * races between request issuing and task migration followed by cgroup
++ * destruction).
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_queue {
++ /* reference counter */
++ int ref;
++ /* parent bfq_data */
++ struct bfq_data *bfqd;
++
++ /* current ioprio and ioprio class */
++ unsigned short ioprio, ioprio_class;
++ /* next ioprio and ioprio class if a change is in progress */
++ unsigned short new_ioprio, new_ioprio_class;
++
++ /*
++ * Shared bfq_queue if queue is cooperating with one or more
++ * other queues.
++ */
++ struct bfq_queue *new_bfqq;
++ /* request-position tree member (see bfq_group's @rq_pos_tree) */
++ struct rb_node pos_node;
++ /* request-position tree root (see bfq_group's @rq_pos_tree) */
++ struct rb_root *pos_root;
++
++ /* sorted list of pending requests */
++ struct rb_root sort_list;
++ /* if fifo isn't expired, next request to serve */
++ struct request *next_rq;
++ /* number of sync and async requests queued */
++ int queued[2];
++ /* number of sync and async requests currently allocated */
++ int allocated[2];
++ /* number of pending metadata requests */
++ int meta_pending;
++ /* fifo list of requests in sort_list */
++ struct list_head fifo;
++
++ /* entity representing this queue in the scheduler */
++ struct bfq_entity entity;
++
++ /* maximum budget allowed from the feedback mechanism */
++ int max_budget;
++ /* budget expiration (in jiffies) */
++ unsigned long budget_timeout;
++
++ /* number of requests on the dispatch list or inside driver */
++ int dispatched;
++
++ unsigned int flags; /* status flags.*/
++
++ /* node for active/idle bfqq list inside parent bfqd */
++ struct list_head bfqq_list;
++
++ /* bit vector: a 1 for each seeky requests in history */
++ u32 seek_history;
++
++ /* node for the device's burst list */
++ struct hlist_node burst_list_node;
++
++ /* position of the last request enqueued */
++ sector_t last_request_pos;
++
++ /* Number of consecutive pairs of request completion and
++ * arrival, such that the queue becomes idle after the
++ * completion, but the next request arrives within an idle
++ * time slice; used only if the queue's IO_bound flag has been
++ * cleared.
++ */
++ unsigned int requests_within_timer;
++
++ /* pid of the process owning the queue, used for logging purposes */
++ pid_t pid;
++
++ /*
++ * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL
++ * if the queue is shared.
++ */
++ struct bfq_io_cq *bic;
++
++ /* current maximum weight-raising time for this queue */
++ unsigned long wr_cur_max_time;
++ /*
++ * Minimum time instant such that, only if a new request is
++ * enqueued after this time instant in an idle @bfq_queue with
++ * no outstanding requests, then the task associated with the
++ * queue is deemed as soft real-time (see the comments on
++ * the function bfq_bfqq_softrt_next_start())
++ */
++ unsigned long soft_rt_next_start;
++ /*
++ * Start time of the current weight-raising period if
++ * the @bfq-queue is being weight-raised, otherwise
++ * finish time of the last weight-raising period.
++ */
++ unsigned long last_wr_start_finish;
++ /* factor by which the weight of this queue is multiplied */
++ unsigned int wr_coeff;
++ /*
++ * Time of the last transition of the @bfq_queue from idle to
++ * backlogged.
++ */
++ unsigned long last_idle_bklogged;
++ /*
++ * Cumulative service received from the @bfq_queue since the
++ * last transition from idle to backlogged.
++ */
++ unsigned long service_from_backlogged;
++ /*
++ * Value of wr start time when switching to soft rt
++ */
++ unsigned long wr_start_at_switch_to_srt;
++
++ unsigned long split_time; /* time of last split */
++};
++
++/**
++ * struct bfq_ttime - per process thinktime stats.
++ */
++struct bfq_ttime {
++ u64 last_end_request; /* completion time of last request */
++
++ u64 ttime_total; /* total process thinktime */
++ unsigned long ttime_samples; /* number of thinktime samples */
++ u64 ttime_mean; /* average process thinktime */
++
++};
++
++/**
++ * struct bfq_io_cq - per (request_queue, io_context) structure.
++ */
++struct bfq_io_cq {
++ /* associated io_cq structure */
++ struct io_cq icq; /* must be the first member */
++ /* array of two process queues, the sync and the async */
++ struct bfq_queue *bfqq[2];
++ /* associated @bfq_ttime struct */
++ struct bfq_ttime ttime;
++ /* per (request_queue, blkcg) ioprio */
++ int ioprio;
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ uint64_t blkcg_serial_nr; /* the current blkcg serial */
++#endif
++
++ /*
++ * Snapshot of the idle window before merging; taken to
++ * remember this value while the queue is merged, so as to be
++ * able to restore it in case of split.
++ */
++ bool saved_idle_window;
++ /*
++ * Same purpose as the previous two fields for the I/O bound
++ * classification of a queue.
++ */
++ bool saved_IO_bound;
++
++ /*
++ * Same purpose as the previous fields for the value of the
++ * field keeping the queue's belonging to a large burst
++ */
++ bool saved_in_large_burst;
++ /*
++ * True if the queue belonged to a burst list before its merge
++ * with another cooperating queue.
++ */
++ bool was_in_burst_list;
++
++ /*
++ * Similar to previous fields: save wr information.
++ */
++ unsigned long saved_wr_coeff;
++ unsigned long saved_last_wr_start_finish;
++ unsigned long saved_wr_start_at_switch_to_srt;
++ unsigned int saved_wr_cur_max_time;
++};
++
++enum bfq_device_speed {
++ BFQ_BFQD_FAST,
++ BFQ_BFQD_SLOW,
++};
++
++/**
++ * struct bfq_data - per-device data structure.
++ *
++ * All the fields are protected by the @queue lock.
++ */
++struct bfq_data {
++ /* request queue for the device */
++ struct request_queue *queue;
++
++ /* root bfq_group for the device */
++ struct bfq_group *root_group;
++
++ /*
++ * rbtree of weight counters of @bfq_queues, sorted by
++ * weight. Used to keep track of whether all @bfq_queues have
++ * the same weight. The tree contains one counter for each
++ * distinct weight associated to some active and not
++ * weight-raised @bfq_queue (see the comments to the functions
++ * bfq_weights_tree_[add|remove] for further details).
++ */
++ struct rb_root queue_weights_tree;
++ /*
++ * rbtree of non-queue @bfq_entity weight counters, sorted by
++ * weight. Used to keep track of whether all @bfq_groups have
++ * the same weight. The tree contains one counter for each
++ * distinct weight associated to some active @bfq_group (see
++ * the comments to the functions bfq_weights_tree_[add|remove]
++ * for further details).
++ */
++ struct rb_root group_weights_tree;
++
++ /*
++ * Number of bfq_queues containing requests (including the
++ * queue in service, even if it is idling).
++ */
++ int busy_queues;
++ /* number of weight-raised busy @bfq_queues */
++ int wr_busy_queues;
++ /* number of queued requests */
++ int queued;
++ /* number of requests dispatched and waiting for completion */
++ int rq_in_driver;
++
++ /*
++ * Maximum number of requests in driver in the last
++ * @hw_tag_samples completed requests.
++ */
++ int max_rq_in_driver;
++ /* number of samples used to calculate hw_tag */
++ int hw_tag_samples;
++ /* flag set to one if the driver is showing a queueing behavior */
++ int hw_tag;
++
++ /* number of budgets assigned */
++ int budgets_assigned;
++
++ /*
++ * Timer set when idling (waiting) for the next request from
++ * the queue in service.
++ */
++ struct hrtimer idle_slice_timer;
++ /* delayed work to restart dispatching on the request queue */
++ struct work_struct unplug_work;
++
++ /* bfq_queue in service */
++ struct bfq_queue *in_service_queue;
++ /* bfq_io_cq (bic) associated with the @in_service_queue */
++ struct bfq_io_cq *in_service_bic;
++
++ /* on-disk position of the last served request */
++ sector_t last_position;
++
++ /* time of last request completion (ns) */
++ u64 last_completion;
++
++ /* time of first rq dispatch in current observation interval (ns) */
++ u64 first_dispatch;
++ /* time of last rq dispatch in current observation interval (ns) */
++ u64 last_dispatch;
++
++ /* beginning of the last budget */
++ ktime_t last_budget_start;
++ /* beginning of the last idle slice */
++ ktime_t last_idling_start;
++
++ /* number of samples in current observation interval */
++ int peak_rate_samples;
++ /* num of samples of seq dispatches in current observation interval */
++ u32 sequential_samples;
++ /* total num of sectors transferred in current observation interval */
++ u64 tot_sectors_dispatched;
++ /* max rq size seen during current observation interval (sectors) */
++ u32 last_rq_max_size;
++ /* time elapsed from first dispatch in current observ. interval (us) */
++ u64 delta_from_first;
++ /* current estimate of device peak rate */
++ u32 peak_rate;
++
++ /* maximum budget allotted to a bfq_queue before rescheduling */
++ int bfq_max_budget;
++
++ /* list of all the bfq_queues active on the device */
++ struct list_head active_list;
++ /* list of all the bfq_queues idle on the device */
++ struct list_head idle_list;
++
++ /*
++ * Timeout for async/sync requests; when it fires, requests
++ * are served in fifo order.
++ */
++ u64 bfq_fifo_expire[2];
++ /* weight of backward seeks wrt forward ones */
++ unsigned int bfq_back_penalty;
++ /* maximum allowed backward seek */
++ unsigned int bfq_back_max;
++ /* maximum idling time */
++ u32 bfq_slice_idle;
++
++ /* user-configured max budget value (0 for auto-tuning) */
++ int bfq_user_max_budget;
++ /*
++ * Timeout for bfq_queues to consume their budget; used to
++ * prevent seeky queues from imposing long latencies to
++ * sequential or quasi-sequential ones (this also implies that
++ * seeky queues cannot receive guarantees in the service
++ * domain; after a timeout they are charged for the time they
++ * have been in service, to preserve fairness among them, but
++ * without service-domain guarantees).
++ */
++ unsigned int bfq_timeout;
++
++ /*
++ * Number of consecutive requests that must be issued within
++ * the idle time slice to set again idling to a queue which
++ * was marked as non-I/O-bound (see the definition of the
++ * IO_bound flag for further details).
++ */
++ unsigned int bfq_requests_within_timer;
++
++ /*
++ * Force device idling whenever needed to provide accurate
++ * service guarantees, without caring about throughput
++ * issues. CAVEAT: this may even increase latencies, in case
++ * of useless idling for processes that did stop doing I/O.
++ */
++ bool strict_guarantees;
++
++ /*
++ * Last time at which a queue entered the current burst of
++ * queues being activated shortly after each other; for more
++ * details about this and the following parameters related to
++ * a burst of activations, see the comments on the function
++ * bfq_handle_burst.
++ */
++ unsigned long last_ins_in_burst;
++ /*
++ * Reference time interval used to decide whether a queue has
++ * been activated shortly after @last_ins_in_burst.
++ */
++ unsigned long bfq_burst_interval;
++ /* number of queues in the current burst of queue activations */
++ int burst_size;
++
++ /* common parent entity for the queues in the burst */
++ struct bfq_entity *burst_parent_entity;
++ /* Maximum burst size above which the current queue-activation
++ * burst is deemed as 'large'.
++ */
++ unsigned long bfq_large_burst_thresh;
++ /* true if a large queue-activation burst is in progress */
++ bool large_burst;
++ /*
++ * Head of the burst list (as for the above fields, more
++ * details in the comments on the function bfq_handle_burst).
++ */
++ struct hlist_head burst_list;
++
++ /* if set to true, low-latency heuristics are enabled */
++ bool low_latency;
++ /*
++ * Maximum factor by which the weight of a weight-raised queue
++ * is multiplied.
++ */
++ unsigned int bfq_wr_coeff;
++ /* maximum duration of a weight-raising period (jiffies) */
++ unsigned int bfq_wr_max_time;
++
++ /* Maximum weight-raising duration for soft real-time processes */
++ unsigned int bfq_wr_rt_max_time;
++ /*
++ * Minimum idle period after which weight-raising may be
++ * reactivated for a queue (in jiffies).
++ */
++ unsigned int bfq_wr_min_idle_time;
++ /*
++ * Minimum period between request arrivals after which
++ * weight-raising may be reactivated for an already busy async
++ * queue (in jiffies).
++ */
++ unsigned long bfq_wr_min_inter_arr_async;
++
++ /* Max service-rate for a soft real-time queue, in sectors/sec */
++ unsigned int bfq_wr_max_softrt_rate;
++ /*
++ * Cached value of the product R*T, used for computing the
++ * maximum duration of weight raising automatically.
++ */
++ u64 RT_prod;
++ /* device-speed class for the low-latency heuristic */
++ enum bfq_device_speed device_speed;
++
++ /* fallback dummy bfqq for extreme OOM conditions */
++ struct bfq_queue oom_bfqq;
++};
++
++enum bfqq_state_flags {
++ BFQ_BFQQ_FLAG_just_created = 0, /* queue just allocated */
++ BFQ_BFQQ_FLAG_busy, /* has requests or is in service */
++ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */
++ BFQ_BFQQ_FLAG_non_blocking_wait_rq, /*
++ * waiting for a request
++ * without idling the device
++ */
++ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
++ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
++ BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */
++ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
++ BFQ_BFQQ_FLAG_IO_bound, /*
++ * bfqq has timed-out at least once
++ * having consumed at most 2/10 of
++ * its budget
++ */
++ BFQ_BFQQ_FLAG_in_large_burst, /*
++ * bfqq activated in a large burst,
++ * see comments to bfq_handle_burst.
++ */
++ BFQ_BFQQ_FLAG_softrt_update, /*
++ * may need softrt-next-start
++ * update
++ */
++ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
++ BFQ_BFQQ_FLAG_split_coop /* shared bfqq will be split */
++};
++
++#define BFQ_BFQQ_FNS(name) \
++static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
++{ \
++ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \
++}
++
++BFQ_BFQQ_FNS(just_created);
++BFQ_BFQQ_FNS(busy);
++BFQ_BFQQ_FNS(wait_request);
++BFQ_BFQQ_FNS(non_blocking_wait_rq);
++BFQ_BFQQ_FNS(must_alloc);
++BFQ_BFQQ_FNS(fifo_expire);
++BFQ_BFQQ_FNS(idle_window);
++BFQ_BFQQ_FNS(sync);
++BFQ_BFQQ_FNS(IO_bound);
++BFQ_BFQQ_FNS(in_large_burst);
++BFQ_BFQQ_FNS(coop);
++BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(softrt_update);
++#undef BFQ_BFQQ_FNS
++
++/* Logging facilities. */
++#ifdef CONFIG_BFQ_REDIRECT_TO_CONSOLE
++
++static const char *checked_dev_name(const struct device *dev)
++{
++ static const char nodev[] = "nodev";
++
++ if (dev)
++ return dev_name(dev);
++
++ return nodev;
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ assert_spin_locked((bfqd)->queue->queue_lock); \
++ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
++ pr_crit("%s bfq%d%c %s " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ __pbuf, ##args); \
++} while (0)
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
++ pr_crit("%s %s " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ __pbuf, ##args); \
++} while (0)
++
++#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ pr_crit("%s bfq%d%c " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ ##args)
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
++
++#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++#define bfq_log(bfqd, fmt, args...) \
++ pr_crit("%s bfq " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ ##args)
++
++#else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ assert_spin_locked((bfqd)->queue->queue_lock); \
++ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
++ (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ __pbuf, ##args); \
++} while (0)
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
++ blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
++} while (0)
++
++#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ ##args)
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
++
++#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++#define bfq_log(bfqd, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++#endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++
++/* Expiration reasons. */
++enum bfqq_expiration {
++ BFQ_BFQQ_TOO_IDLE = 0, /*
++ * queue has been idling for
++ * too long
++ */
++ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */
++ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */
++ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */
++ BFQ_BFQQ_PREEMPTED /* preemption in progress */
++};
++
++
++struct bfqg_stats {
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ /* number of ios merged */
++ struct blkg_rwstat merged;
++ /* total time spent on device in ns, may not be accurate w/ queueing */
++ struct blkg_rwstat service_time;
++ /* total time spent waiting in scheduler queue in ns */
++ struct blkg_rwstat wait_time;
++ /* number of IOs queued up */
++ struct blkg_rwstat queued;
++ /* total disk time and nr sectors dispatched by this group */
++ struct blkg_stat time;
++ /* sum of number of ios queued across all samples */
++ struct blkg_stat avg_queue_size_sum;
++ /* count of samples taken for average */
++ struct blkg_stat avg_queue_size_samples;
++ /* how many times this group has been removed from service tree */
++ struct blkg_stat dequeue;
++ /* total time spent waiting for it to be assigned a timeslice. */
++ struct blkg_stat group_wait_time;
++ /* time spent idling for this blkcg_gq */
++ struct blkg_stat idle_time;
++ /* total time with empty current active q with other requests queued */
++ struct blkg_stat empty_time;
++ /* fields after this shouldn't be cleared on stat reset */
++ uint64_t start_group_wait_time;
++ uint64_t start_idle_time;
++ uint64_t start_empty_time;
++ uint16_t flags;
++#endif
++};
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++/*
++ * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
++ *
++ * @ps: @blkcg_policy_storage that this structure inherits
++ * @weight: weight of the bfq_group
++ */
++struct bfq_group_data {
++ /* must be the first member */
++ struct blkcg_policy_data pd;
++
++ unsigned int weight;
++};
++
++/**
++ * struct bfq_group - per (device, cgroup) data structure.
++ * @entity: schedulable entity to insert into the parent group sched_data.
++ * @sched_data: own sched_data, to contain child entities (they may be
++ * both bfq_queues and bfq_groups).
++ * @bfqd: the bfq_data for the device this group acts upon.
++ * @async_bfqq: array of async queues for all the tasks belonging to
++ * the group, one queue per ioprio value per ioprio_class,
++ * except for the idle class that has only one queue.
++ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
++ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
++ * to avoid too many special cases during group creation/
++ * migration.
++ * @active_entities: number of active entities belonging to the group;
++ * unused for the root group. Used to know whether there
++ * are groups with more than one active @bfq_entity
++ * (see the comments to the function
++ * bfq_bfqq_may_idle()).
++ * @rq_pos_tree: rbtree sorted by next_request position, used when
++ * determining if two or more queues have interleaving
++ * requests (see bfq_find_close_cooperator()).
++ *
++ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
++ * there is a set of bfq_groups, each one collecting the lower-level
++ * entities belonging to the group that are acting on the same device.
++ *
++ * Locking works as follows:
++ * o @bfqd is protected by the queue lock, RCU is used to access it
++ * from the readers.
++ * o All the other fields are protected by the @bfqd queue lock.
++ */
++struct bfq_group {
++ /* must be the first member */
++ struct blkg_policy_data pd;
++
++ struct bfq_entity entity;
++ struct bfq_sched_data sched_data;
++
++ void *bfqd;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct bfq_entity *my_entity;
++
++ int active_entities;
++
++ struct rb_root rq_pos_tree;
++
++ struct bfqg_stats stats;
++};
++
++#else
++struct bfq_group {
++ struct bfq_sched_data sched_data;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct rb_root rq_pos_tree;
++};
++#endif
++
++static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
++
++static unsigned int bfq_class_idx(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ return bfqq ? bfqq->ioprio_class - 1 :
++ BFQ_DEFAULT_GRP_CLASS - 1;
++}
++
++static struct bfq_service_tree *
++bfq_entity_service_tree(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sched_data = entity->sched_data;
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ unsigned int idx = bfq_class_idx(entity);
++
++ BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
++ BUG_ON(sched_data == NULL);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "entity_service_tree %p %d",
++ sched_data->service_tree + idx, idx);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "entity_service_tree %p %d",
++ sched_data->service_tree + idx, idx);
++ }
++#endif
++ return sched_data->service_tree + idx;
++}
++
++static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
++{
++ return bic->bfqq[is_sync];
++}
++
++static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq,
++ bool is_sync)
++{
++ bic->bfqq[is_sync] = bfqq;
++}
++
++static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
++{
++ return bic->icq.q->elevator->elevator_data;
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++
++static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *group_entity = bfqq->entity.parent;
++
++ if (!group_entity)
++ group_entity = &bfqq->bfqd->root_group->entity;
++
++ return container_of(group_entity, struct bfq_group, entity);
++}
++
++#else
++
++static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
++{
++ return bfqq->bfqd->root_group;
++}
++
++#endif
++
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio);
++static void bfq_put_queue(struct bfq_queue *bfqq);
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bio *bio, bool is_sync,
++ struct bfq_io_cq *bic);
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
++#endif
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
++
++#endif /* _BFQ_H */
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 8da66379f7ea..bf000c58644b 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -54,7 +54,7 @@ struct blk_stat_callback;
+ * Maximum number of blkcg policies allowed to be registered concurrently.
+ * Defined here to simplify include dependency.
+ */
+-#define BLKCG_MAX_POLS 3
++#define BLKCG_MAX_POLS 4
+
+ typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+
+
+From 9916fed6c89c61a2b26053be04501784570bbec8 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 20 Jul 2017 10:46:39 +0200
+Subject: [PATCH 02/51] Add extra checks related to entity scheduling
+
+- extra checks related to ioprio-class changes
+- specific check on st->idle in __bfq_requeue_entity
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-sched.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index ac8991bca9fa..5ddf9af4261e 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -812,6 +812,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
+ }
+ #endif
+
++ BUG_ON(entity->tree && update_class_too);
+ BUG_ON(old_st->wsum < entity->weight);
+ old_st->wsum -= entity->weight;
+
+@@ -883,8 +884,10 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
+
+ new_st->wsum += entity->weight;
+
+- if (new_st != old_st)
++ if (new_st != old_st) {
++ BUG_ON(!update_class_too);
+ entity->start = new_st->vtime;
++ }
+ }
+
+ return new_st;
+@@ -993,6 +996,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ * tree, then it is safe to invoke next function with the last
+ * parameter set (see the comments on the function).
+ */
++ BUG_ON(entity->tree);
+ st = __bfq_entity_update_weight_prio(st, entity, true);
+ bfq_calc_finish(entity, entity->budget);
+
+@@ -1113,9 +1117,11 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
+ * check for that.
+ */
+ bfq_idle_extract(st, entity);
++ BUG_ON(entity->tree);
+ entity->start = bfq_gt(min_vstart, entity->finish) ?
+ min_vstart : entity->finish;
+ } else {
++ BUG_ON(entity->tree);
+ /*
+ * The finish time of the entity may be invalid, and
+ * it is in the past for sure, otherwise the queue
+@@ -1203,6 +1209,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
+ */
+ bfq_calc_finish(entity, entity->service);
+ entity->start = entity->finish;
++ BUG_ON(entity->tree && entity->tree == &st->idle);
+ BUG_ON(entity->tree && entity->tree != &st->active);
+ /*
+ * In addition, if the entity had more than one child
+
+From 8f5b2c25dcbe31dda524e85b921b3aa1fe11d111 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 21 Jul 2017 12:08:57 +0200
+Subject: [PATCH 03/51] block, bfq: reset in_service_entity if it becomes idle
+
+BFQ implements hierarchical scheduling by representing each group of
+queues with a generic parent entity. For each parent entity, BFQ
+maintains an in_service_entity pointer: if one of the child entities
+happens to be in service, in_service_entity points to it. The
+resetting of these pointers happens only on queue expirations: when
+the in-service queue is expired, i.e., stops being the queue in
+service, BFQ resets all in_service_entity pointers along the
+parent-entity path from this queue to the root entity.
+
+Functions handling the scheduling of entities assume, naturally, that
+in-service entities are active, i.e., have pending I/O requests (or,
+as a special case, even if they have no pending requests, they are
+expected to receive a new request very soon, with the scheduler idling
+the storage device while waiting for such an event). Unfortunately,
+the above resetting scheme of the in_service_entity pointers may cause
+this assumption to be violated. For example, the in-service queue may
+happen to remain without requests because of a request merge. In this
+case the queue does become idle, and all related data structures are
+updated accordingly. But in_service_entity still points to the queue
+in the parent entity. This inconsistency may even propagate to
+higher-level parent entities, if they happen to become idle as well,
+as a consequence of the leaf queue becoming idle. For this queue and
+parent entities, scheduling functions have an undefined behaviour,
+and, as reported, may easily lead to kernel crashes or hangs.
+
+This commit addresses this issue by simply resetting the
+in_service_entity field also when it is detected to point to an entity
+becoming idle (regardless of why the entity becomes idle).
+
+Reported-by: Laurentiu Nicola <lnicola@dend.ro>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Laurentiu Nicola <lnicola@dend.ro>
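+
+As an illustration (a condensed sketch, not one of the hunks below, and
+with a made-up helper name): the reset added by this patch boils down to
+the following, using the field names of this series, with locking,
+timestamp updates and the upward walk of bfq_deactivate_entity omitted;
+the actual change lives in __bfq_deactivate_entity:
+
+	static void sketch_reset_in_service(struct bfq_sched_data *sd,
+					    struct bfq_entity *entity)
+	{
+		if (sd->in_service_entity != entity)
+			return;	/* entity was not the one in service */
+
+		/* charge the service received so far ... */
+		bfq_calc_finish(entity, entity->service);
+		/*
+		 * ... and drop the now-stale pointer, so that scheduling
+		 * functions never see an idle entity as being in service.
+		 */
+		sd->in_service_entity = NULL;
+	}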
+---
+ block/bfq-sched.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 5ddf9af4261e..a07a06eb5c72 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1336,8 +1336,10 @@ static bool __bfq_deactivate_entity(struct bfq_entity *entity,
+
+ BUG_ON(is_in_service && entity->tree && entity->tree != &st->active);
+
+- if (is_in_service)
++ if (is_in_service) {
+ bfq_calc_finish(entity, entity->service);
++ sd->in_service_entity = NULL;
++ }
+
+ if (entity->tree == &st->active)
+ bfq_active_extract(st, entity);
+
+From 600ea668e2d340c95724bcf981d88812d6900342 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 28 Jul 2017 21:09:51 +0200
+Subject: [PATCH 04/51] block, bfq: consider also in_service_entity to state
+ whether an entity is active
+
+Groups of BFQ queues are represented by generic entities in BFQ. When
+a queue belonging to a parent entity is deactivated, the parent entity
+may need to be deactivated too, in case the deactivated queue was the
+only active queue for the parent entity. This deactivation may need to
+be propagated upwards if the entity belongs, in its turn, to a further
+higher-level entity, and so on. In particular, the upward propagation
+of deactivation stops at the first parent entity that remains active
+even if one of its child entities has been deactivated.
+
+To decide whether the last non-deactivation condition holds for a
+parent entity, BFQ checks whether the field next_in_service is still
+not NULL for the parent entity, after the deactivation of one of its
+child entities. If it is not NULL, then there are certainly other active
+entities in the parent entity, and deactivations can stop.
+
+Unfortunately, this check misses a corner case: if in_service_entity
+is not NULL, then next_in_service may happen to be NULL, although the
+parent entity is evidently active. This happens if: 1) the entity
+pointed by in_service_entity is the only active entity in the parent
+entity, and 2) according to the definition of next_in_service, the
+in_service_entity cannot be considered as next_in_service. See the
+comments on the definition of next_in_service for details on this
+second point.
+
+Hitting the above corner case causes crashes.
+
+To address this issue, this commit:
+1) Extends the above check from next_in_service alone to both
+next_in_service and in_service_entity (if either of them is not NULL,
+then no further deactivation is performed)
+2) Improves the (important) comments on how next_in_service is defined
+and updated; in particular it fixes a few rather obscure paragraphs
+
+Reported-by: Eric Wheeler <bfq-sched@lists.ewheeler.net>
+Reported-by: Rick Yiu <rick_yiu@htc.com>
+Reported-by: Tom X Nguyen <tom81094@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Eric Wheeler <bfq-sched@lists.ewheeler.net>
+Tested-by: Rick Yiu <rick_yiu@htc.com>
+Tested-by: Laurentiu Nicola <lnicola@dend.ro>
+Tested-by: Tom X Nguyen <tom81094@gmail.com>
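+
+As an illustration (a condensed sketch, not one of the hunks below, and
+with a made-up helper name): with this change, the stop condition of the
+upward deactivation walk can be read as the helper shown here; the fields
+are those of bfq_sched_data as used in this series:
+
+	/*
+	 * A parent entity is still active if it either has a candidate
+	 * for next service or has an entity currently in service.
+	 */
+	static bool sketch_parent_still_active(const struct bfq_sched_data *sd)
+	{
+		return sd->next_in_service || sd->in_service_entity;
+	}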
+---
+ block/bfq-sched.c | 140 ++++++++++++++++++++++++++++++------------------------
+ block/bfq.h | 23 +++++++--
+ 2 files changed, 95 insertions(+), 68 deletions(-)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index a07a06eb5c72..5c0f9290a79c 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -196,21 +196,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
+
+ /*
+ * This function tells whether entity stops being a candidate for next
+- * service, according to the following logic.
++ * service, according to the restrictive definition of the field
++ * next_in_service. In particular, this function is invoked for an
++ * entity that is about to be set in service.
+ *
+- * This function is invoked for an entity that is about to be set in
+- * service. If such an entity is a queue, then the entity is no longer
+- * a candidate for next service (i.e, a candidate entity to serve
+- * after the in-service entity is expired). The function then returns
+- * true.
++ * If entity is a queue, then the entity is no longer a candidate for
++ * next service according to that definition, because entity is
++ * about to become the in-service queue. This function then returns
++ * true if entity is a queue.
+ *
+- * In contrast, the entity could stil be a candidate for next service
+- * if it is not a queue, and has more than one child. In fact, even if
+- * one of its children is about to be set in service, other children
+- * may still be the next to serve. As a consequence, a non-queue
+- * entity is not a candidate for next-service only if it has only one
+- * child. And only if this condition holds, then the function returns
+- * true for a non-queue entity.
++ * In contrast, entity could still be a candidate for next service if
++ * it is not a queue, and has more than one active child. In fact,
++ * even if one of its children is about to be set in service, other
++ * active children may still be the next to serve, for the parent
++ * entity, even according to the above definition. As a consequence, a
++ * non-queue entity is not a candidate for next-service only if it has
++ * only one active child. And only if this condition holds, then this
++ * function returns true for a non-queue entity.
+ */
+ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
+ {
+@@ -223,6 +225,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
+
+ BUG_ON(bfqg == ((struct bfq_data *)(bfqg->bfqd))->root_group);
+ BUG_ON(bfqg->active_entities == 0);
++ /*
++ * The field active_entities does not always contain the
++ * actual number of active children entities: it happens to
++ * not account for the in-service entity in case the latter is
++ * removed from its active tree (which may get done after
++ * invoking the function bfq_no_longer_next_in_service in
++ * bfq_get_next_queue). Fortunately, here, i.e., while
++ * bfq_no_longer_next_in_service is not yet completed in
++ * bfq_get_next_queue, bfq_active_extract has not yet been
++ * invoked, and thus active_entities still coincides with the
++ * actual number of active entities.
++ */
+ if (bfqg->active_entities == 1)
+ return true;
+
+@@ -1089,7 +1103,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ * one of its children receives a new request.
+ *
+ * Basically, this function updates the timestamps of entity and
+- * inserts entity into its active tree, ater possible extracting it
++ * inserts entity into its active tree, after possibly extracting it
+ * from its idle tree.
+ */
+ static void __bfq_activate_entity(struct bfq_entity *entity,
+@@ -1213,7 +1227,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
+ BUG_ON(entity->tree && entity->tree != &st->active);
+ /*
+ * In addition, if the entity had more than one child
+- * when set in service, then was not extracted from
++ * when set in service, then it was not extracted from
+ * the active tree. This implies that the position of
+ * the entity in the active tree may need to be
+ * changed now, because we have just updated the start
+@@ -1221,9 +1235,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
+ * time in a moment (the requeueing is then, more
+ * precisely, a repositioning in this case). To
+ * implement this repositioning, we: 1) dequeue the
+- * entity here, 2) update the finish time and
+- * requeue the entity according to the new
+- * timestamps below.
++ * entity here, 2) update the finish time and requeue
++ * the entity according to the new timestamps below.
+ */
+ if (entity->tree)
+ bfq_active_extract(st, entity);
+@@ -1270,9 +1283,9 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
+
+
+ /**
+- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
+- * and activate, requeue or reposition all ancestors
+- * for which such an update becomes necessary.
++ * bfq_activate_requeue_entity - activate or requeue an entity representing a bfq_queue,
++ * and activate, requeue or reposition all ancestors
++ * for which such an update becomes necessary.
+ * @entity: the entity to activate.
+ * @non_blocking_wait_rq: true if this entity was waiting for a request
+ * @requeue: true if this is a requeue, which implies that bfqq is
+@@ -1308,9 +1321,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
+ * @ins_into_idle_tree: if false, the entity will not be put into the
+ * idle tree.
+ *
+- * Deactivates an entity, independently from its previous state. Must
++ * Deactivates an entity, independently of its previous state. Must
+ * be invoked only if entity is on a service tree. Extracts the entity
+- * from that tree, and if necessary and allowed, puts it on the idle
++ * from that tree, and if necessary and allowed, puts it into the idle
+ * tree.
+ */
+ static bool __bfq_deactivate_entity(struct bfq_entity *entity,
+@@ -1359,7 +1372,7 @@ static bool __bfq_deactivate_entity(struct bfq_entity *entity,
+ /**
+ * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
+ * @entity: the entity to deactivate.
+- * @ins_into_idle_tree: true if the entity can be put on the idle tree
++ * @ins_into_idle_tree: true if the entity can be put into the idle tree
+ */
+ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ bool ins_into_idle_tree,
+@@ -1406,16 +1419,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ */
+ bfq_update_next_in_service(sd, NULL);
+
+- if (sd->next_in_service) {
++ if (sd->next_in_service || sd->in_service_entity) {
+ /*
+- * The parent entity is still backlogged,
+- * because next_in_service is not NULL. So, no
+- * further upwards deactivation must be
+- * performed. Yet, next_in_service has
+- * changed. Then the schedule does need to be
+- * updated upwards.
++ * The parent entity is still active, because
++ * either next_in_service or in_service_entity
++ * is not NULL. So, no further upwards
++ * deactivation must be performed. Yet,
++ * next_in_service has changed. Then the
++ * schedule does need to be updated upwards.
++ *
++ * NOTE If in_service_entity is not NULL, then
++ * next_in_service may happen to be NULL,
++ * although the parent entity is evidently
++ * active. This happens if 1) the entity
++ * pointed by in_service_entity is the only
++ * active entity in the parent entity, and 2)
++ * according to the definition of
++ * next_in_service, the in_service_entity
++ * cannot be considered as
++ * next_in_service. See the comments on the
++ * definition of next_in_service for details.
+ */
+ BUG_ON(sd->next_in_service == entity);
++ BUG_ON(sd->in_service_entity == entity);
+ break;
+ }
+
+@@ -1806,45 +1832,33 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+
+ /*
+ * If entity is no longer a candidate for next
+- * service, then we extract it from its active tree,
+- * for the following reason. To further boost the
+- * throughput in some special case, BFQ needs to know
+- * which is the next candidate entity to serve, while
+- * there is already an entity in service. In this
+- * respect, to make it easy to compute/update the next
+- * candidate entity to serve after the current
+- * candidate has been set in service, there is a case
+- * where it is necessary to extract the current
+- * candidate from its service tree. Such a case is
+- * when the entity just set in service cannot be also
+- * a candidate for next service. Details about when
+- * this conditions holds are reported in the comments
+- * on the function bfq_no_longer_next_in_service()
+- * invoked below.
++ * service, then it must be extracted from its active
++ * tree, so as to make sure that it won't be
++ * considered when computing next_in_service. See the
++ * comments on the function
++ * bfq_no_longer_next_in_service() for details.
+ */
+ if (bfq_no_longer_next_in_service(entity))
+ bfq_active_extract(bfq_entity_service_tree(entity),
+ entity);
+
+ /*
+- * For the same reason why we may have just extracted
+- * entity from its active tree, we may need to update
+- * next_in_service for the sched_data of entity too,
+- * regardless of whether entity has been extracted.
+- * In fact, even if entity has not been extracted, a
+- * descendant entity may get extracted. Such an event
+- * would cause a change in next_in_service for the
+- * level of the descendant entity, and thus possibly
+- * back to upper levels.
++ * Even if entity is not to be extracted according to
++ * the above check, a descendant entity may get
++ * extracted in one of the next iterations of this
++ * loop. Such an event could cause a change in
++ * next_in_service for the level of the descendant
++ * entity, and thus possibly back to this level.
+ *
+- * We cannot perform the resulting needed update
+- * before the end of this loop, because, to know which
+- * is the correct next-to-serve candidate entity for
+- * each level, we need first to find the leaf entity
+- * to set in service. In fact, only after we know
+- * which is the next-to-serve leaf entity, we can
+- * discover whether the parent entity of the leaf
+- * entity becomes the next-to-serve, and so on.
++ * However, we cannot perform the resulting needed
++ * update of next_in_service for this level before the
++ * end of the whole loop, because, to know which is
++ * the correct next-to-serve candidate entity for each
++ * level, we need first to find the leaf entity to set
++ * in service. In fact, only after we know which is
++ * the next-to-serve leaf entity, we can discover
++ * whether the parent entity of the leaf entity
++ * becomes the next-to-serve, and so on.
+ */
+
+ /* Log some information */
+diff --git a/block/bfq.h b/block/bfq.h
+index f5751ea59d98..ebd9688b9f61 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -68,17 +68,30 @@ struct bfq_service_tree {
+ *
+ * bfq_sched_data is the basic scheduler queue. It supports three
+ * ioprio_classes, and can be used either as a toplevel queue or as an
+- * intermediate queue on a hierarchical setup. @next_in_service
+- * points to the active entity of the sched_data service trees that
+- * will be scheduled next. It is used to reduce the number of steps
+- * needed for each hierarchical-schedule update.
++ * intermediate queue in a hierarchical setup.
+ *
+ * The supported ioprio_classes are the same as in CFQ, in descending
+ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
+ * Requests from higher priority queues are served before all the
+ * requests from lower priority queues; among requests of the same
+ * queue requests are served according to B-WF2Q+.
+- * All the fields are protected by the queue lock of the containing bfqd.
++ *
++ * The schedule is implemented by the service trees, plus the field
++ * @next_in_service, which points to the entity on the active trees
++ * that will be served next, if 1) no changes in the schedule occurs
++ * before the current in-service entity is expired, 2) the in-service
++ * queue becomes idle when it expires, and 3) if the entity pointed by
++ * in_service_entity is not a queue, then the in-service child entity
++ * of the entity pointed by in_service_entity becomes idle on
++ * expiration. This peculiar definition allows for the following
++ * optimization, not yet exploited: while a given entity is still in
++ * service, we already know which is the best candidate for next
++ * service among the other active entities in the same parent
++ * entity. We can then quickly compare the timestamps of the
++ * in-service entity with those of such best candidate.
++ *
++ * All the fields are protected by the queue lock of the containing
++ * bfqd.
+ */
+ struct bfq_sched_data {
+ struct bfq_entity *in_service_entity; /* entity in service */
+
+From 6b5effd10bc6711a862e7cbd7cd2dd0146defa01 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 4 May 2017 10:53:43 +0200
+Subject: [PATCH 05/51] block, bfq: improve and refactor throughput-boosting
+ logic
+
+When a queue associated with a process remains empty, there are cases
+where throughput gets boosted if the device is idled to await the
+arrival of a new I/O request for that queue. Currently, BFQ assumes
+that one of these cases is when the device has no internal queueing
+(regardless of the properties of the I/O being served). Unfortunately,
+this condition has proved to be too general. So, this commit refines it
+as "the device has no internal queueing and is rotational".
+
+This refinement provides a significant throughput boost with random
+I/O, on flash-based storage without internal queueing. For example, on
+a HiKey board, throughput increases by up to 125%, growing, e.g., from
+6.9MB/s to 15.6MB/s with two or three random readers in parallel.
+
+This commit also refactors the code related to device idling, for the
+following reason. Finding the change that provides the above large
+improvement has been slightly more difficult than it had to be,
+because the logic that decides whether to idle the device is still
+scattered across three functions. Almost all of the logic is in the
+function bfq_bfqq_may_idle, but (1) part of the decision is made in
+bfq_update_idle_window, and (2) the function bfq_bfqq_must_idle may
+switch off idling regardless of the output of bfq_bfqq_may_idle. In
+addition, both bfq_update_idle_window and bfq_bfqq_must_idle make
+their decisions as a function of parameters that are used, for similar
+purposes, also in bfq_bfqq_may_idle. This commit addresses this issue
+by moving all the logic into bfq_bfqq_may_idle.
+
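+As a rough illustration only (not part of this patch), the fragment
+below sketches the throughput-related half of the consolidated
+decision in plain C. The types dev_state/queue_state and the function
+name idling_boosts_throughput are invented for the example, and the
+service-guarantee term computed by bfq_bfqq_may_idle is left out.
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* Hypothetical stand-ins for the relevant bfq_data/bfq_queue state. */
+  struct dev_state   { bool nonrot; bool hw_tag; };
+  struct queue_state { bool sync; bool idle_class; bool seeky;
+                       bool io_bound; bool short_ttime; };
+
+  static bool idling_boosts_throughput(const struct dev_state *d,
+                                       const struct queue_state *q,
+                                       unsigned int slice_idle)
+  {
+          bool rot_without_queueing = !d->nonrot && !d->hw_tag;
+          bool seq_and_io_bound = !q->seeky && q->io_bound && q->short_ttime;
+
+          /* all preconditions are now checked in one place */
+          if (slice_idle == 0 || !q->sync || q->idle_class)
+                  return false;
+
+          /*
+           * Idle if the device is rotational and has no internal
+           * queueing, or if it lacks one of the two properties but
+           * the queue does sequential, I/O-bound work.
+           */
+          return rot_without_queueing ||
+                 ((!d->nonrot || !d->hw_tag) && seq_and_io_bound);
+  }
+
+  int main(void)
+  {
+          struct dev_state ncq_ssd   = { .nonrot = true,  .hw_tag = true  };
+          struct dev_state plain_hdd = { .nonrot = false, .hw_tag = false };
+          struct queue_state seq_reader = { .sync = true, .io_bound = true,
+                                            .short_ttime = true };
+
+          /* prints "NCQ SSD: 0, plain HDD: 1" */
+          printf("NCQ SSD: %d, plain HDD: %d\n",
+                 idling_boosts_throughput(&ncq_ssd, &seq_reader, 8000),
+                 idling_boosts_throughput(&plain_hdd, &seq_reader, 8000));
+          return 0;
+  }
+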
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+---
+ block/bfq-sq-iosched.c | 141 +++++++++++++++++++++++++++----------------------
+ block/bfq.h | 12 ++---
+ 2 files changed, 83 insertions(+), 70 deletions(-)
+
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 65e7c7e77f3c..30d019fc67e0 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -684,10 +684,10 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ unsigned int old_wr_coeff;
+ bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
+
+- if (bic->saved_idle_window)
+- bfq_mark_bfqq_idle_window(bfqq);
++ if (bic->saved_has_short_ttime)
++ bfq_mark_bfqq_has_short_ttime(bfqq);
+ else
+- bfq_clear_bfqq_idle_window(bfqq);
++ bfq_clear_bfqq_has_short_ttime(bfqq);
+
+ if (bic->saved_IO_bound)
+ bfq_mark_bfqq_IO_bound(bfqq);
+@@ -2047,7 +2047,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ if (!bic)
+ return;
+
+- bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
++ bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
+@@ -3214,9 +3214,9 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ }
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "expire (%d, slow %d, num_disp %d, idle_win %d, weight %d)",
++ "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)",
+ reason, slow, bfqq->dispatched,
+- bfq_bfqq_idle_window(bfqq), entity->weight);
++ bfq_bfqq_has_short_ttime(bfqq), entity->weight);
+
+ /*
+ * Increase, decrease or leave budget unchanged according to
+@@ -3298,7 +3298,10 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
+ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ {
+ struct bfq_data *bfqd = bfqq->bfqd;
+- bool idling_boosts_thr, idling_boosts_thr_without_issues,
++ bool rot_without_queueing =
++ !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
++ bfqq_sequential_and_IO_bound,
++ idling_boosts_thr, idling_boosts_thr_without_issues,
+ idling_needed_for_service_guarantees,
+ asymmetric_scenario;
+
+@@ -3306,27 +3309,44 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ return true;
+
+ /*
++ * Idling is performed only if slice_idle > 0. In addition, we
++ * do not idle if
++ * (a) bfqq is async
++ * (b) bfqq is in the idle io prio class: in this case we do
++ * not idle because we want to minimize the bandwidth that
++ * queues in this class can steal to higher-priority queues
++ */
++ if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
++ bfq_class_idle(bfqq))
++ return false;
++
++ bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
++ bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
++ /*
+ * The next variable takes into account the cases where idling
+ * boosts the throughput.
+ *
+ * The value of the variable is computed considering, first, that
+ * idling is virtually always beneficial for the throughput if:
+- * (a) the device is not NCQ-capable, or
+- * (b) regardless of the presence of NCQ, the device is rotational
+- * and the request pattern for bfqq is I/O-bound and sequential.
++ * (a) the device is not NCQ-capable and rotational, or
++ * (b) regardless of the presence of NCQ, the device is rotational and
++ * the request pattern for bfqq is I/O-bound and sequential, or
++ * (c) regardless of whether it is rotational, the device is
++ * not NCQ-capable and the request pattern for bfqq is
++ * I/O-bound and sequential.
+ *
+ * Secondly, and in contrast to the above item (b), idling an
+ * NCQ-capable flash-based device would not boost the
+ * throughput even with sequential I/O; rather it would lower
+ * the throughput in proportion to how fast the device
+ * is. Accordingly, the next variable is true if any of the
+- * above conditions (a) and (b) is true, and, in particular,
+- * happens to be false if bfqd is an NCQ-capable flash-based
+- * device.
++ * above conditions (a), (b) or (c) is true, and, in
++ * particular, happens to be false if bfqd is an NCQ-capable
++ * flash-based device.
+ */
+- idling_boosts_thr = !bfqd->hw_tag ||
+- (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
+- bfq_bfqq_idle_window(bfqq));
++ idling_boosts_thr = rot_without_queueing ||
++ ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
++ bfqq_sequential_and_IO_bound);
+
+ /*
+ * The value of the next variable,
+@@ -3497,12 +3517,10 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
+
+ /*
+- * We have now all the components we need to compute the return
+- * value of the function, which is true only if both the following
+- * conditions hold:
+- * 1) bfqq is sync, because idling make sense only for sync queues;
+- * 2) idling either boosts the throughput (without issues), or
+- * is necessary to preserve service guarantees.
++ * We have now all the components we need to compute the
++ * return value of the function, which is true only if idling
++ * either boosts the throughput (without issues), or is
++ * necessary to preserve service guarantees.
+ */
+ bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d",
+ bfq_bfqq_sync(bfqq), idling_boosts_thr);
+@@ -3514,9 +3532,8 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ bfq_bfqq_IO_bound(bfqq),
+ idling_needed_for_service_guarantees);
+
+- return bfq_bfqq_sync(bfqq) &&
+- (idling_boosts_thr_without_issues ||
+- idling_needed_for_service_guarantees);
++ return idling_boosts_thr_without_issues ||
++ idling_needed_for_service_guarantees;
+ }
+
+ /*
+@@ -3532,10 +3549,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ */
+ static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
+ {
+- struct bfq_data *bfqd = bfqq->bfqd;
+-
+- return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
+- bfq_bfqq_may_idle(bfqq);
++ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
+ }
+
+ /*
+@@ -3994,7 +4008,6 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
+ case IOPRIO_CLASS_IDLE:
+ bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
+ bfqq->new_ioprio = 7;
+- bfq_clear_bfqq_idle_window(bfqq);
+ break;
+ }
+
+@@ -4058,8 +4071,14 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_set_next_ioprio_data(bfqq, bic);
+
+ if (is_sync) {
++ /*
++ * No need to mark as has_short_ttime if in
++ * idle_class, because no device idling is performed
++ * for queues in idle class
++ */
+ if (!bfq_class_idle(bfqq))
+- bfq_mark_bfqq_idle_window(bfqq);
++ /* tentatively mark as has_short_ttime */
++ bfq_mark_bfqq_has_short_ttime(bfqq);
+ bfq_mark_bfqq_sync(bfqq);
+ bfq_mark_bfqq_just_created(bfqq);
+ } else
+@@ -4195,18 +4214,19 @@ bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
+ }
+
+-/*
+- * Disable idle window if the process thinks too long or seeks so much that
+- * it doesn't matter.
+- */
+-static void bfq_update_idle_window(struct bfq_data *bfqd,
+- struct bfq_queue *bfqq,
+- struct bfq_io_cq *bic)
++static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
+ {
+- int enable_idle;
++ bool has_short_ttime = true;
+
+- /* Don't idle for async or idle io prio class. */
+- if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
++ /*
++ * No need to update has_short_ttime if bfqq is async or in
++ * idle io prio class, or if bfq_slice_idle is zero, because
++ * no device idling is performed for bfqq in this case.
++ */
++ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
++ bfqd->bfq_slice_idle == 0)
+ return;
+
+ /* Idle window just restored, statistics are meaningless. */
+@@ -4214,27 +4234,22 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
+ bfqd->bfq_wr_min_idle_time))
+ return;
+
+- enable_idle = bfq_bfqq_idle_window(bfqq);
+-
++ /* Think time is infinite if no process is linked to
++ * bfqq. Otherwise check average think time to
++ * decide whether to mark as has_short_ttime
++ */
+ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
+- bfqd->bfq_slice_idle == 0 ||
+- (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
+- bfqq->wr_coeff == 1))
+- enable_idle = 0;
+- else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
+- if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
+- bfqq->wr_coeff == 1)
+- enable_idle = 0;
+- else
+- enable_idle = 1;
+- }
+- bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
+- enable_idle);
++ (bfq_sample_valid(bic->ttime.ttime_samples) &&
++ bic->ttime.ttime_mean > bfqd->bfq_slice_idle))
++ has_short_ttime = false;
++
++ bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
++ has_short_ttime);
+
+- if (enable_idle)
+- bfq_mark_bfqq_idle_window(bfqq);
++ if (has_short_ttime)
++ bfq_mark_bfqq_has_short_ttime(bfqq);
+ else
+- bfq_clear_bfqq_idle_window(bfqq);
++ bfq_clear_bfqq_has_short_ttime(bfqq);
+ }
+
+ /*
+@@ -4250,14 +4265,12 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfqq->meta_pending++;
+
+ bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_has_short_ttime(bfqd, bfqq, bic);
+ bfq_update_io_seektime(bfqd, bfqq, rq);
+- if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
+- !BFQQ_SEEKY(bfqq))
+- bfq_update_idle_window(bfqd, bfqq, bic);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "rq_enqueued: idle_window=%d (seeky %d)",
+- bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq));
++ "rq_enqueued: has_short_ttime=%d (seeky %d)",
++ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
+
+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+
+diff --git a/block/bfq.h b/block/bfq.h
+index ebd9688b9f61..34fc4697fd89 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -349,11 +349,11 @@ struct bfq_io_cq {
+ #endif
+
+ /*
+- * Snapshot of the idle window before merging; taken to
+- * remember this value while the queue is merged, so as to be
+- * able to restore it in case of split.
++ * Snapshot of the has_short_ttime flag before merging; taken
++ * to remember its value while the queue is merged, so as to
++ * be able to restore it in case of split.
+ */
+- bool saved_idle_window;
++ bool saved_has_short_ttime;
+ /*
+ * Same purpose as the previous two fields for the I/O bound
+ * classification of a queue.
+@@ -610,7 +610,7 @@ enum bfqq_state_flags {
+ */
+ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
+ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
+- BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */
++ BFQ_BFQQ_FLAG_has_short_ttime, /* queue has a short think time */
+ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
+ BFQ_BFQQ_FLAG_IO_bound, /*
+ * bfqq has timed-out at least once
+@@ -649,7 +649,7 @@ BFQ_BFQQ_FNS(wait_request);
+ BFQ_BFQQ_FNS(non_blocking_wait_rq);
+ BFQ_BFQQ_FNS(must_alloc);
+ BFQ_BFQQ_FNS(fifo_expire);
+-BFQ_BFQQ_FNS(idle_window);
++BFQ_BFQQ_FNS(has_short_ttime);
+ BFQ_BFQQ_FNS(sync);
+ BFQ_BFQQ_FNS(IO_bound);
+ BFQ_BFQQ_FNS(in_large_burst);
+
+From b5e746fa99d961a5642cffb27c19a77e8b638007 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 19 Dec 2016 16:59:33 +0100
+Subject: [PATCH 06/51] FIRST BFQ-MQ COMMIT: Copy bfq-sq-iosched.c as
+ bfq-mq-iosched.c
+
+This commit introduces bfq-mq-iosched.c, the main source file that
+will contain the code of bfq for blk-mq. I tentatively name this
+version of bfq bfq-mq.
+
+For the moment, the file bfq-mq-iosched.c is just a copy of
+bfq-sq-iosched.c, i.e, of the main source file of bfq for blk.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 5392 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 5392 insertions(+)
+ create mode 100644 block/bfq-mq-iosched.c
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+new file mode 100644
+index 000000000000..30d019fc67e0
+--- /dev/null
++++ b/block/bfq-mq-iosched.c
+@@ -0,0 +1,5392 @@
++/*
++ * Budget Fair Queueing (BFQ) I/O scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ *
++ * BFQ is a proportional-share I/O scheduler, with some extra
++ * low-latency capabilities. BFQ also supports full hierarchical
++ * scheduling through cgroups. Next paragraphs provide an introduction
++ * on BFQ inner workings. Details on BFQ benefits and usage can be
++ * found in Documentation/block/bfq-iosched.txt.
++ *
++ * BFQ is a proportional-share storage-I/O scheduling algorithm based
++ * on the slice-by-slice service scheme of CFQ. But BFQ assigns
++ * budgets, measured in number of sectors, to processes instead of
++ * time slices. The device is not granted to the in-service process
++ * for a given time slice, but until it has exhausted its assigned
++ * budget. This change from the time to the service domain enables BFQ
++ * to distribute the device throughput among processes as desired,
++ * without any distortion due to throughput fluctuations, or to device
++ * internal queueing. BFQ uses an ad hoc internal scheduler, called
++ * B-WF2Q+, to schedule processes according to their budgets. More
++ * precisely, BFQ schedules queues associated with processes. Thanks to
++ * the accurate policy of B-WF2Q+, BFQ can afford to assign high
++ * budgets to I/O-bound processes issuing sequential requests (to
++ * boost the throughput), and yet guarantee a low latency to
++ * interactive and soft real-time applications.
++ *
++ * NOTE: if the main or only goal, with a given device, is to achieve
++ * the maximum-possible throughput at all times, then do switch off
++ * all low-latency heuristics for that device, by setting low_latency
++ * to 0.
++ *
++ * BFQ is described in [1], where also a reference to the initial, more
++ * theoretical paper on BFQ can be found. The interested reader can find
++ * in the latter paper full details on the main algorithm, as well as
++ * formulas of the guarantees and formal proofs of all the properties.
++ * With respect to the version of BFQ presented in these papers, this
++ * implementation adds a few more heuristics, such as the one that
++ * guarantees a low latency to soft real-time applications, and a
++ * hierarchical extension based on H-WF2Q+.
++ *
++ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
++ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
++ * complexity derives from the one introduced with EEVDF in [3].
++ *
++ * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
++ * Scheduler", Proceedings of the First Workshop on Mobile System
++ * Technologies (MST-2015), May 2015.
++ * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
++ *
++ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
++ *
++ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
++ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
++ * Oct 1997.
++ *
++ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
++ *
++ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
++ * First: A Flexible and Accurate Mechanism for Proportional Share
++ * Resource Allocation,'' technical report.
++ *
++ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/cgroup.h>
++#include <linux/elevator.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/ioprio.h>
++#include "blk.h"
++#include "bfq.h"
++
++/* Expiration time of sync (0) and async (1) requests, in ns. */
++static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
++
++/* Maximum backwards seek, in KiB. */
++static const int bfq_back_max = (16 * 1024);
++
++/* Penalty of a backwards seek, in number of sectors. */
++static const int bfq_back_penalty = 2;
++
++/* Idling period duration, in ns. */
++static u32 bfq_slice_idle = (NSEC_PER_SEC / 125);
++
++/* Minimum number of assigned budgets for which stats are safe to compute. */
++static const int bfq_stats_min_budgets = 194;
++
++/* Default maximum budget values, in sectors and number of requests. */
++static const int bfq_default_max_budget = (16 * 1024);
++
++/*
++ * Async to sync throughput distribution is controlled as follows:
++ * when an async request is served, the entity is charged the number
++ * of sectors of the request, multiplied by the factor below
++ */
++static const int bfq_async_charge_factor = 10;
++
++/* Default timeout values, in jiffies, approximating CFQ defaults. */
++static const int bfq_timeout = (HZ / 8);
++
++static struct kmem_cache *bfq_pool;
++
++/* Below this threshold (in ns), we consider thinktime immediate. */
++#define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
++
++/* hw_tag detection: parallel requests threshold and min samples needed. */
++#define BFQ_HW_QUEUE_THRESHOLD 4
++#define BFQ_HW_QUEUE_SAMPLES 32
++
++#define BFQQ_SEEK_THR (sector_t)(8 * 100)
++#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
++#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
++#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
++
++/* Min number of samples required to perform peak-rate update */
++#define BFQ_RATE_MIN_SAMPLES 32
++/* Min observation time interval required to perform a peak-rate update (ns) */
++#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC)
++/* Target observation time interval for a peak-rate update (ns) */
++#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
++
++/* Shift used for peak rate fixed precision calculations. */
++#define BFQ_RATE_SHIFT 16
++
++/*
++ * By default, BFQ computes the duration of the weight raising for
++ * interactive applications automatically, using the following formula:
++ * duration = (R / r) * T, where r is the peak rate of the device, and
++ * R and T are two reference parameters.
++ * In particular, R is the peak rate of the reference device (see below),
++ * and T is a reference time: given the systems that are likely to be
++ * installed on the reference device according to its speed class, T is
++ * about the maximum time needed, under BFQ and while reading two files in
++ * parallel, to load typical large applications on these systems.
++ * In practice, the slower/faster the device at hand is, the more/less it
++ * takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to interactive
++ * applications.
++ *
++ * BFQ uses four different reference pairs (R, T), depending on:
++ * . whether the device is rotational or non-rotational;
++ * . whether the device is slow, such as old or portable HDDs, as well as
++ * SD cards, or fast, such as newer HDDs and SSDs.
++ *
++ * The device's speed class is dynamically (re)detected in
++ * bfq_update_peak_rate() every time the estimated peak rate is updated.
++ *
++ * In the following definitions, R_slow[0]/R_fast[0] and
++ * T_slow[0]/T_fast[0] are the reference values for a slow/fast
++ * rotational device, whereas R_slow[1]/R_fast[1] and
++ * T_slow[1]/T_fast[1] are the reference values for a slow/fast
++ * non-rotational device. Finally, device_speed_thresh are the
++ * thresholds used to switch between speed classes. The reference
++ * rates are not the actual peak rates of the devices used as a
++ * reference, but slightly lower values. The reason for using these
++ * slightly lower values is that the peak-rate estimator tends to
++ * yield slightly lower values than the actual peak rate (it can yield
++ * the actual peak rate only if there is only one process doing I/O,
++ * and the process does sequential I/O).
++ *
++ * Both the reference peak rates and the thresholds are measured in
++ * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
++ */
++static int R_slow[2] = {1000, 10700};
++static int R_fast[2] = {14000, 33000};
++/*
++ * To improve readability, a conversion function is used to initialize the
++ * following arrays, which entails that they can be initialized only in a
++ * function.
++ */
++static int T_slow[2];
++static int T_fast[2];
++static int device_speed_thresh[2];
++
++#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
++ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
++
++#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
++
++static void bfq_schedule_dispatch(struct bfq_data *bfqd);
++
++#include "bfq-ioc.c"
++#include "bfq-sched.c"
++#include "bfq-cgroup-included.c"
++
++#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples) ((samples) > 80)
++
++/*
++ * Scheduler run of queue, if there are requests pending and no one in the
++ * driver that will restart queueing.
++ */
++static void bfq_schedule_dispatch(struct bfq_data *bfqd)
++{
++ if (bfqd->queued != 0) {
++ bfq_log(bfqd, "schedule dispatch");
++ kblockd_schedule_work(&bfqd->unplug_work);
++ }
++}
++
++/*
++ * Lifted from AS - choose which of rq1 and rq2 is best served now.
++ * We choose the request that is closest to the head right now. Distance
++ * behind the head is penalized and only allowed to a certain extent.
++ */
++static struct request *bfq_choose_req(struct bfq_data *bfqd,
++ struct request *rq1,
++ struct request *rq2,
++ sector_t last)
++{
++ sector_t s1, s2, d1 = 0, d2 = 0;
++ unsigned long back_max;
++#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
++#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
++ unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
++
++ if (!rq1 || rq1 == rq2)
++ return rq2;
++ if (!rq2)
++ return rq1;
++
++ if (rq_is_sync(rq1) && !rq_is_sync(rq2))
++ return rq1;
++ else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
++ return rq2;
++ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
++ return rq1;
++ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
++ return rq2;
++
++ s1 = blk_rq_pos(rq1);
++ s2 = blk_rq_pos(rq2);
++
++ /*
++ * By definition, 1KiB is 2 sectors.
++ */
++ back_max = bfqd->bfq_back_max * 2;
++
++ /*
++ * Strict one way elevator _except_ in the case where we allow
++ * short backward seeks which are biased as twice the cost of a
++ * similar forward seek.
++ */
++ if (s1 >= last)
++ d1 = s1 - last;
++ else if (s1 + back_max >= last)
++ d1 = (last - s1) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ1_WRAP;
++
++ if (s2 >= last)
++ d2 = s2 - last;
++ else if (s2 + back_max >= last)
++ d2 = (last - s2) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ2_WRAP;
++
++ /* Found required data */
++
++ /*
++ * By doing switch() on the bit mask "wrap" we avoid having to
++ * check two variables for all permutations: --> faster!
++ */
++ switch (wrap) {
++ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
++ if (d1 < d2)
++ return rq1;
++ else if (d2 < d1)
++ return rq2;
++
++ if (s1 >= s2)
++ return rq1;
++ else
++ return rq2;
++
++ case BFQ_RQ2_WRAP:
++ return rq1;
++ case BFQ_RQ1_WRAP:
++ return rq2;
++ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
++ default:
++ /*
++ * Since both rqs are wrapped,
++ * start with the one that's further behind head
++ * (--> only *one* back seek required),
++ * since back seek takes more time than forward.
++ */
++ if (s1 <= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++}
++
++static struct bfq_queue *
++bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
++ sector_t sector, struct rb_node **ret_parent,
++ struct rb_node ***rb_link)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *bfqq = NULL;
++
++ parent = NULL;
++ p = &root->rb_node;
++ while (*p) {
++ struct rb_node **n;
++
++ parent = *p;
++ bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++
++ /*
++ * Sort strictly based on sector. Smallest to the left,
++ * largest to the right.
++ */
++ if (sector > blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_right;
++ else if (sector < blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_left;
++ else
++ break;
++ p = n;
++ bfqq = NULL;
++ }
++
++ *ret_parent = parent;
++ if (rb_link)
++ *rb_link = p;
++
++ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ (unsigned long long) sector,
++ bfqq ? bfqq->pid : 0);
++
++ return bfqq;
++}
++
++static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *__bfqq;
++
++ if (bfqq->pos_root) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++
++ if (bfq_class_idle(bfqq))
++ return;
++ if (!bfqq->next_rq)
++ return;
++
++ bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
++ blk_rq_pos(bfqq->next_rq), &parent, &p);
++ if (!__bfqq) {
++ rb_link_node(&bfqq->pos_node, parent, p);
++ rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
++ } else
++ bfqq->pos_root = NULL;
++}
++
++/*
++ * Tell whether there are active queues or groups with differentiated weights.
++ */
++static bool bfq_differentiated_weights(struct bfq_data *bfqd)
++{
++ /*
++ * For weights to differ, at least one of the trees must contain
++ * at least two nodes.
++ */
++ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
++ (bfqd->queue_weights_tree.rb_node->rb_left ||
++ bfqd->queue_weights_tree.rb_node->rb_right)
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ ) ||
++ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
++ (bfqd->group_weights_tree.rb_node->rb_left ||
++ bfqd->group_weights_tree.rb_node->rb_right)
++#endif
++ );
++}
++
++/*
++ * The following function returns true if every queue must receive the
++ * same share of the throughput (this condition is used when deciding
++ * whether idling may be disabled, see the comments in the function
++ * bfq_bfqq_may_idle()).
++ *
++ * Such a scenario occurs when:
++ * 1) all active queues have the same weight,
++ * 2) all active groups at the same level in the groups tree have the same
++ * weight,
++ * 3) all active groups at the same level in the groups tree have the same
++ * number of children.
++ *
++ * Unfortunately, keeping the necessary state for evaluating exactly the
++ * above symmetry conditions would be quite complex and time-consuming.
++ * Therefore this function evaluates, instead, the following stronger
++ * sub-conditions, for which it is much easier to maintain the needed
++ * state:
++ * 1) all active queues have the same weight,
++ * 2) all active groups have the same weight,
++ * 3) all active groups have at most one active child each.
++ * In particular, the last two conditions are always true if hierarchical
++ * support and the cgroups interface are not enabled, thus no state needs
++ * to be maintained in this case.
++ */
++static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
++{
++ return !bfq_differentiated_weights(bfqd);
++}
++
++/*
++ * If the weight-counter tree passed as input contains no counter for
++ * the weight of the input entity, then add that counter; otherwise just
++ * increment the existing counter.
++ *
++ * Note that weight-counter trees contain few nodes in mostly symmetric
++ * scenarios. For example, if all queues have the same weight, then the
++ * weight-counter tree for the queues may contain at most one node.
++ * This holds even if low_latency is on, because weight-raised queues
++ * are not inserted in the tree.
++ * In most scenarios, the rate at which nodes are created/destroyed
++ * should be low too.
++ */
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ struct rb_node **new = &(root->rb_node), *parent = NULL;
++
++ /*
++ * Do not insert if the entity is already associated with a
++ * counter, which happens if:
++ * 1) the entity is associated with a queue,
++ * 2) a request arrival has caused the queue to become both
++ * non-weight-raised, and hence change its weight, and
++ * backlogged; in this respect, each of the two events
++ * causes an invocation of this function,
++ * 3) this is the invocation of this function caused by the
++ * second event. This second invocation is actually useless,
++ * and we handle this fact by exiting immediately. More
++ * efficient or clearer solutions might possibly be adopted.
++ */
++ if (entity->weight_counter)
++ return;
++
++ while (*new) {
++ struct bfq_weight_counter *__counter = container_of(*new,
++ struct bfq_weight_counter,
++ weights_node);
++ parent = *new;
++
++ if (entity->weight == __counter->weight) {
++ entity->weight_counter = __counter;
++ goto inc_counter;
++ }
++ if (entity->weight < __counter->weight)
++ new = &((*new)->rb_left);
++ else
++ new = &((*new)->rb_right);
++ }
++
++ entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
++ GFP_ATOMIC);
++
++ /*
++ * In the unlucky event of an allocation failure, we just
++ * exit. This will cause the weight of entity to not be
++ * considered in bfq_differentiated_weights, which, in its
++ * turn, causes the scenario to be deemed wrongly symmetric in
++ * case entity's weight would have been the only weight making
++ * the scenario asymmetric. On the bright side, no unbalance
++ * will however occur when entity becomes inactive again (the
++ * invocation of this function is triggered by an activation
++ * of entity). In fact, bfq_weights_tree_remove does nothing
++ * if !entity->weight_counter.
++ */
++ if (unlikely(!entity->weight_counter))
++ return;
++
++ entity->weight_counter->weight = entity->weight;
++ rb_link_node(&entity->weight_counter->weights_node, parent, new);
++ rb_insert_color(&entity->weight_counter->weights_node, root);
++
++inc_counter:
++ entity->weight_counter->num_active++;
++}
++
++/*
++ * Decrement the weight counter associated with the entity, and, if the
++ * counter reaches 0, remove the counter from the tree.
++ * See the comments to the function bfq_weights_tree_add() for considerations
++ * about overhead.
++ */
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ if (!entity->weight_counter)
++ return;
++
++ BUG_ON(RB_EMPTY_ROOT(root));
++ BUG_ON(entity->weight_counter->weight != entity->weight);
++
++ BUG_ON(!entity->weight_counter->num_active);
++ entity->weight_counter->num_active--;
++ if (entity->weight_counter->num_active > 0)
++ goto reset_entity_pointer;
++
++ rb_erase(&entity->weight_counter->weights_node, root);
++ kfree(entity->weight_counter);
++
++reset_entity_pointer:
++ entity->weight_counter = NULL;
++}
++
++/*
++ * Return expired entry, or NULL to just start from scratch in rbtree.
++ */
++static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct request *rq;
++
++ if (bfq_bfqq_fifo_expire(bfqq))
++ return NULL;
++
++ bfq_mark_bfqq_fifo_expire(bfqq);
++
++ rq = rq_entry_fifo(bfqq->fifo.next);
++
++ if (rq == last || ktime_get_ns() < rq->fifo_time)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
++ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
++ return rq;
++}
++
++static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct rb_node *rbnext = rb_next(&last->rb_node);
++ struct rb_node *rbprev = rb_prev(&last->rb_node);
++ struct request *next, *prev = NULL;
++
++ BUG_ON(list_empty(&bfqq->fifo));
++
++ /* Follow expired path, else get first next available. */
++ next = bfq_check_fifo(bfqq, last);
++ if (next) {
++ BUG_ON(next == last);
++ return next;
++ }
++
++ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
++
++ if (rbprev)
++ prev = rb_entry_rq(rbprev);
++
++ if (rbnext)
++ next = rb_entry_rq(rbnext);
++ else {
++ rbnext = rb_first(&bfqq->sort_list);
++ if (rbnext && rbnext != &last->rb_node)
++ next = rb_entry_rq(rbnext);
++ }
++
++ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
++}
++
++/* see the definition of bfq_async_charge_factor for details */
++static unsigned long bfq_serv_to_charge(struct request *rq,
++ struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
++ return blk_rq_sectors(rq);
++
++ /*
++ * If there are no weight-raised queues, then amplify service
++ * by just the async charge factor; otherwise amplify service
++ * by twice the async charge factor, to further reduce latency
++ * for weight-raised queues.
++ */
++ if (bfqq->bfqd->wr_busy_queues == 0)
++ return blk_rq_sectors(rq) * bfq_async_charge_factor;
++
++ return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
++}
++
++/**
++ * bfq_updated_next_req - update the queue after a new next_rq selection.
++ * @bfqd: the device data the queue belongs to.
++ * @bfqq: the queue to update.
++ *
++ * If the first request of a queue changes we make sure that the queue
++ * has enough budget to serve at least its first request (if the
++ * request has grown). We do this because if the queue has not enough
++ * budget for its first request, it has to go through two dispatch
++ * rounds to actually get it dispatched.
++ */
++static void bfq_updated_next_req(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ struct request *next_rq = bfqq->next_rq;
++ unsigned long new_budget;
++
++ if (!next_rq)
++ return;
++
++ if (bfqq == bfqd->in_service_queue)
++ /*
++ * In order not to break guarantees, budgets cannot be
++ * changed after an entity has been selected.
++ */
++ return;
++
++ BUG_ON(entity->tree != &st->active);
++ BUG_ON(entity == entity->sched_data->in_service_entity);
++
++ new_budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ if (entity->budget != new_budget) {
++ entity->budget = new_budget;
++ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++ new_budget);
++ bfq_requeue_bfqq(bfqd, bfqq);
++ }
++}
++
++static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
++{
++ u64 dur;
++
++ if (bfqd->bfq_wr_max_time > 0)
++ return bfqd->bfq_wr_max_time;
++
++ dur = bfqd->RT_prod;
++ do_div(dur, bfqd->peak_rate);
++
++ /*
++ * Limit duration between 3 and 13 seconds. Tests show that
++ * higher values than 13 seconds often yield the opposite of
++ * the desired result, i.e., worsen responsiveness by letting
++ * non-interactive and non-soft-real-time applications
++ * preserve weight raising for too long a time interval.
++ *
++ * On the other hand, lower values than 3 seconds make it
++ * difficult for most interactive tasks to complete their jobs
++ * before weight-raising finishes.
++ */
++ if (dur > msecs_to_jiffies(13000))
++ dur = msecs_to_jiffies(13000);
++ else if (dur < msecs_to_jiffies(3000))
++ dur = msecs_to_jiffies(3000);
++
++ return dur;
++}
++
++static void
++bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
++ struct bfq_io_cq *bic, bool bfq_already_existing)
++{
++ unsigned int old_wr_coeff;
++ bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
++
++ if (bic->saved_has_short_ttime)
++ bfq_mark_bfqq_has_short_ttime(bfqq);
++ else
++ bfq_clear_bfqq_has_short_ttime(bfqq);
++
++ if (bic->saved_IO_bound)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ else
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ if (unlikely(busy))
++ old_wr_coeff = bfqq->wr_coeff;
++
++ bfqq->wr_coeff = bic->saved_wr_coeff;
++ bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
++ BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt));
++ bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
++ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
++ time_is_before_jiffies(bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time))) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "resume state: switching off wr (%lu + %lu < %lu)",
++ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
++ jiffies);
++
++ bfqq->wr_coeff = 1;
++ }
++
++ /* make sure weight will be updated, however we got here */
++ bfqq->entity.prio_changed = 1;
++
++ if (likely(!busy))
++ return;
++
++ if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) {
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ } else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) {
++ bfqd->wr_busy_queues--;
++ BUG_ON(bfqd->wr_busy_queues < 0);
++ }
++}
++
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++ int process_refs, io_refs;
++
++ lockdep_assert_held(bfqq->bfqd->queue->queue_lock);
++
++ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st;
++ BUG_ON(process_refs < 0);
++ return process_refs;
++}
++
++/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
++static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_queue *item;
++ struct hlist_node *n;
++
++ hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
++ hlist_del_init(&item->burst_list_node);
++ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++ bfqd->burst_size = 1;
++ bfqd->burst_parent_entity = bfqq->entity.parent;
++}
++
++/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
++static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ /* Increment burst size to take into account also bfqq */
++ bfqd->burst_size++;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size);
++
++ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh);
++
++ if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
++ struct bfq_queue *pos, *bfqq_item;
++ struct hlist_node *n;
++
++ /*
++ * Enough queues have been activated shortly after each
++ * other to consider this burst as large.
++ */
++ bfqd->large_burst = true;
++ bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started");
++
++ /*
++ * We can now mark all queues in the burst list as
++ * belonging to a large burst.
++ */
++ hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
++ burst_list_node) {
++ bfq_mark_bfqq_in_large_burst(bfqq_item);
++ bfq_log_bfqq(bfqd, bfqq_item, "marked in large burst");
++ }
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ bfq_log_bfqq(bfqd, bfqq, "marked in large burst");
++
++ /*
++ * From now on, and until the current burst finishes, any
++ * new queue being activated shortly after the last queue
++ * was inserted in the burst can be immediately marked as
++ * belonging to a large burst. So the burst list is not
++ * needed any more. Remove it.
++ */
++ hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
++ burst_list_node)
++ hlist_del_init(&pos->burst_list_node);
++ } else /*
++ * Burst not yet large: add bfqq to the burst list. Do
++ * not increment the ref counter for bfqq, because bfqq
++ * is removed from the burst list before freeing bfqq
++ * in put_queue.
++ */
++ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++}
++
++/*
++ * If many queues belonging to the same group happen to be created
++ * shortly after each other, then the processes associated with these
++ * queues have typically a common goal. In particular, bursts of queue
++ * creations are usually caused by services or applications that spawn
++ * many parallel threads/processes. Examples are systemd during boot,
++ * or git grep. To help these processes get their job done as soon as
++ * possible, it is usually better to not grant either weight-raising
++ * or device idling to their queues.
++ *
++ * In this comment we describe, firstly, the reasons why this fact
++ * holds, and, secondly, the next function, which implements the main
++ * steps needed to properly mark these queues so that they can then be
++ * treated in a different way.
++ *
++ * The above services or applications benefit mostly from a high
++ * throughput: the quicker the requests of the activated queues are
++ * cumulatively served, the sooner the target job of these queues gets
++ * completed. As a consequence, weight-raising any of these queues,
++ * which also implies idling the device for it, is almost always
++ * counterproductive. In most cases it just lowers throughput.
++ *
++ * On the other hand, a burst of queue creations may be caused also by
++ * the start of an application that does not consist of a lot of
++ * parallel I/O-bound threads. In fact, with a complex application,
++ * several short processes may need to be executed to start-up the
++ * application. In this respect, to start an application as quickly as
++ * possible, the best thing to do is in any case to privilege the I/O
++ * related to the application with respect to all other
++ * I/O. Therefore, the best strategy to start as quickly as possible
++ * an application that causes a burst of queue creations is to
++ * weight-raise all the queues created during the burst. This is the
++ * exact opposite of the best strategy for the other type of bursts.
++ *
++ * In the end, to take the best action for each of the two cases, the
++ * two types of bursts need to be distinguished. Fortunately, this
++ * seems relatively easy, by looking at the sizes of the bursts. In
++ * particular, we found a threshold such that only bursts with a
++ * larger size than that threshold are apparently caused by
++ * services or commands such as systemd or git grep. For brevity,
++ * hereafter we call just 'large' these bursts. BFQ *does not*
++ * weight-raise queues whose creation occurs in a large burst. In
++ * addition, for each of these queues BFQ performs or does not perform
++ * idling depending on which choice boosts the throughput more. The
++ * exact choice depends on the device and request pattern at
++ * hand.
++ *
++ * Unfortunately, false positives may occur while an interactive task
++ * is starting (e.g., an application is being started). The
++ * consequence is that the queues associated with the task do not
++ * enjoy weight raising as expected. Fortunately these false positives
++ * are very rare. They typically occur if some service happens to
++ * start doing I/O exactly when the interactive task starts.
++ *
++ * Turning back to the next function, it implements all the steps
++ * needed to detect the occurrence of a large burst and to properly
++ * mark all the queues belonging to it (so that they can then be
++ * treated in a different way). This goal is achieved by maintaining a
++ * "burst list" that holds, temporarily, the queues that belong to the
++ * burst in progress. The list is then used to mark these queues as
++ * belonging to a large burst if the burst does become large. The main
++ * steps are the following.
++ *
++ * . when the very first queue is created, the queue is inserted into the
++ * list (as it could be the first queue in a possible burst)
++ *
++ * . if the current burst has not yet become large, and a queue Q that does
++ * not yet belong to the burst is activated shortly after the last time
++ * at which a new queue entered the burst list, then the function appends
++ * Q to the burst list
++ *
++ * . if, as a consequence of the previous step, the burst size reaches
++ * the large-burst threshold, then
++ *
++ * . all the queues in the burst list are marked as belonging to a
++ * large burst
++ *
++ * . the burst list is deleted; in fact, the burst list already served
++ * its purpose (keeping temporarily track of the queues in a burst,
++ * so as to be able to mark them as belonging to a large burst in the
++ * previous sub-step), and now is not needed any more
++ *
++ * . the device enters a large-burst mode
++ *
++ * . if a queue Q that does not belong to the burst is created while
++ * the device is in large-burst mode and shortly after the last time
++ * at which a queue either entered the burst list or was marked as
++ * belonging to the current large burst, then Q is immediately marked
++ * as belonging to a large burst.
++ *
++ * . if a queue Q that does not belong to the burst is created a while
++ * later than (i.e., not shortly after) the last time at which a queue
++ * either entered the burst list or was marked as belonging to the
++ * current large burst, then the current burst is deemed finished and:
++ *
++ * . the large-burst mode is reset if set
++ *
++ * . the burst list is emptied
++ *
++ * . Q is inserted in the burst list, as Q may be the first queue
++ * in a possible new burst (then the burst list contains just Q
++ * after this step).
++ */
++static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq is already in the burst list or is part of a large
++ * burst, or finally has just been split, then there is
++ * nothing else to do.
++ */
++ if (!hlist_unhashed(&bfqq->burst_list_node) ||
++ bfq_bfqq_in_large_burst(bfqq) ||
++ time_is_after_eq_jiffies(bfqq->split_time +
++ msecs_to_jiffies(10)))
++ return;
++
++ /*
++ * If bfqq's creation happens late enough, or bfqq belongs to
++ * a different group than the burst group, then the current
++ * burst is finished, and related data structures must be
++ * reset.
++ *
++ * In this respect, consider the special case where bfqq is
++ * the very first queue created after BFQ is selected for this
++ * device. In this case, last_ins_in_burst and
++ * burst_parent_entity are not yet significant when we get
++ * here. But it is easy to verify that, whether or not the
++ * following condition is true, bfqq will end up being
++ * inserted into the burst list. In particular the list will
++ * happen to contain only bfqq. And this is exactly what has
++ * to happen, as bfqq may be the first queue of the first
++ * burst.
++ */
++ if (time_is_before_jiffies(bfqd->last_ins_in_burst +
++ bfqd->bfq_burst_interval) ||
++ bfqq->entity.parent != bfqd->burst_parent_entity) {
++ bfqd->large_burst = false;
++ bfq_reset_burst_list(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "handle_burst: late activation or different group");
++ goto end;
++ }
++
++ /*
++ * If we get here, then bfqq is being activated shortly after the
++ * last queue. So, if the current burst is also large, we can mark
++ * bfqq as belonging to this large burst immediately.
++ */
++ if (bfqd->large_burst) {
++ bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst");
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ goto end;
++ }
++
++ /*
++ * If we get here, then a large-burst state has not yet been
++ * reached, but bfqq is being activated shortly after the last
++ * queue. Then we add bfqq to the burst.
++ */
++ bfq_add_to_burst(bfqd, bfqq);
++end:
++ /*
++ * At this point, bfqq either has been added to the current
++ * burst or has caused the current burst to terminate and a
++ * possible new burst to start. In particular, in the second
++ * case, bfqq has become the first queue in the possible new
++ * burst. In both cases last_ins_in_burst needs to be moved
++ * forward.
++ */
++ bfqd->last_ins_in_burst = jiffies;
++
++}
++
++static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ return entity->budget - entity->service;
++}
++
++/*
++ * If enough samples have been computed, return the current max budget
++ * stored in bfqd, which is dynamically updated according to the
++ * estimated disk peak rate; otherwise return the default max budget
++ */
++static int bfq_max_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < bfq_stats_min_budgets)
++ return bfq_default_max_budget;
++ else
++ return bfqd->bfq_max_budget;
++}
++
++/*
++ * Return min budget, which is a fraction of the current or default
++ * max budget (trying with 1/32)
++ */
++static int bfq_min_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < bfq_stats_min_budgets)
++ return bfq_default_max_budget / 32;
++ else
++ return bfqd->bfq_max_budget / 32;
++}
++
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool compensate,
++ enum bfqq_expiration reason);
++
++/*
++ * The next function, invoked after the input queue bfqq switches from
++ * idle to busy, updates the budget of bfqq. The function also tells
++ * whether the in-service queue should be expired, by returning
++ * true. The purpose of expiring the in-service queue is to give bfqq
++ * the chance to possibly preempt the in-service queue, and the reason
++ * for preempting the in-service queue is to achieve one of the two
++ * goals below.
++ *
++ * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
++ * expired because it has remained idle. In particular, bfqq may have
++ * expired for one of the following two reasons:
++ *
++ * - BFQ_BFQQ_NO_MORE_REQUEST bfqq did not enjoy any device idling and
++ * did not make it to issue a new request before its last request
++ * was served;
++ *
++ * - BFQ_BFQQ_TOO_IDLE bfqq did enjoy device idling, but did not issue
++ * a new request before the expiration of the idling-time.
++ *
++ * Even if bfqq has expired for one of the above reasons, the process
++ * associated with the queue may be however issuing requests greedily,
++ * and thus be sensitive to the bandwidth it receives (bfqq may have
++ * remained idle for other reasons: CPU high load, bfqq not enjoying
++ * idling, I/O throttling somewhere in the path from the process to
++ * the I/O scheduler, ...). But if, after every expiration for one of
++ * the above two reasons, bfqq has to wait for the service of at least
++ * one full budget of another queue before being served again, then
++ * bfqq is likely to get a much lower bandwidth or resource time than
++ * its reserved ones. To address this issue, two countermeasures need
++ * to be taken.
++ *
++ * First, the budget and the timestamps of bfqq need to be updated in
++ * a special way on bfqq reactivation: they need to be updated as if
++ * bfqq did not remain idle and did not expire. In fact, if they are
++ * computed as if bfqq expired and remained idle until reactivation,
++ * then the process associated with bfqq is treated as if, instead of
++ * being greedy, it stopped issuing requests when bfqq remained idle,
++ * and restarts issuing requests only on this reactivation. In other
++ * words, the scheduler does not help the process recover the "service
++ * hole" between bfqq expiration and reactivation. As a consequence,
++ * the process receives a lower bandwidth than its reserved one. In
++ * contrast, to recover this hole, the budget must be updated as if
++ * bfqq was not expired at all before this reactivation, i.e., it must
++ * be set to the value of the remaining budget when bfqq was
++ * expired. Along the same line, timestamps need to be assigned the
++ * value they had the last time bfqq was selected for service, i.e.,
++ * before last expiration. Thus timestamps need to be back-shifted
++ * with respect to their normal computation (see [1] for more details
++ * on this tricky aspect).
++ *
++ * Secondly, to allow the process to recover the hole, the in-service
++ * queue must be expired too, to give bfqq the chance to preempt it
++ * immediately. In fact, if bfqq has to wait for a full budget of the
++ * in-service queue to be completed, then it may become impossible to
++ * let the process recover the hole, even if the back-shifted
++ * timestamps of bfqq are lower than those of the in-service queue. If
++ * this happens for most or all of the holes, then the process may not
++ * receive its reserved bandwidth. In this respect, it is worth noting
++ * that, since the service of outstanding requests is not preemptible,
++ * a small fraction of the holes may be unrecoverable, thereby causing
++ * a small loss of bandwidth.
++ *
++ * The last important point is detecting whether bfqq does need this
++ * bandwidth recovery. In this respect, the next function deems the
++ * process associated with bfqq greedy, and thus allows it to recover
++ * the hole, if: 1) the process is waiting for the arrival of a new
++ * request (which implies that bfqq expired for one of the above two
++ * reasons), and 2) such a request has arrived soon. The first
++ * condition is controlled through the flag non_blocking_wait_rq,
++ * while the second through the flag arrived_in_time. If both
++ * conditions hold, then the function computes the budget in the
++ * above-described special way, and signals that the in-service queue
++ * should be expired. Timestamp back-shifting is done later in
++ * __bfq_activate_entity.
++ *
++ * 2. Reduce latency. Even if timestamps are not backshifted to let
++ * the process associated with bfqq recover a service hole, bfqq may
++ * however happen to have, after being (re)activated, a lower finish
++ * timestamp than the in-service queue. That is, the next budget of
++ * bfqq may have to be completed before the one of the in-service
++ * queue. If this is the case, then preempting the in-service queue
++ * allows this goal to be achieved, apart from the unpreemptible,
++ * outstanding requests mentioned above.
++ *
++ * Unfortunately, regardless of which of the above two goals one wants
++ * to achieve, service trees need first to be updated to know whether
++ * the in-service queue must be preempted. To have service trees
++ * correctly updated, the in-service queue must be expired and
++ * rescheduled, and bfqq must be scheduled too. This is one of the
++ * most costly operations (in future versions, the scheduling
++ * mechanism may be re-designed in such a way to make it possible to
++ * know whether preemption is needed without needing to update service
++ * trees). In addition, queue preemptions almost always cause random
++ * I/O, and thus loss of throughput. Because of these facts, the next
++ * function adopts the following simple scheme to avoid both costly
++ * operations and too frequent preemptions: it requests the expiration
++ * of the in-service queue (unconditionally) only for queues that need
++ * to recover a hole, or that either are weight-raised or deserve to
++ * be weight-raised.
++ */
++static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool arrived_in_time,
++ bool wr_or_deserves_wr)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
++ /*
++ * We do not clear the flag non_blocking_wait_rq here, as
++ * the latter is used in bfq_activate_bfqq to signal
++ * that timestamps need to be back-shifted (and is
++ * cleared right after).
++ */
++
++ /*
++		 * In the next assignment we rely on the fact that
++		 * neither entity->service nor entity->budget is updated
++		 * on expiration if bfqq is empty (see
++		 * __bfq_bfqq_recalc_budget). Thus both quantities
++ * remain unchanged after such an expiration, and the
++ * following statement therefore assigns to
++ * entity->budget the remaining budget on such an
++ * expiration. For clarity, entity->service is not
++ * updated on expiration in any case, and, in normal
++ * operation, is reset only when bfqq is selected for
++ * service (see bfq_get_next_queue).
++ */
++ BUG_ON(bfqq->max_budget < 0);
++ entity->budget = min_t(unsigned long,
++ bfq_bfqq_budget_left(bfqq),
++ bfqq->max_budget);
++
++ BUG_ON(entity->budget < 0);
++ return true;
++ }
++
++ BUG_ON(bfqq->max_budget < 0);
++ entity->budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(bfqq->next_rq, bfqq));
++ BUG_ON(entity->budget < 0);
++
++ bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
++ return wr_or_deserves_wr;
++}
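++
++/*
++ * A worked example for bfq_bfqq_update_budg_for_activation() above
++ * (figures are purely illustrative): suppose bfqq->max_budget is 16384
++ * sectors and bfqq was expired, while empty, with 4096 sectors of
++ * budget left and with the flag non_blocking_wait_rq set. If the new
++ * request arrives in time, entity->budget becomes min(4096, 16384) =
++ * 4096, i.e., the remaining budget is restored, and the function
++ * returns true, asking for the in-service queue to be expired. If
++ * instead bfqq was not waiting for a request, or the request arrived
++ * late, the budget is recomputed as max(16384, bfq_serv_to_charge())
++ * and true is returned only if bfqq is weight-raised or deserves to be
++ * (wr_or_deserves_wr).
++ */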
++
++static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ unsigned int old_wr_coeff,
++ bool wr_or_deserves_wr,
++ bool interactive,
++ bool in_burst,
++ bool soft_rt)
++{
++ if (old_wr_coeff == 1 && wr_or_deserves_wr) {
++ /* start a weight-raising period */
++ if (interactive) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ } else {
++ bfqq->wr_start_at_switch_to_srt = jiffies;
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
++ BFQ_SOFTRT_WEIGHT_FACTOR;
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ }
++ /*
++ * If needed, further reduce budget to make sure it is
++ * close to bfqq's backlog, so as to reduce the
++ * scheduling-error component due to a too large
++ * budget. Do not care about throughput consequences,
++ * but only about latency. Finally, do not assign a
++ * too small budget either, to avoid increasing
++ * latency by causing too frequent expirations.
++ */
++ bfqq->entity.budget = min_t(unsigned long,
++ bfqq->entity.budget,
++ 2 * bfq_min_budget(bfqd));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais starting at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ } else if (old_wr_coeff > 1) {
++ if (interactive) { /* update wr coeff and duration */
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ } else if (in_burst) {
++ bfqq->wr_coeff = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais ending at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->
++ wr_cur_max_time));
++ } else if (soft_rt) {
++ /*
++ * The application is now or still meeting the
++ * requirements for being deemed soft rt. We
++ * can then correctly and safely (re)charge
++ * the weight-raising duration for the
++ * application with the weight-raising
++ * duration for soft rt applications.
++ *
++ * In particular, doing this recharge now, i.e.,
++ * before the weight-raising period for the
++ * application finishes, reduces the probability
++ * of the following negative scenario:
++ * 1) the weight of a soft rt application is
++ * raised at startup (as for any newly
++ * created application),
++ * 2) since the application is not interactive,
++ * at a certain time weight-raising is
++ * stopped for the application,
++ * 3) at that time the application happens to
++ * still have pending requests, and hence
++ * is destined to not have a chance to be
++ * deemed soft rt before these requests are
++ * completed (see the comments to the
++ * function bfq_bfqq_softrt_next_start()
++ * for details on soft rt detection),
++ * 4) these pending requests experience a high
++ * latency because the application is not
++ * weight-raised while they are pending.
++ */
++ if (bfqq->wr_cur_max_time !=
++ bfqd->bfq_wr_rt_max_time) {
++ bfqq->wr_start_at_switch_to_srt =
++ bfqq->last_wr_start_finish;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
++ BFQ_SOFTRT_WEIGHT_FACTOR;
++ bfq_log_bfqq(bfqd, bfqq,
++ "switching to soft_rt wr");
++ } else
++ bfq_log_bfqq(bfqd, bfqq,
++ "moving forward soft_rt wr duration");
++ bfqq->last_wr_start_finish = jiffies;
++ }
++ }
++}
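++
++/*
++ * Example of one common path through bfq_update_bfqq_wr_on_rq_arrival()
++ * above (no specific default values are assumed): a queue that becomes
++ * busy with old_wr_coeff == 1 and that qualifies as soft real-time but
++ * not as interactive gets wr_coeff = bfqd->bfq_wr_coeff *
++ * BFQ_SOFTRT_WEIGHT_FACTOR and wr_cur_max_time =
++ * bfqd->bfq_wr_rt_max_time, and has its budget clamped to at most
++ * 2 * bfq_min_budget(bfqd), i.e., latency is favoured over throughput.
++ * A queue that is already in the soft real-time weight-raising period
++ * and still meets the requirements only gets the period recharged
++ * (last_wr_start_finish = jiffies).
++ */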
++
++static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ return bfqq->dispatched == 0 &&
++ time_is_before_jiffies(
++ bfqq->budget_timeout +
++ bfqd->bfq_wr_min_idle_time);
++}
++
++static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ int old_wr_coeff,
++ struct request *rq,
++ bool *interactive)
++{
++ bool soft_rt, in_burst, wr_or_deserves_wr,
++ bfqq_wants_to_preempt,
++ idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
++ /*
++ * See the comments on
++ * bfq_bfqq_update_budg_for_activation for
++ * details on the usage of the next variable.
++ */
++ arrived_in_time = ktime_get_ns() <=
++ RQ_BIC(rq)->ttime.last_end_request +
++ bfqd->bfq_slice_idle * 3;
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "bfq_add_request non-busy: "
++ "jiffies %lu, in_time %d, idle_long %d busyw %d "
++ "wr_coeff %u",
++ jiffies, arrived_in_time,
++ idle_for_long_time,
++ bfq_bfqq_non_blocking_wait_rq(bfqq),
++ old_wr_coeff);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ BUG_ON(bfqq == bfqd->in_service_queue);
++ bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
++
++ /*
++ * bfqq deserves to be weight-raised if:
++ * - it is sync,
++ * - it does not belong to a large burst,
++ * - it has been idle for enough time or is soft real-time,
++ * - is linked to a bfq_io_cq (it is not shared in any sense)
++ */
++ in_burst = bfq_bfqq_in_large_burst(bfqq);
++ soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
++ !in_burst &&
++ time_is_before_jiffies(bfqq->soft_rt_next_start);
++ *interactive =
++ !in_burst &&
++ idle_for_long_time;
++ wr_or_deserves_wr = bfqd->low_latency &&
++ (bfqq->wr_coeff > 1 ||
++ (bfq_bfqq_sync(bfqq) &&
++ bfqq->bic && (*interactive || soft_rt)));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "bfq_add_request: "
++ "in_burst %d, "
++ "soft_rt %d (next %lu), inter %d, bic %p",
++ bfq_bfqq_in_large_burst(bfqq), soft_rt,
++ bfqq->soft_rt_next_start,
++ *interactive,
++ bfqq->bic);
++
++ /*
++ * Using the last flag, update budget and check whether bfqq
++ * may want to preempt the in-service queue.
++ */
++ bfqq_wants_to_preempt =
++ bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
++ arrived_in_time,
++ wr_or_deserves_wr);
++
++ /*
++ * If bfqq happened to be activated in a burst, but has been
++ * idle for much more than an interactive queue, then we
++ * assume that, in the overall I/O initiated in the burst, the
++ * I/O associated with bfqq is finished. So bfqq does not need
++ * to be treated as a queue belonging to a burst
++ * anymore. Accordingly, we reset bfqq's in_large_burst flag
++ * if set, and remove bfqq from the burst list if it's
++ * there. We do not decrement burst_size, because the fact
++ * that bfqq does not need to belong to the burst list any
++ * more does not invalidate the fact that bfqq was created in
++ * a burst.
++ */
++ if (likely(!bfq_bfqq_just_created(bfqq)) &&
++ idle_for_long_time &&
++ time_is_before_jiffies(
++ bfqq->budget_timeout +
++ msecs_to_jiffies(10000))) {
++ hlist_del_init(&bfqq->burst_list_node);
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ }
++
++ bfq_clear_bfqq_just_created(bfqq);
++
++ if (!bfq_bfqq_IO_bound(bfqq)) {
++ if (arrived_in_time) {
++ bfqq->requests_within_timer++;
++ if (bfqq->requests_within_timer >=
++ bfqd->bfq_requests_within_timer)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ } else
++ bfqq->requests_within_timer = 0;
++ bfq_log_bfqq(bfqd, bfqq, "requests in time %d",
++ bfqq->requests_within_timer);
++ }
++
++ if (bfqd->low_latency) {
++ if (unlikely(time_is_after_jiffies(bfqq->split_time)))
++ /* wraparound */
++ bfqq->split_time =
++ jiffies - bfqd->bfq_wr_min_idle_time - 1;
++
++ if (time_is_before_jiffies(bfqq->split_time +
++ bfqd->bfq_wr_min_idle_time)) {
++ bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
++ old_wr_coeff,
++ wr_or_deserves_wr,
++ *interactive,
++ in_burst,
++ soft_rt);
++
++ if (old_wr_coeff != bfqq->wr_coeff)
++ bfqq->entity.prio_changed = 1;
++ }
++ }
++
++ bfqq->last_idle_bklogged = jiffies;
++ bfqq->service_from_backlogged = 0;
++ bfq_clear_bfqq_softrt_update(bfqq);
++
++ bfq_add_bfqq_busy(bfqd, bfqq);
++
++ /*
++ * Expire in-service queue only if preemption may be needed
++ * for guarantees. In this respect, the function
++ * next_queue_may_preempt just checks a simple, necessary
++ * condition, and not a sufficient condition based on
++ * timestamps. In fact, for the latter condition to be
++ * evaluated, timestamps would need first to be updated, and
++ * this operation is quite costly (see the comments on the
++ * function bfq_bfqq_update_budg_for_activation).
++ */
++ if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
++ bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
++ next_queue_may_preempt(bfqd)) {
++ struct bfq_queue *in_serv =
++ bfqd->in_service_queue;
++ BUG_ON(in_serv == bfqq);
++
++ bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
++ false, BFQ_BFQQ_PREEMPTED);
++ }
++}
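++
++/*
++ * Example of the final check in bfq_bfqq_handle_idle_busy_switch()
++ * above (the coefficient is hypothetical): if bfqq got back its
++ * residual budget, so that bfqq_wants_to_preempt is true, and is
++ * weight-raised with, say, wr_coeff == 30 while the in-service queue
++ * has wr_coeff == 1, and next_queue_may_preempt() confirms the simple
++ * necessary condition, then the in-service queue is expired with
++ * reason BFQ_BFQQ_PREEMPTED, so that bfqq can be scheduled according
++ * to its back-shifted timestamps.
++ */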
++
++static void bfq_add_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *next_rq, *prev;
++ unsigned int old_wr_coeff = bfqq->wr_coeff;
++ bool interactive = false;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s",
++ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A");
++
++ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqq->wr_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ bfqq->queued[rq_is_sync(rq)]++;
++ bfqd->queued++;
++
++ elv_rb_add(&bfqq->sort_list, rq);
++
++ /*
++ * Check if this request is a better next-to-serve candidate.
++ */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
++ BUG_ON(!next_rq);
++ bfqq->next_rq = next_rq;
++
++ /*
++ * Adjust priority tree position, if next_rq changes.
++ */
++ if (prev != bfqq->next_rq)
++ bfq_pos_tree_add_move(bfqd, bfqq);
++
++ if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
++ bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
++ rq, &interactive);
++ else {
++ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
++ time_is_before_jiffies(
++ bfqq->last_wr_start_finish +
++ bfqd->bfq_wr_min_inter_arr_async)) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "non-idle wrais starting, "
++ "wr_max_time %u wr_busy %d",
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqd->wr_busy_queues);
++ }
++ if (prev != bfqq->next_rq)
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ /*
++ * Assign jiffies to last_wr_start_finish in the following
++ * cases:
++ *
++ * . if bfqq is not going to be weight-raised, because, for
++ * non weight-raised queues, last_wr_start_finish stores the
++ * arrival time of the last request; as of now, this piece
++ * of information is used only for deciding whether to
++ * weight-raise async queues
++ *
++ * . if bfqq is not weight-raised, because, if bfqq is now
++ * switching to weight-raised, then last_wr_start_finish
++ * stores the time when weight-raising starts
++ *
++ * . if bfqq is interactive, because, regardless of whether
++ * bfqq is currently weight-raised, the weight-raising
++ * period must start or restart (this case is considered
++ * separately because it is not detected by the above
++ * conditions, if bfqq is already weight-raised)
++ *
++ * last_wr_start_finish has to be updated also if bfqq is soft
++ * real-time, because the weight-raising period is constantly
++ * restarted on idle-to-busy transitions for these queues, but
++ * this is already done in bfq_bfqq_handle_idle_busy_switch if
++ * needed.
++ */
++ if (bfqd->low_latency &&
++ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
++ bfqq->last_wr_start_finish = jiffies;
++}
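++
++/*
++ * Example for the last assignment in bfq_add_request() above: for a
++ * queue that is not weight-raised either before or after this arrival
++ * (old_wr_coeff == 1 and bfqq->wr_coeff == 1), last_wr_start_finish
++ * just records the arrival time of this request, which is the value
++ * that the async weight-raising check earlier in this function
++ * compares against bfqd->bfq_wr_min_inter_arr_async.
++ */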
++
++static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
++ struct bio *bio)
++{
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (!bic)
++ return NULL;
++
++ bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
++ if (bfqq)
++ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
++
++ return NULL;
++}
++
++static sector_t get_sdist(sector_t last_pos, struct request *rq)
++{
++ sector_t sdist = 0;
++
++ if (last_pos) {
++ if (last_pos < blk_rq_pos(rq))
++ sdist = blk_rq_pos(rq) - last_pos;
++ else
++ sdist = last_pos - blk_rq_pos(rq);
++ }
++
++ return sdist;
++}
++
++static void bfq_activate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ bfqd->rq_in_driver++;
++}
++
++static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ BUG_ON(bfqd->rq_in_driver == 0);
++ bfqd->rq_in_driver--;
++}
++
++static void bfq_remove_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ const int sync = rq_is_sync(rq);
++
++ BUG_ON(bfqq->entity.service > bfqq->entity.budget &&
++ bfqq == bfqd->in_service_queue);
++
++ if (bfqq->next_rq == rq) {
++ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ if (rq->queuelist.prev != &rq->queuelist)
++ list_del_init(&rq->queuelist);
++ BUG_ON(bfqq->queued[sync] == 0);
++ bfqq->queued[sync]--;
++ bfqd->queued--;
++ elv_rb_del(&bfqq->sort_list, rq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ bfqq->next_rq = NULL;
++
++ BUG_ON(bfqq->entity.budget < 0);
++
++ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
++ BUG_ON(bfqq->ref < 2); /* referred by rq and on tree */
++ bfq_del_bfqq_busy(bfqd, bfqq, false);
++ /*
++ * bfqq emptied. In normal operation, when
++ * bfqq is empty, bfqq->entity.service and
++ * bfqq->entity.budget must contain,
++ * respectively, the service received and the
++ * budget used last time bfqq emptied. These
++ * facts do not hold in this case, as at least
++ * this last removal occurred while bfqq is
++ * not in service. To avoid inconsistencies,
++ * reset both bfqq->entity.service and
++ * bfqq->entity.budget, if bfqq has still a
++ * process that may issue I/O requests to it.
++ */
++ bfqq->entity.budget = bfqq->entity.service = 0;
++ }
++
++ /*
++ * Remove queue from request-position tree as it is empty.
++ */
++ if (bfqq->pos_root) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++ }
++
++ if (rq->cmd_flags & REQ_META) {
++ BUG_ON(bfqq->meta_pending == 0);
++ bfqq->meta_pending--;
++ }
++ bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
++}
++
++static enum elv_merge bfq_merge(struct request_queue *q, struct request **req,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct request *__rq;
++
++ __rq = bfq_find_rq_fmerge(bfqd, bio);
++ if (__rq && elv_bio_merge_ok(__rq, bio)) {
++ *req = __rq;
++ return ELEVATOR_FRONT_MERGE;
++ }
++
++ return ELEVATOR_NO_MERGE;
++}
++
++static void bfq_merged_request(struct request_queue *q, struct request *req,
++ enum elv_merge type)
++{
++ if (type == ELEVATOR_FRONT_MERGE &&
++ rb_prev(&req->rb_node) &&
++ blk_rq_pos(req) <
++ blk_rq_pos(container_of(rb_prev(&req->rb_node),
++ struct request, rb_node))) {
++ struct bfq_queue *bfqq = RQ_BFQQ(req);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *prev, *next_rq;
++
++ /* Reposition request in its sort_list */
++ elv_rb_del(&bfqq->sort_list, req);
++ elv_rb_add(&bfqq->sort_list, req);
++ /* Choose next request to be served for bfqq */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
++ bfqd->last_position);
++ BUG_ON(!next_rq);
++ bfqq->next_rq = next_rq;
++ /*
++ * If next_rq changes, update both the queue's budget to
++ * fit the new request and the queue's position in its
++ * rq_pos_tree.
++ */
++ if (prev != bfqq->next_rq) {
++ bfq_updated_next_req(bfqd, bfqq);
++ bfq_pos_tree_add_move(bfqd, bfqq);
++ }
++ }
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static void bfq_bio_merged(struct request_queue *q, struct request *req,
++ struct bio *bio)
++{
++ bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf);
++}
++#endif
++
++static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
++
++ /*
++ * If next and rq belong to the same bfq_queue and next is older
++ * than rq, then reposition rq in the fifo (by substituting next
++ * with rq). Otherwise, if next and rq belong to different
++ * bfq_queues, never reposition rq: in fact, we would have to
++ * reposition it with respect to next's position in its own fifo,
++ * which would most certainly be too expensive with respect to
++ * the benefits.
++ */
++ if (bfqq == next_bfqq &&
++ !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
++ next->fifo_time < rq->fifo_time) {
++ list_del_init(&rq->queuelist);
++ list_replace_init(&next->queuelist, &rq->queuelist);
++ rq->fifo_time = next->fifo_time;
++ }
++
++ if (bfqq->next_rq == next)
++ bfqq->next_rq = rq;
++
++ bfq_remove_request(next);
++ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
++}
++
++/* Must be called with bfqq != NULL */
++static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
++{
++ BUG_ON(!bfqq);
++
++ if (bfq_bfqq_busy(bfqq)) {
++ bfqq->bfqd->wr_busy_queues--;
++ BUG_ON(bfqq->bfqd->wr_busy_queues < 0);
++ }
++ bfqq->wr_coeff = 1;
++ bfqq->wr_cur_max_time = 0;
++ bfqq->last_wr_start_finish = jiffies;
++ /*
++ * Trigger a weight change on the next invocation of
++ * __bfq_entity_update_weight_prio.
++ */
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "end_wr: wrais ending at %lu, rais_max_time %u",
++ bfqq->last_wr_start_finish,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d",
++ bfqq->bfqd->wr_busy_queues);
++}
++
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ if (bfqg->async_bfqq[i][j])
++ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
++ if (bfqg->async_idle_bfqq)
++ bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
++}
++
++static void bfq_end_wr(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ bfq_end_wr_async(bfqd);
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++}
++
++static sector_t bfq_io_struct_pos(void *io_struct, bool request)
++{
++ if (request)
++ return blk_rq_pos(io_struct);
++ else
++ return ((struct bio *)io_struct)->bi_iter.bi_sector;
++}
++
++static int bfq_rq_close_to_sector(void *io_struct, bool request,
++ sector_t sector)
++{
++ return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
++ BFQQ_CLOSE_THR;
++}
++
++static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ sector_t sector)
++{
++ struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
++ struct rb_node *parent, *node;
++ struct bfq_queue *__bfqq;
++
++ if (RB_EMPTY_ROOT(root))
++ return NULL;
++
++ /*
++ * First, if we find a request starting at the end of the last
++ * request, choose it.
++ */
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
++ if (__bfqq)
++ return __bfqq;
++
++ /*
++ * If the exact sector wasn't found, the parent of the NULL leaf
++ * will contain the closest sector (rq_pos_tree sorted by
++ * next_request position).
++ */
++ __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ if (blk_rq_pos(__bfqq->next_rq) < sector)
++ node = rb_next(&__bfqq->pos_node);
++ else
++ node = rb_prev(&__bfqq->pos_node);
++ if (!node)
++ return NULL;
++
++ __bfqq = rb_entry(node, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ return NULL;
++}
++
++static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
++ struct bfq_queue *cur_bfqq,
++ sector_t sector)
++{
++ struct bfq_queue *bfqq;
++
++ /*
++ * We shall notice if some of the queues are cooperating,
++ * e.g., working closely on the same area of the device. In
++ * that case, we can group them together and: 1) don't waste
++ * time idling, and 2) serve the union of their requests in
++ * the best possible order for throughput.
++ */
++ bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
++ if (!bfqq || bfqq == cur_bfqq)
++ return NULL;
++
++ return bfqq;
++}
++
++static struct bfq_queue *
++bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ int process_refs, new_process_refs;
++ struct bfq_queue *__bfqq;
++
++ /*
++ * If there are no process references on the new_bfqq, then it is
++ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++ * may have dropped their last reference (not just their last process
++ * reference).
++ */
++ if (!bfqq_process_refs(new_bfqq))
++ return NULL;
++
++ /* Avoid a circular list and skip interim queue merges. */
++ while ((__bfqq = new_bfqq->new_bfqq)) {
++ if (__bfqq == bfqq)
++ return NULL;
++ new_bfqq = __bfqq;
++ }
++
++ process_refs = bfqq_process_refs(bfqq);
++ new_process_refs = bfqq_process_refs(new_bfqq);
++ /*
++ * If the process for the bfqq has gone away, there is no
++ * sense in merging the queues.
++ */
++ if (process_refs == 0 || new_process_refs == 0)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++ new_bfqq->pid);
++
++ /*
++ * Merging is just a redirection: the requests of the process
++ * owning one of the two queues are redirected to the other queue.
++ * The latter queue, in its turn, is set as shared if this is the
++ * first time that the requests of some process are redirected to
++ * it.
++ *
++ * We redirect bfqq to new_bfqq and not the opposite, because we
++ * are in the context of the process owning bfqq, hence we have
++ * the io_cq of this process. So we can immediately configure this
++ * io_cq to redirect the requests of the process to new_bfqq.
++ *
++ * NOTE, even if new_bfqq coincides with the in-service queue, the
++ * io_cq of new_bfqq is not available, because, if the in-service
++ * queue is shared, bfqd->in_service_bic may not point to the
++ * io_cq of the in-service queue.
++ * Redirecting the requests of the process owning bfqq to the
++ * currently in-service queue is in any case the best option, as
++ * we feed the in-service queue with new requests close to the
++ * last request served and, by doing so, hopefully increase the
++ * throughput.
++ */
++ bfqq->new_bfqq = new_bfqq;
++ new_bfqq->ref += process_refs;
++ return new_bfqq;
++}
++
++static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
++ struct bfq_queue *new_bfqq)
++{
++ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
++ (bfqq->ioprio_class != new_bfqq->ioprio_class))
++ return false;
++
++ /*
++ * If either of the queues has already been detected as seeky,
++ * then merging it with the other queue is unlikely to lead to
++ * sequential I/O.
++ */
++ if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
++ return false;
++
++ /*
++ * Interleaved I/O is known to be done by (some) applications
++ * only for reads, so it does not make sense to merge async
++ * queues.
++ */
++ if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
++ return false;
++
++ return true;
++}
++
++/*
++ * If this function returns true, then bfqq cannot be merged. The idea
++ * is that true cooperation happens very early after processes start
++ * to do I/O. Usually, late cooperations are just accidental false
++ * positives. In case bfqq is weight-raised, such false positives
++ * would evidently degrade latency guarantees for bfqq.
++ */
++static bool wr_from_too_long(struct bfq_queue *bfqq)
++{
++ return bfqq->wr_coeff > 1 &&
++ time_is_before_jiffies(bfqq->last_wr_start_finish +
++ msecs_to_jiffies(100));
++}
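++
++/*
++ * For example, with the 100 ms threshold above, a queue whose
++ * weight-raising period started 150 ms ago is filtered out of queue
++ * merging by wr_from_too_long(), whereas a queue raised only 50 ms ago
++ * may still be considered for merging.
++ */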
++
++/*
++ * Attempt to schedule a merge of bfqq with the currently in-service
++ * queue or with a close queue among the scheduled queues. Return
++ * NULL if no merge was scheduled, a pointer to the shared bfq_queue
++ * structure otherwise.
++ *
++ * The OOM queue is not allowed to participate to cooperation: in fact, since
++ * the requests temporarily redirected to the OOM queue could be redirected
++ * again to dedicated queues at any time, the state needed to correctly
++ * handle merging with the OOM queue would be quite complex and expensive
++ * to maintain. Besides, in such a critical condition as an out-of-memory
++ * one, the benefits of queue merging may be of little relevance, or even
++ * negligible.
++ *
++ * Weight-raised queues can be merged only if their weight-raising
++ * period has just started. In fact cooperating processes are usually
++ * started together. Thus, with this filter we avoid false positives
++ * that would jeopardize low-latency guarantees.
++ *
++ * WARNING: queue merging may impair fairness among non-weight raised
++ * queues, for at least two reasons: 1) the original weight of a
++ * merged queue may change during the merged state, 2) even being the
++ * weight the same, a merged queue may be bloated with many more
++ * requests than the ones produced by its originally-associated
++ * process.
++ */
++static struct bfq_queue *
++bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ void *io_struct, bool request)
++{
++ struct bfq_queue *in_service_bfqq, *new_bfqq;
++
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
++ if (io_struct && wr_from_too_long(bfqq) &&
++ likely(bfqq != &bfqd->oom_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have looked for coop, but bfq%d wr",
++ bfqq->pid);
++
++ if (!io_struct ||
++ wr_from_too_long(bfqq) ||
++ unlikely(bfqq == &bfqd->oom_bfqq))
++ return NULL;
++
++ /* If there is only one backlogged queue, don't search. */
++ if (bfqd->busy_queues == 1)
++ return NULL;
++
++ in_service_bfqq = bfqd->in_service_queue;
++
++ if (in_service_bfqq && in_service_bfqq != bfqq &&
++ bfqd->in_service_bic && wr_from_too_long(in_service_bfqq)
++	    && likely(in_service_bfqq != &bfqd->oom_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have tried merge with in-service-queue, but wr");
++
++ if (!in_service_bfqq || in_service_bfqq == bfqq ||
++ !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) ||
++ unlikely(in_service_bfqq == &bfqd->oom_bfqq))
++ goto check_scheduled;
++
++ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ bfqq->entity.parent == in_service_bfqq->entity.parent &&
++ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
++ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
++ if (new_bfqq)
++ return new_bfqq;
++ }
++ /*
++ * Check whether there is a cooperator among currently scheduled
++ * queues. The only thing we need is that the bio/request is not
++ * NULL, as we need it to establish whether a cooperator exists.
++ */
++check_scheduled:
++ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
++ bfq_io_struct_pos(io_struct, request));
++
++ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent);
++
++ if (new_bfqq && wr_from_too_long(new_bfqq) &&
++ likely(new_bfqq != &bfqd->oom_bfqq) &&
++ bfq_may_be_close_cooperator(bfqq, new_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have merged with bfq%d, but wr",
++ new_bfqq->pid);
++
++ if (new_bfqq && !wr_from_too_long(new_bfqq) &&
++ likely(new_bfqq != &bfqd->oom_bfqq) &&
++ bfq_may_be_close_cooperator(bfqq, new_bfqq))
++ return bfq_setup_merge(bfqq, new_bfqq);
++
++ return NULL;
++}
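++
++/*
++ * Roughly, the filters in bfq_setup_cooperator() above combine as
++ * follows: a merge with the in-service queue is attempted only if
++ * neither queue is the OOM queue or has been weight-raised for more
++ * than 100 ms, the in-service bic is known, the incoming bio/request
++ * is close to bfqd->last_position, the two queues share the same
++ * parent entity, and bfq_may_be_close_cooperator() holds (both queues
++ * sync, neither seeky nor idle-class, same ioprio class). Otherwise a
++ * close cooperator is searched for among the scheduled queues.
++ */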
++
++static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
++{
++ struct bfq_io_cq *bic = bfqq->bic;
++
++ /*
++ * If !bfqq->bic, the queue is already shared or its requests
++ * have already been redirected to a shared queue; both idle window
++ * and weight raising state have already been saved. Do nothing.
++ */
++ if (!bic)
++ return;
++
++ bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
++ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
++ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
++ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
++ bic->saved_wr_coeff = bfqq->wr_coeff;
++ bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
++ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
++ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++}
++
++static void bfq_get_bic_reference(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic has a non-NULL value, the bic to which it belongs
++ * is about to begin using a shared bfq_queue.
++ */
++ if (bfqq->bic)
++ atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
++}
++
++static void
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++ (unsigned long) new_bfqq->pid);
++ /* Save weight raising and idle window of the merged queues */
++ bfq_bfqq_save_state(bfqq);
++ bfq_bfqq_save_state(new_bfqq);
++ if (bfq_bfqq_IO_bound(bfqq))
++ bfq_mark_bfqq_IO_bound(new_bfqq);
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ /*
++ * If bfqq is weight-raised, then let new_bfqq inherit
++ * weight-raising. To reduce false positives, neglect the case
++ * where bfqq has just been created, but has not yet made it
++ * to be weight-raised (which may happen because EQM may merge
++ * bfqq even before bfq_add_request is executed for the first
++ * time for bfqq). Handling this case would however be very
++ * easy, thanks to the flag just_created.
++ */
++ if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
++ new_bfqq->wr_coeff = bfqq->wr_coeff;
++ new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
++ new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
++ new_bfqq->wr_start_at_switch_to_srt =
++ bfqq->wr_start_at_switch_to_srt;
++ if (bfq_bfqq_busy(new_bfqq)) {
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ }
++
++ new_bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, new_bfqq,
++ "wr start after merge with %d, rais_max_time %u",
++ bfqq->pid,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
++ bfqq->wr_coeff = 1;
++ bfqq->entity.prio_changed = 1;
++ if (bfq_bfqq_busy(bfqq)) {
++ bfqd->wr_busy_queues--;
++ BUG_ON(bfqd->wr_busy_queues < 0);
++ }
++
++ }
++
++ bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
++ bfqd->wr_busy_queues);
++
++ /*
++ * Grab a reference to the bic, to prevent it from being destroyed
++ * before being possibly touched by a bfq_split_bfqq().
++ */
++ bfq_get_bic_reference(bfqq);
++ bfq_get_bic_reference(new_bfqq);
++ /*
++ * Merge queues (that is, let bic redirect its requests to new_bfqq)
++ */
++ bic_set_bfqq(bic, new_bfqq, 1);
++ bfq_mark_bfqq_coop(new_bfqq);
++ /*
++ * new_bfqq now belongs to at least two bics (it is a shared queue):
++ * set new_bfqq->bic to NULL. bfqq either:
++ * - does not belong to any bic any more, and hence bfqq->bic must
++ * be set to NULL, or
++ * - is a queue whose owning bics have already been redirected to a
++ * different queue, hence the queue is destined to not belong to
++ * any bic soon and bfqq->bic is already NULL (therefore the next
++ * assignment causes no harm).
++ */
++ new_bfqq->bic = NULL;
++ bfqq->bic = NULL;
++ /* release process reference to bfqq */
++ bfq_put_queue(bfqq);
++}
++
++static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ bool is_sync = op_is_sync(bio->bi_opf);
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq, *new_bfqq;
++
++ /*
++ * Disallow merge of a sync bio into an async request.
++ */
++ if (is_sync && !rq_is_sync(rq))
++ return false;
++
++ /*
++ * Lookup the bfqq that this bio will be queued with. Allow
++ * merge only if rq is queued there.
++ * Queue lock is held here.
++ */
++ bic = bfq_bic_lookup(bfqd, current->io_context);
++ if (!bic)
++ return false;
++
++ bfqq = bic_to_bfqq(bic, is_sync);
++ /*
++ * We take advantage of this function to perform an early merge
++ * of the queues of possible cooperating processes.
++ */
++ if (bfqq) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++ if (new_bfqq) {
++ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
++ /*
++ * If we get here, the bio will be queued in the
++ * shared queue, i.e., new_bfqq, so use new_bfqq
++ * to decide whether bio and rq can be merged.
++ */
++ bfqq = new_bfqq;
++ }
++ }
++
++ return bfqq == RQ_BFQQ(rq);
++}
++
++static int bfq_allow_rq_merge(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ return RQ_BFQQ(rq) == RQ_BFQQ(next);
++}
++
++/*
++ * Set the maximum time for the in-service queue to consume its
++ * budget. This prevents seeky processes from lowering the throughput.
++ * In practice, a time-slice service scheme is used with seeky
++ * processes.
++ */
++static void bfq_set_budget_timeout(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ unsigned int timeout_coeff;
++
++ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
++ timeout_coeff = 1;
++ else
++ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
++
++ bfqd->last_budget_start = ktime_get();
++
++ bfqq->budget_timeout = jiffies +
++ bfqd->bfq_timeout * timeout_coeff;
++
++ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff));
++}
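++
++/*
++ * Example for bfq_set_budget_timeout() above (the raising factor is
++ * hypothetical): a queue whose weight has been raised to, say, ten
++ * times its original weight, and that is not in the soft real-time
++ * weight-raising period, gets timeout_coeff = 10 and may thus keep the
++ * device for up to ten times bfqd->bfq_timeout before expiring for
++ * BFQ_BFQQ_BUDGET_TIMEOUT; a soft real-time queue always gets
++ * timeout_coeff = 1.
++ */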
++
++static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ if (bfqq) {
++ bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
++ bfq_mark_bfqq_must_alloc(bfqq);
++ bfq_clear_bfqq_fifo_expire(bfqq);
++
++ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++ BUG_ON(bfqq == bfqd->in_service_queue);
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
++ bfqq->wr_coeff > 1 &&
++ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ time_is_before_jiffies(bfqq->budget_timeout)) {
++ /*
++ * For soft real-time queues, move the start
++ * of the weight-raising period forward by the
++ * time the queue has not received any
++ * service. Otherwise, a relatively long
++ * service delay is likely to cause the
++ * weight-raising period of the queue to end,
++ * because of the short duration of the
++ * weight-raising period of a soft real-time
++ * queue. It is worth noting that this move
++ * is not so dangerous for the other queues,
++ * because soft real-time queues are not
++ * greedy.
++ *
++ * To not add a further variable, we use the
++ * overloaded field budget_timeout to
++ * determine for how long the queue has not
++ * received service, i.e., how much time has
++ * elapsed since the queue expired. However,
++ * this is a little imprecise, because
++ * budget_timeout is set to jiffies if bfqq
++ * not only expires, but also remains with no
++ * request.
++ */
++ if (time_after(bfqq->budget_timeout,
++ bfqq->last_wr_start_finish))
++ bfqq->last_wr_start_finish +=
++ jiffies - bfqq->budget_timeout;
++ else
++ bfqq->last_wr_start_finish = jiffies;
++
++ if (time_is_after_jiffies(bfqq->last_wr_start_finish)) {
++ pr_crit(
++ "BFQ WARNING:last %lu budget %lu jiffies %lu",
++ bfqq->last_wr_start_finish,
++ bfqq->budget_timeout,
++ jiffies);
++ pr_crit("diff %lu", jiffies -
++ max_t(unsigned long,
++ bfqq->last_wr_start_finish,
++ bfqq->budget_timeout));
++ bfqq->last_wr_start_finish = jiffies;
++ }
++ }
++
++ bfq_set_budget_timeout(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_in_service_queue, cur-budget = %d",
++ bfqq->entity.budget);
++ } else
++ bfq_log(bfqd, "set_in_service_queue: NULL");
++
++ bfqd->in_service_queue = bfqq;
++}
++
++/*
++ * Get and set a new queue for service.
++ */
++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
++
++ __bfq_set_in_service_queue(bfqd, bfqq);
++ return bfqq;
++}
++
++static void bfq_arm_slice_timer(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
++ struct bfq_io_cq *bic;
++ u32 sl;
++
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Processes have exited, don't wait. */
++ bic = bfqd->in_service_bic;
++ if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0)
++ return;
++
++ bfq_mark_bfqq_wait_request(bfqq);
++
++ /*
++ * We don't want to idle for seeks, but we do want to allow
++ * fair distribution of slice time for a process doing back-to-back
++	 * seeks. So allow a little bit of time for it to submit a new rq.
++ *
++ * To prevent processes with (partly) seeky workloads from
++ * being too ill-treated, grant them a small fraction of the
++ * assigned budget before reducing the waiting time to
++ * BFQ_MIN_TT. This happened to help reduce latency.
++ */
++ sl = bfqd->bfq_slice_idle;
++ /*
++ * Unless the queue is being weight-raised or the scenario is
++ * asymmetric, grant only minimum idle time if the queue
++ * is seeky. A long idling is preserved for a weight-raised
++	 * queue, or, more generally, in an asymmetric scenario,
++ * because a long idling is needed for guaranteeing to a queue
++ * its reserved share of the throughput (in particular, it is
++ * needed if the queue has a higher weight than some other
++ * queue).
++ */
++ if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
++ bfq_symmetric_scenario(bfqd))
++ sl = min_t(u32, sl, BFQ_MIN_TT);
++
++ bfqd->last_idling_start = ktime_get();
++ hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
++ HRTIMER_MODE_REL);
++ bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
++ bfq_log(bfqd, "arm idle: %ld/%ld ms",
++ sl / NSEC_PER_MSEC, bfqd->bfq_slice_idle / NSEC_PER_MSEC);
++}
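++
++/*
++ * Example for bfq_arm_slice_timer() above (the timing is hypothetical):
++ * if bfqd->bfq_slice_idle is 8 ms, a seeky, non-weight-raised queue in
++ * a symmetric scenario is granted only BFQ_MIN_TT of idling, while a
++ * weight-raised queue still gets the full 8 ms, because long idling is
++ * what protects its reserved share of the throughput.
++ */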
++
++/*
++ * In autotuning mode, max_budget is dynamically recomputed as the
++ * amount of sectors transferred in timeout at the estimated peak
++ * rate. This enables BFQ to utilize a full timeslice with a full
++ * budget, even if the in-service queue is served at peak rate. And
++ * this maximises throughput with sequential workloads.
++ */
++static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
++{
++ return (u64)bfqd->peak_rate * USEC_PER_MSEC *
++ jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
++}
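++
++/*
++ * Worked example for bfq_calc_max_budget() above (figures are
++ * hypothetical, only to make the arithmetic concrete): peak_rate is
++ * kept in sectors/usec, left-shifted by BFQ_RATE_SHIFT, so the final
++ * right shift cancels that factor out. For a device sustaining about
++ * 100 MB/s, i.e., roughly 0.2 sectors/usec, and a budget timeout of
++ * about 125 ms, the formula yields 0.2 * 1000 * 125 = 25000 sectors
++ * (~12.8 MB): the amount of sequential I/O the device can complete in
++ * one budget timeout.
++ */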
++
++/*
++ * Update parameters related to throughput and responsiveness, as a
++ * function of the estimated peak rate. See comments on
++ * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
++ */
++static void update_thr_responsiveness_params(struct bfq_data *bfqd)
++{
++ int dev_type = blk_queue_nonrot(bfqd->queue);
++
++ if (bfqd->bfq_user_max_budget == 0) {
++ bfqd->bfq_max_budget =
++ bfq_calc_max_budget(bfqd);
++ BUG_ON(bfqd->bfq_max_budget < 0);
++ bfq_log(bfqd, "new max_budget = %d",
++ bfqd->bfq_max_budget);
++ }
++
++ if (bfqd->device_speed == BFQ_BFQD_FAST &&
++ bfqd->peak_rate < device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_SLOW;
++ bfqd->RT_prod = R_slow[dev_type] *
++ T_slow[dev_type];
++ } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
++ bfqd->peak_rate > device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_FAST;
++ bfqd->RT_prod = R_fast[dev_type] *
++ T_fast[dev_type];
++ }
++
++ bfq_log(bfqd,
++"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec",
++ dev_type == 0 ? "ROT" : "NONROT",
++ bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
++ bfqd->device_speed == BFQ_BFQD_FAST ?
++ (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
++ (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
++ (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
++ BFQ_RATE_SHIFT);
++}
++
++static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq)
++{
++ if (rq != NULL) { /* new rq dispatch now, reset accordingly */
++		bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
++ bfqd->peak_rate_samples = 1;
++ bfqd->sequential_samples = 0;
++ bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
++ blk_rq_sectors(rq);
++ } else /* no new rq dispatched, just reset the number of samples */
++ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
++
++ bfq_log(bfqd,
++ "reset_rate_computation at end, sample %u/%u tot_sects %llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ bfqd->tot_sectors_dispatched);
++}
++
++static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
++{
++ u32 rate, weight, divisor;
++
++ /*
++ * For the convergence property to hold (see comments on
++ * bfq_update_peak_rate()) and for the assessment to be
++ * reliable, a minimum number of samples must be present, and
++ * a minimum amount of time must have elapsed. If not so, do
++ * not compute new rate. Just reset parameters, to get ready
++ * for a new evaluation attempt.
++ */
++ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
++ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) {
++ bfq_log(bfqd,
++ "update_rate_reset: only resetting, delta_first %lluus samples %d",
++ bfqd->delta_from_first>>10, bfqd->peak_rate_samples);
++ goto reset_computation;
++ }
++
++ /*
++ * If a new request completion has occurred after last
++ * dispatch, then, to approximate the rate at which requests
++ * have been served by the device, it is more precise to
++ * extend the observation interval to the last completion.
++ */
++ bfqd->delta_from_first =
++ max_t(u64, bfqd->delta_from_first,
++ bfqd->last_completion - bfqd->first_dispatch);
++
++ BUG_ON(bfqd->delta_from_first == 0);
++ /*
++ * Rate computed in sects/usec, and not sects/nsec, for
++ * precision issues.
++ */
++ rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
++ div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
++
++ bfq_log(bfqd,
++"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
++ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ rate > 20<<BFQ_RATE_SHIFT);
++
++ /*
++ * Peak rate not updated if:
++ * - the percentage of sequential dispatches is below 3/4 of the
++ * total, and rate is below the current estimated peak rate
++ * - rate is unreasonably high (> 20M sectors/sec)
++ */
++ if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
++ rate <= bfqd->peak_rate) ||
++ rate > 20<<BFQ_RATE_SHIFT) {
++ bfq_log(bfqd,
++ "update_rate_reset: goto reset, samples %u/%u rate/peak %llu/%llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ goto reset_computation;
++ } else {
++ bfq_log(bfqd,
++ "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ }
++
++ /*
++ * We have to update the peak rate, at last! To this purpose,
++ * we use a low-pass filter. We compute the smoothing constant
++ * of the filter as a function of the 'weight' of the new
++ * measured rate.
++ *
++ * As can be seen in next formulas, we define this weight as a
++ * quantity proportional to how sequential the workload is,
++ * and to how long the observation time interval is.
++ *
++ * The weight runs from 0 to 8. The maximum value of the
++ * weight, 8, yields the minimum value for the smoothing
++ * constant. At this minimum value for the smoothing constant,
++ * the measured rate contributes for half of the next value of
++ * the estimated peak rate.
++ *
++ * So, the first step is to compute the weight as a function
++ * of how sequential the workload is. Note that the weight
++ * cannot reach 9, because bfqd->sequential_samples cannot
++ * become equal to bfqd->peak_rate_samples, which, in its
++ * turn, holds true because bfqd->sequential_samples is not
++ * incremented for the first sample.
++ */
++ weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
++
++ /*
++ * Second step: further refine the weight as a function of the
++ * duration of the observation interval.
++ */
++ weight = min_t(u32, 8,
++ div_u64(weight * bfqd->delta_from_first,
++ BFQ_RATE_REF_INTERVAL));
++
++ /*
++ * Divisor ranging from 10, for minimum weight, to 2, for
++ * maximum weight.
++ */
++ divisor = 10 - weight;
++ BUG_ON(divisor == 0);
++
++ /*
++ * Finally, update peak rate:
++ *
++ * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
++ */
++ bfqd->peak_rate *= divisor-1;
++ bfqd->peak_rate /= divisor;
++ rate /= divisor; /* smoothing constant alpha = 1/divisor */
++
++ bfq_log(bfqd,
++ "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u",
++ divisor,
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT),
++ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT));
++
++ BUG_ON(bfqd->peak_rate == 0);
++ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
++
++ bfqd->peak_rate += rate;
++ update_thr_responsiveness_params(bfqd);
++ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
++
++reset_computation:
++ bfq_reset_rate_computation(bfqd, rq);
++}
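++
++/*
++ * Worked example for the low-pass filter in bfq_update_rate_reset()
++ * above: with 7 sequential samples out of 8 total, the first step
++ * gives weight = (9 * 7) / 8 = 7; if the observation interval equals
++ * BFQ_RATE_REF_INTERVAL, the second step leaves weight = min(8, 7) = 7,
++ * so divisor = 10 - 7 = 3 and the new estimate is
++ * peak_rate * 2/3 + rate / 3. A fully sequential and long enough
++ * observation reaches weight = 8, hence divisor = 2, and the measured
++ * rate then contributes exactly half of the new estimate, as stated in
++ * the comment above.
++ */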
++
++/*
++ * Update the read/write peak rate (the main quantity used for
++ * auto-tuning, see update_thr_responsiveness_params()).
++ *
++ * It is not trivial to estimate the peak rate (correctly): because of
++ * the presence of sw and hw queues between the scheduler and the
++ * device components that finally serve I/O requests, it is hard to
++ * say exactly when a given dispatched request is served inside the
++ * device, and for how long. As a consequence, it is hard to know
++ * precisely at what rate a given set of requests is actually served
++ * by the device.
++ *
++ * On the opposite end, the dispatch time of any request is trivially
++ * available, and, from this piece of information, the "dispatch rate"
++ * of requests can be immediately computed. So, the idea in the next
++ * function is to use what is known, namely request dispatch times
++ * (plus, when useful, request completion times), to estimate what is
++ * unknown, namely in-device request service rate.
++ *
++ * The main issue is that, because of the above facts, the rate at
++ * which a certain set of requests is dispatched over a certain time
++ * interval can vary greatly with respect to the rate at which the
++ * same requests are then served. But, since the size of any
++ * intermediate queue is limited, and the service scheme is lossless
++ * (no request is silently dropped), the following obvious convergence
++ * property holds: the number of requests dispatched MUST become
++ * closer and closer to the number of requests completed as the
++ * observation interval grows. This is the key property used in
++ * the next function to estimate the peak service rate as a function
++ * of the observed dispatch rate. The function assumes to be invoked
++ * on every request dispatch.
++ */
++static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
++{
++ u64 now_ns = ktime_get_ns();
++
++ if (bfqd->peak_rate_samples == 0) { /* first dispatch */
++ bfq_log(bfqd,
++ "update_peak_rate: goto reset, samples %d",
++			bfqd->peak_rate_samples);
++ bfq_reset_rate_computation(bfqd, rq);
++ goto update_last_values; /* will add one sample */
++ }
++
++ /*
++ * Device idle for very long: the observation interval lasting
++ * up to this dispatch cannot be a valid observation interval
++ * for computing a new peak rate (similarly to the late-
++ * completion event in bfq_completed_request()). Go to
++ * update_rate_and_reset to have the following three steps
++ * taken:
++ * - close the observation interval at the last (previous)
++ * request dispatch or completion
++ * - compute rate, if possible, for that observation interval
++ * - start a new observation interval with this dispatch
++ */
++ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
++ bfqd->rq_in_driver == 0) {
++ bfq_log(bfqd,
++"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d",
++ (now_ns - bfqd->last_dispatch)>>10,
++			bfqd->peak_rate_samples);
++ goto update_rate_and_reset;
++ }
++
++ /* Update sampling information */
++ bfqd->peak_rate_samples++;
++
++ if ((bfqd->rq_in_driver > 0 ||
++ now_ns - bfqd->last_completion < BFQ_MIN_TT)
++ && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
++ bfqd->sequential_samples++;
++
++ bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
++
++ /* Reset max observed rq size every 32 dispatches */
++ if (likely(bfqd->peak_rate_samples % 32))
++ bfqd->last_rq_max_size =
++ max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
++ else
++ bfqd->last_rq_max_size = blk_rq_sectors(rq);
++
++ bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
++
++ bfq_log(bfqd,
++ "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ bfqd->tot_sectors_dispatched,
++ bfqd->delta_from_first>>10);
++
++ /* Target observation interval not yet reached, go on sampling */
++ if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
++ goto update_last_values;
++
++update_rate_and_reset:
++ bfq_update_rate_reset(bfqd, rq);
++update_last_values:
++ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
++ bfqd->last_dispatch = now_ns;
++
++ bfq_log(bfqd,
++ "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu",
++ (now_ns - bfqd->first_dispatch)>>10,
++ (unsigned long long) bfqd->last_position,
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ bfq_log(bfqd,
++ "update_peak_rate: samples at end %d", bfqd->peak_rate_samples);
++}
++
++/*
++ * Move request from internal lists to the dispatch list of the request queue
++ */
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ /*
++ * For consistency, the next instruction should have been executed
++ * after removing the request from the queue and dispatching it.
++ * We execute instead this instruction before bfq_remove_request()
++ * (and hence introduce a temporary inconsistency), for efficiency.
++	 * In fact, in a forced_dispatch, this prevents the two counters
++	 * related to bfqq->dispatched from being uselessly decremented, if
++	 * bfqq is not in service, and then incremented again after
++	 * incrementing bfqq->dispatched.
++ */
++ bfqq->dispatched++;
++ bfq_update_peak_rate(q->elevator->elevator_data, rq);
++
++ bfq_remove_request(rq);
++ elv_dispatch_sort(q, rq);
++}
++
++static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ /*
++ * If this bfqq is shared between multiple processes, check
++ * to make sure that those processes are still issuing I/Os
++ * within the mean seek distance. If not, it may be time to
++ * break the queues apart again.
++ */
++ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
++ bfq_mark_bfqq_split_coop(bfqq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ if (bfqq->dispatched == 0)
++ /*
++ * Overloading budget_timeout field to store
++ * the time at which the queue remains with no
++ * backlog and no outstanding request; used by
++ * the weight-raising mechanism.
++ */
++ bfqq->budget_timeout = jiffies;
++
++ bfq_del_bfqq_busy(bfqd, bfqq, true);
++ } else {
++ bfq_requeue_bfqq(bfqd, bfqq);
++ /*
++ * Resort priority tree of potential close cooperators.
++ */
++ bfq_pos_tree_add_move(bfqd, bfqq);
++ }
++
++ /*
++ * All in-service entities must have been properly deactivated
++ * or requeued before executing the next function, which
++	 * resets all in-service entities as no more in service.
++ */
++ __bfq_bfqd_reset_in_service(bfqd);
++}
++
++/**
++ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
++ * @bfqd: device data.
++ * @bfqq: queue to update.
++ * @reason: reason for expiration.
++ *
++ * Handle the feedback on @bfqq budget at queue expiration.
++ * See the body for detailed comments.
++ */
++static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ enum bfqq_expiration reason)
++{
++ struct request *next_rq;
++ int budget, min_budget;
++
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ min_budget = bfq_min_budget(bfqd);
++
++ if (bfqq->wr_coeff == 1)
++ budget = bfqq->max_budget;
++ else /*
++ * Use a constant, low budget for weight-raised queues,
++ * to help achieve a low latency. Keep it slightly higher
++ * than the minimum possible budget, to cause a little
++ * bit fewer expirations.
++ */
++ budget = 2 * min_budget;
++
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
++ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
++ budget, bfq_min_budget(bfqd));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
++
++ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
++ switch (reason) {
++ /*
++ * Caveat: in all the following cases we trade latency
++ * for throughput.
++ */
++ case BFQ_BFQQ_TOO_IDLE:
++ /*
++ * This is the only case where we may reduce
++ * the budget: if there is no request of the
++ * process still waiting for completion, then
++ * we assume (tentatively) that the timer has
++ * expired because the batch of requests of
++ * the process could have been served with a
++			 * smaller budget. Hence, betting that the
++ * process will behave in the same way when it
++ * becomes backlogged again, we reduce its
++ * next budget. As long as we guess right,
++ * this budget cut reduces the latency
++ * experienced by the process.
++ *
++ * However, if there are still outstanding
++ * requests, then the process may have not yet
++ * issued its next request just because it is
++ * still waiting for the completion of some of
++ * the still outstanding ones. So in this
++ * subcase we do not reduce its budget, on the
++ * contrary we increase it to possibly boost
++ * the throughput, as discussed in the
++ * comments to the BUDGET_TIMEOUT case.
++ */
++ if (bfqq->dispatched > 0) /* still outstanding reqs */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ else {
++ if (budget > 5 * min_budget)
++ budget -= 4 * min_budget;
++ else
++ budget = min_budget;
++ }
++ break;
++ case BFQ_BFQQ_BUDGET_TIMEOUT:
++ /*
++ * We double the budget here because it gives
++ * the chance to boost the throughput if this
++ * is not a seeky process (and has bumped into
++ * this timeout because of, e.g., ZBR).
++ */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_BUDGET_EXHAUSTED:
++ /*
++ * The process still has backlog, and did not
++ * let either the budget timeout or the disk
++ * idling timeout expire. Hence it is not
++ * seeky, has a short thinktime and may be
++ * happy with a higher budget too. So
++ * definitely increase the budget of this good
++ * candidate to boost the disk throughput.
++ */
++ budget = min(budget * 4, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_NO_MORE_REQUESTS:
++ /*
++ * For queues that expire for this reason, it
++ * is particularly important to keep the
++ * budget close to the actual service they
++ * need. Doing so reduces the timestamp
++ * misalignment problem described in the
++ * comments in the body of
++ * __bfq_activate_entity. In fact, suppose
++ * that a queue systematically expires for
++ * BFQ_BFQQ_NO_MORE_REQUESTS and presents a
++ * new request in time to enjoy timestamp
++ * back-shifting. The larger the budget of the
++ * queue is with respect to the service the
++ * queue actually requests in each service
++ * slot, the more times the queue can be
++ * reactivated with the same virtual finish
++ * time. It follows that, even if this finish
++ * time is pushed to the system virtual time
++ * to reduce the consequent timestamp
++ * misalignment, the queue unjustly enjoys for
++ * many re-activations a lower finish time
++ * than all newly activated queues.
++ *
++ * The service needed by bfqq is measured
++ * quite precisely by bfqq->entity.service.
++ * Since bfqq does not enjoy device idling,
++ * bfqq->entity.service is equal to the number
++ * of sectors that the process associated with
++ * bfqq requested to read/write before waiting
++ * for request completions, or blocking for
++ * other reasons.
++ */
++ budget = max_t(int, bfqq->entity.service, min_budget);
++ break;
++ default:
++ return;
++ }
++ } else if (!bfq_bfqq_sync(bfqq))
++ /*
++		 * Async queues always get the maximum possible
++ * budget, as for them we do not care about latency
++ * (in addition, their ability to dispatch is limited
++ * by the charging factor).
++ */
++ budget = bfqd->bfq_max_budget;
++
++ bfqq->max_budget = budget;
++
++ if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
++ !bfqd->bfq_user_max_budget)
++ bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
++
++ /*
++ * If there is still backlog, then assign a new budget, making
++ * sure that it is large enough for the next request. Since
++ * the finish time of bfqq must be kept in sync with the
++ * budget, be sure to call __bfq_bfqq_expire() *after* this
++ * update.
++ *
++ * If there is no backlog, then no need to update the budget;
++ * it will be updated on the arrival of a new request.
++ */
++ next_rq = bfqq->next_rq;
++ if (next_rq) {
++ BUG_ON(reason == BFQ_BFQQ_TOO_IDLE ||
++ reason == BFQ_BFQQ_NO_MORE_REQUESTS);
++ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ BUG_ON(!bfq_bfqq_busy(bfqq));
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
++ next_rq ? blk_rq_sectors(next_rq) : 0,
++ bfqq->entity.budget);
++}
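++
++/*
++ * Worked example for __bfq_bfqq_recalc_budget() above (hypothetical
++ * numbers, for illustration only), for a sync queue with wr_coeff == 1:
++ * assume min_budget is 32 sectors, the current budget is 512 sectors,
++ * and bfqd->bfq_max_budget is 16384.
++ * - BFQ_BFQQ_TOO_IDLE with no outstanding requests: 512 > 5 * 32, so
++ *   the budget shrinks to 512 - 4 * 32 = 384;
++ * - BFQ_BFQQ_TOO_IDLE with outstanding requests, or
++ *   BFQ_BFQQ_BUDGET_TIMEOUT: the budget doubles to 1024;
++ * - BFQ_BFQQ_BUDGET_EXHAUSTED: the budget quadruples to 2048;
++ * - BFQ_BFQQ_NO_MORE_REQUESTS: the budget becomes the service actually
++ *   received, but never less than min_budget.
++ * The doubled/quadrupled values are capped at bfqd->bfq_max_budget.
++ */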
++
++/*
++ * Return true if the process associated with bfqq is "slow". The slow
++ * flag is used, in addition to the budget timeout, to reduce the
++ * amount of service provided to seeky processes, and thus reduce
++ * their chances to lower the throughput. More details in the comments
++ * on the function bfq_bfqq_expire().
++ *
++ * An important observation is in order: as discussed in the comments
++ * on the function bfq_update_peak_rate(), with devices with internal
++ * queues, it is hard if ever possible to know when and for how long
++ * an I/O request is processed by the device (apart from the trivial
++ * I/O pattern where a new request is dispatched only after the
++ * previous one has been completed). This makes it hard to evaluate
++ * the real rate at which the I/O requests of each bfq_queue are
++ * served. In fact, for an I/O scheduler like BFQ, serving a
++ * bfq_queue means just dispatching its requests during its service
++ * slot (i.e., until the budget of the queue is exhausted, or the
++ * queue remains idle, or, finally, a timeout fires). But, during the
++ * service slot of a bfq_queue, around 100 ms at most, the device may
++ * be even still processing requests of bfq_queues served in previous
++ * service slots. On the opposite end, the requests of the in-service
++ * bfq_queue may be completed after the service slot of the queue
++ * finishes.
++ *
++ * Anyway, unless more sophisticated solutions are used
++ * (where possible), the sum of the sizes of the requests dispatched
++ * during the service slot of a bfq_queue is probably the only
++ * approximation available for the service received by the bfq_queue
++ * during its service slot. And this sum is the quantity used in this
++ * function to evaluate the I/O speed of a process.
++ */
++static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ bool compensate, enum bfqq_expiration reason,
++ unsigned long *delta_ms)
++{
++ ktime_t delta_ktime;
++ u32 delta_usecs;
++	bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekiness */
++
++ if (!bfq_bfqq_sync(bfqq))
++ return false;
++
++ if (compensate)
++ delta_ktime = bfqd->last_idling_start;
++ else
++ delta_ktime = ktime_get();
++ delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
++ delta_usecs = ktime_to_us(delta_ktime);
++
++ /* don't use too short time intervals */
++ if (delta_usecs < 1000) {
++ if (blk_queue_nonrot(bfqd->queue))
++ /*
++			 * give the same worst-case guarantees as
++			 * idling would for seeky queues
++ */
++ *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
++ else /* charge at least one seek */
++ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
++
++ bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs);
++
++ return slow;
++ }
++
++ *delta_ms = delta_usecs / USEC_PER_MSEC;
++
++ /*
++ * Use only long (> 20ms) intervals to filter out excessive
++ * spikes in service rate estimation.
++ */
++ if (delta_usecs > 20000) {
++ /*
++ * Caveat for rotational devices: processes doing I/O
++ * in the slower disk zones tend to be slow(er) even
++ * if not seeky. In this respect, the estimated peak
++ * rate is likely to be an average over the disk
++ * surface. Accordingly, to not be too harsh with
++ * unlucky processes, a process is deemed slow only if
++ * its rate has been lower than half of the estimated
++ * peak rate.
++ */
++ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
++ bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d",
++ bfqq->entity.service, bfqd->bfq_max_budget);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
++
++ return slow;
++}
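++
++/*
++ * Worked example for bfq_bfqq_is_slow() above (hypothetical numbers,
++ * for illustration only): suppose the observation interval is
++ * delta_usecs = 50000 (50 ms > 20 ms, so the rate check applies), the
++ * queue received entity.service = 1000 sectors in that interval, and
++ * bfqd->bfq_max_budget = 16384. Since 1000 < 16384 / 2, the queue is
++ * deemed slow. Had the interval been shorter than 1 ms, the seekiness
++ * of the queue would have decided the outcome instead, with *delta_ms
++ * set to BFQ_MIN_TT (non-rotational) or bfq_slice_idle (rotational),
++ * converted to milliseconds.
++ */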
++
++/*
++ * To be deemed as soft real-time, an application must meet two
++ * requirements. First, the application must not require an average
++ * bandwidth higher than the approximate bandwidth required to play back or
++ * record a compressed high-definition video.
++ * The next function is invoked on the completion of the last request of a
++ * batch, to compute the next-start time instant, soft_rt_next_start, such
++ * that, if the next request of the application does not arrive before
++ * soft_rt_next_start, then the above requirement on the bandwidth is met.
++ *
++ * The second requirement is that the request pattern of the application is
++ * isochronous, i.e., that, after issuing a request or a batch of requests,
++ * the application stops issuing new requests until all its pending requests
++ * have been completed. After that, the application may issue a new batch,
++ * and so on.
++ * For this reason the next function is invoked to compute
++ * soft_rt_next_start only for applications that meet this requirement,
++ * whereas soft_rt_next_start is set to infinity for applications that do
++ * not.
++ *
++ * Unfortunately, even a greedy application may happen to behave in an
++ * isochronous way if the CPU load is high. In fact, the application may
++ * stop issuing requests while the CPUs are busy serving other processes,
++ * then restart, then stop again for a while, and so on. In addition, if
++ * the disk achieves a low enough throughput with the request pattern
++ * issued by the application (e.g., because the request pattern is random
++ * and/or the device is slow), then the application may meet the above
++ * bandwidth requirement too. To prevent such a greedy application from being
++ * deemed as soft real-time, a further rule is used in the computation of
++ * soft_rt_next_start: soft_rt_next_start must be higher than the current
++ * time plus the maximum time for which the arrival of a request is waited
++ * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
++ * This filters out greedy applications, as the latter issue instead their
++ * next request as soon as possible after the last one has been completed
++ * (in contrast, when a batch of requests is completed, a soft real-time
++ * application spends some time processing data).
++ *
++ * Unfortunately, the last filter may easily generate false positives if
++ * only bfqd->bfq_slice_idle is used as a reference time interval and one
++ * or both the following cases occur:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or higher
++ * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
++ * HZ=100.
++ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
++ * for a while, then suddenly 'jump' by several units to recover the lost
++ * increments. This seems to happen, e.g., inside virtual machines.
++ * To address this issue, we do not use as a reference time interval just
++ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
++ * particular we add the minimum number of jiffies for which the filter
++ * seems to be quite precise also in embedded systems and KVM/QEMU virtual
++ * machines.
++ */
++static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq,
++"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u",
++ bfqq->service_from_backlogged,
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate));
++
++ return max(bfqq->last_idle_bklogged +
++ HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
++}
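++
++/*
++ * Worked example for bfq_bfqq_softrt_next_start() above (hypothetical
++ * numbers, for illustration only): with HZ = 100, a batch of
++ * service_from_backlogged = 2048 sectors (1 MiB), and a soft real-time
++ * threshold bfq_wr_max_softrt_rate = 7000 sectors/sec, the first
++ * operand of max() is last_idle_bklogged + 100 * 2048 / 7000, i.e.,
++ * about 29 jiffies (~290 ms) after the queue last became idle with no
++ * backlog. The second operand enforces the lower bound of
++ * bfq_slice_idle plus 4 jiffies from now, which filters out greedy
++ * applications as explained in the comments above.
++ */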
++
++/*
++ * Return the farthest future time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_greatest_from_now(void)
++{
++ return jiffies + MAX_JIFFY_OFFSET;
++}
++
++/*
++ * Return the farthest past time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_smallest_from_now(void)
++{
++ return jiffies - MAX_JIFFY_OFFSET;
++}
++
++/**
++ * bfq_bfqq_expire - expire a queue.
++ * @bfqd: device owning the queue.
++ * @bfqq: the queue to expire.
++ * @compensate: if true, compensate for the time spent idling.
++ * @reason: the reason causing the expiration.
++ *
++ * If the process associated with bfqq does slow I/O (e.g., because it
++ * issues random requests), we charge bfqq with the time it has been
++ * in service instead of the service it has received (see
++ * bfq_bfqq_charge_time for details on how this goal is achieved). As
++ * a consequence, bfqq will typically get higher timestamps upon
++ * reactivation, and hence it will be rescheduled as if it had
++ * received more service than what it has actually received. In the
++ * end, bfqq receives less service in proportion to how slowly its
++ * associated process consumes its budgets (and hence how seriously it
++ * tends to lower the throughput). In addition, this time-charging
++ * strategy guarantees time fairness among slow processes. In
++ * contrast, if the process associated with bfqq is not slow, we
++ * charge bfqq exactly with the service it has received.
++ *
++ * Charging time to the first type of queues and the exact service to
++ * the other has the effect of using the WF2Q+ policy to schedule the
++ * former on a timeslice basis, without violating service domain
++ * guarantees among the latter.
++ */
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool compensate,
++ enum bfqq_expiration reason)
++{
++ bool slow;
++ unsigned long delta = 0;
++ struct bfq_entity *entity = &bfqq->entity;
++ int ref;
++
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ /*
++ * Check whether the process is slow (see bfq_bfqq_is_slow).
++ */
++ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
++
++ /*
++ * Increase service_from_backlogged before next statement,
++ * because the possible next invocation of
++ * bfq_bfqq_charge_time would likely inflate
++ * entity->service. In contrast, service_from_backlogged must
++ * contain real service, to enable the soft real-time
++ * heuristic to correctly compute the bandwidth consumed by
++ * bfqq.
++ */
++ bfqq->service_from_backlogged += entity->service;
++
++ /*
++ * As above explained, charge slow (typically seeky) and
++ * timed-out queues with the time and not the service
++ * received, to favor sequential workloads.
++ *
++ * Processes doing I/O in the slower disk zones will tend to
++ * be slow(er) even if not seeky. Therefore, since the
++ * estimated peak rate is actually an average over the disk
++	 * surface, these processes may time out just for bad luck. To
++ * avoid punishing them, do not charge time to processes that
++ * succeeded in consuming at least 2/3 of their budget. This
++ * allows BFQ to preserve enough elasticity to still perform
++	 * bandwidth, and not time, distribution with mildly unlucky
++ * or quasi-sequential processes.
++ */
++ if (bfqq->wr_coeff == 1 &&
++ (slow ||
++ (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++ bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
++ bfq_bfqq_charge_time(bfqd, bfqq, delta);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ if (reason == BFQ_BFQQ_TOO_IDLE &&
++ entity->service <= 2 * entity->budget / 10)
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ if (bfqd->low_latency && bfqq->wr_coeff == 1)
++ bfqq->last_wr_start_finish = jiffies;
++
++ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ /*
++ * If we get here, and there are no outstanding
++ * requests, then the request pattern is isochronous
++ * (see the comments on the function
++ * bfq_bfqq_softrt_next_start()). Thus we can compute
++ * soft_rt_next_start. If, instead, the queue still
++ * has outstanding requests, then we have to wait for
++ * the completion of all the outstanding requests to
++ * discover whether the request pattern is actually
++ * isochronous.
++ */
++ BUG_ON(bfqd->busy_queues < 1);
++ if (bfqq->dispatched == 0) {
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq, "new soft_rt_next %lu",
++ bfqq->soft_rt_next_start);
++ } else {
++ /*
++ * The application is still waiting for the
++ * completion of one or more requests:
++ * prevent it from possibly being incorrectly
++ * deemed as soft real-time by setting its
++ * soft_rt_next_start to infinity. In fact,
++ * without this assignment, the application
++ * would be incorrectly deemed as soft
++ * real-time if:
++ * 1) it issued a new request before the
++ * completion of all its in-flight
++ * requests, and
++ * 2) at that time, its soft_rt_next_start
++ * happened to be in the past.
++ */
++ bfqq->soft_rt_next_start =
++ bfq_greatest_from_now();
++ /*
++ * Schedule an update of soft_rt_next_start to when
++ * the task may be discovered to be isochronous.
++ */
++ bfq_mark_bfqq_softrt_update(bfqq);
++ }
++ }
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)",
++ reason, slow, bfqq->dispatched,
++ bfq_bfqq_has_short_ttime(bfqq), entity->weight);
++
++ /*
++ * Increase, decrease or leave budget unchanged according to
++ * reason.
++ */
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
++ BUG_ON(bfqq->next_rq == NULL &&
++ bfqq->entity.budget < bfqq->entity.service);
++ ref = bfqq->ref;
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ BUG_ON(ref > 1 &&
++ !bfq_bfqq_busy(bfqq) && reason == BFQ_BFQQ_BUDGET_EXHAUSTED &&
++ !bfq_class_idle(bfqq));
++
++ /* mark bfqq as waiting a request only if a bic still points to it */
++ if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
++ reason != BFQ_BFQQ_BUDGET_TIMEOUT &&
++ reason != BFQ_BFQQ_BUDGET_EXHAUSTED)
++ bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
++}
++
++/*
++ * Budget timeout is not implemented through a dedicated timer, but
++ * just checked on request arrivals and completions, as well as on
++ * idle timer expirations.
++ */
++static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
++{
++ return time_is_before_eq_jiffies(bfqq->budget_timeout);
++}
++
++/*
++ * If we expire a queue that is actively waiting (i.e., with the
++ * device idled) for the arrival of a new request, then we may incur
++ * the timestamp misalignment problem described in the body of the
++ * function __bfq_activate_entity. Hence we return true only if this
++ * condition does not hold, or if the queue is slow enough to deserve
++ * only to be kicked off for preserving a high throughput.
++ */
++static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "may_budget_timeout: wait_request %d left %d timeout %d",
++ bfq_bfqq_wait_request(bfqq),
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
++ bfq_bfqq_budget_timeout(bfqq));
++
++ return (!bfq_bfqq_wait_request(bfqq) ||
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
++ &&
++ bfq_bfqq_budget_timeout(bfqq);
++}
++
++/*
++ * For a queue that becomes empty, device idling is allowed only if
++ * this function returns true for that queue. As a consequence, since
++ * device idling plays a critical role for both throughput boosting
++ * and service guarantees, the return value of this function plays a
++ * critical role as well.
++ *
++ * In a nutshell, this function returns true only if idling is
++ * beneficial for throughput or, even if detrimental for throughput,
++ * idling is however necessary to preserve service guarantees (low
++ * latency, desired throughput distribution, ...). In particular, on
++ * NCQ-capable devices, this function tries to return false, so as to
++ * help keep the drives' internal queues full, whenever this helps the
++ * device boost the throughput without causing any service-guarantee
++ * issue.
++ *
++ * In more detail, the return value of this function is obtained by,
++ * first, computing a number of boolean variables that take into
++ * account throughput and service-guarantee issues, and, then,
++ * combining these variables in a logical expression. Most of the
++ * issues taken into account are not trivial. We discuss these issues
++ * while introducing the variables.
++ */
++static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++ bool rot_without_queueing =
++ !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
++ bfqq_sequential_and_IO_bound,
++ idling_boosts_thr, idling_boosts_thr_without_issues,
++ idling_needed_for_service_guarantees,
++ asymmetric_scenario;
++
++ if (bfqd->strict_guarantees)
++ return true;
++
++ /*
++ * Idling is performed only if slice_idle > 0. In addition, we
++ * do not idle if
++ * (a) bfqq is async
++ * (b) bfqq is in the idle io prio class: in this case we do
++ * not idle because we want to minimize the bandwidth that
++	 *     queues in this class can steal from higher-priority queues
++ */
++ if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
++ bfq_class_idle(bfqq))
++ return false;
++
++ bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
++ bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
++ /*
++ * The next variable takes into account the cases where idling
++ * boosts the throughput.
++ *
++ * The value of the variable is computed considering, first, that
++ * idling is virtually always beneficial for the throughput if:
++ * (a) the device is not NCQ-capable and rotational, or
++ * (b) regardless of the presence of NCQ, the device is rotational and
++ * the request pattern for bfqq is I/O-bound and sequential, or
++ * (c) regardless of whether it is rotational, the device is
++ * not NCQ-capable and the request pattern for bfqq is
++ * I/O-bound and sequential.
++ *
++ * Secondly, and in contrast to the above item (b), idling an
++ * NCQ-capable flash-based device would not boost the
++ * throughput even with sequential I/O; rather it would lower
++ * the throughput in proportion to how fast the device
++ * is. Accordingly, the next variable is true if any of the
++ * above conditions (a), (b) or (c) is true, and, in
++ * particular, happens to be false if bfqd is an NCQ-capable
++ * flash-based device.
++ */
++ idling_boosts_thr = rot_without_queueing ||
++ ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
++ bfqq_sequential_and_IO_bound);
++
++ /*
++ * The value of the next variable,
++ * idling_boosts_thr_without_issues, is equal to that of
++ * idling_boosts_thr, unless a special case holds. In this
++ * special case, described below, idling may cause problems to
++ * weight-raised queues.
++ *
++ * When the request pool is saturated (e.g., in the presence
++ * of write hogs), if the processes associated with
++ * non-weight-raised queues ask for requests at a lower rate,
++ * then processes associated with weight-raised queues have a
++ * higher probability to get a request from the pool
++ * immediately (or at least soon) when they need one. Thus
++ * they have a higher probability to actually get a fraction
++ * of the device throughput proportional to their high
++ * weight. This is especially true with NCQ-capable drives,
++ * which enqueue several requests in advance, and further
++ * reorder internally-queued requests.
++ *
++ * For this reason, we force to false the value of
++ * idling_boosts_thr_without_issues if there are weight-raised
++ * busy queues. In this case, and if bfqq is not weight-raised,
++ * this guarantees that the device is not idled for bfqq (if,
++ * instead, bfqq is weight-raised, then idling will be
++ * guaranteed by another variable, see below). Combined with
++ * the timestamping rules of BFQ (see [1] for details), this
++ * behavior causes bfqq, and hence any sync non-weight-raised
++ * queue, to get a lower number of requests served, and thus
++ * to ask for a lower number of requests from the request
++ * pool, before the busy weight-raised queues get served
++ * again. This often mitigates starvation problems in the
++ * presence of heavy write workloads and NCQ, thereby
++ * guaranteeing a higher application and system responsiveness
++ * in these hostile scenarios.
++ */
++ idling_boosts_thr_without_issues = idling_boosts_thr &&
++ bfqd->wr_busy_queues == 0;
++
++ /*
++ * There is then a case where idling must be performed not
++ * for throughput concerns, but to preserve service
++ * guarantees.
++ *
++ * To introduce this case, we can note that allowing the drive
++ * to enqueue more than one request at a time, and hence
++ * delegating de facto final scheduling decisions to the
++ * drive's internal scheduler, entails loss of control on the
++ * actual request service order. In particular, the critical
++ * situation is when requests from different processes happen
++ * to be present, at the same time, in the internal queue(s)
++ * of the drive. In such a situation, the drive, by deciding
++ * the service order of the internally-queued requests, does
++ * determine also the actual throughput distribution among
++ * these processes. But the drive typically has no notion or
++ * concern about per-process throughput distribution, and
++ * makes its decisions only on a per-request basis. Therefore,
++ * the service distribution enforced by the drive's internal
++ * scheduler is likely to coincide with the desired
++ * device-throughput distribution only in a completely
++ * symmetric scenario where:
++ * (i) each of these processes must get the same throughput as
++ * the others;
++ * (ii) all these processes have the same I/O pattern
++ * (either sequential or random).
++ * In fact, in such a scenario, the drive will tend to treat
++ * the requests of each of these processes in about the same
++ * way as the requests of the others, and thus to provide
++ * each of these processes with about the same throughput
++ * (which is exactly the desired throughput distribution). In
++ * contrast, in any asymmetric scenario, device idling is
++ * certainly needed to guarantee that bfqq receives its
++ * assigned fraction of the device throughput (see [1] for
++ * details).
++ *
++ * We address this issue by controlling, actually, only the
++ * symmetry sub-condition (i), i.e., provided that
++ * sub-condition (i) holds, idling is not performed,
++ * regardless of whether sub-condition (ii) holds. In other
++ * words, only if sub-condition (i) holds, then idling is
++ * allowed, and the device tends to be prevented from queueing
++ * many requests, possibly of several processes. The reason
++	 * for not also controlling sub-condition (ii) is that we
++ * exploit preemption to preserve guarantees in case of
++ * symmetric scenarios, even if (ii) does not hold, as
++ * explained in the next two paragraphs.
++ *
++ * Even if a queue, say Q, is expired when it remains idle, Q
++ * can still preempt the new in-service queue if the next
++ * request of Q arrives soon (see the comments on
++ * bfq_bfqq_update_budg_for_activation). If all queues and
++ * groups have the same weight, this form of preemption,
++ * combined with the hole-recovery heuristic described in the
++ * comments on function bfq_bfqq_update_budg_for_activation,
++ * are enough to preserve a correct bandwidth distribution in
++ * the mid term, even without idling. In fact, even if not
++ * idling allows the internal queues of the device to contain
++ * many requests, and thus to reorder requests, we can rather
++ * safely assume that the internal scheduler still preserves a
++ * minimum of mid-term fairness. The motivation for using
++ * preemption instead of idling is that, by not idling,
++	 * service guarantees are preserved without sacrificing
++	 * throughput in the least. In other words, both a high
++ * throughput and its desired distribution are obtained.
++ *
++ * More precisely, this preemption-based, idleless approach
++ * provides fairness in terms of IOPS, and not sectors per
++ * second. This can be seen with a simple example. Suppose
++ * that there are two queues with the same weight, but that
++ * the first queue receives requests of 8 sectors, while the
++ * second queue receives requests of 1024 sectors. In
++ * addition, suppose that each of the two queues contains at
++ * most one request at a time, which implies that each queue
++ * always remains idle after it is served. Finally, after
++ * remaining idle, each queue receives very quickly a new
++ * request. It follows that the two queues are served
++ * alternatively, preempting each other if needed. This
++ * implies that, although both queues have the same weight,
++ * the queue with large requests receives a service that is
++ * 1024/8 times as high as the service received by the other
++ * queue.
++ *
++ * On the other hand, device idling is performed, and thus
++ * pure sector-domain guarantees are provided, for the
++ * following queues, which are likely to need stronger
++ * throughput guarantees: weight-raised queues, and queues
++ * with a higher weight than other queues. When such queues
++ * are active, sub-condition (i) is false, which triggers
++ * device idling.
++ *
++ * According to the above considerations, the next variable is
++ * true (only) if sub-condition (i) holds. To compute the
++ * value of this variable, we not only use the return value of
++ * the function bfq_symmetric_scenario(), but also check
++ * whether bfqq is being weight-raised, because
++ * bfq_symmetric_scenario() does not take into account also
++ * weight-raised queues (see comments on
++ * bfq_weights_tree_add()).
++ *
++ * As a side note, it is worth considering that the above
++ * device-idling countermeasures may however fail in the
++ * following unlucky scenario: if idling is (correctly)
++ * disabled in a time period during which all symmetry
++ * sub-conditions hold, and hence the device is allowed to
++ * enqueue many requests, but at some later point in time some
++	 * sub-condition ceases to hold, then it may become impossible
++ * to let requests be served in the desired order until all
++ * the requests already queued in the device have been served.
++ */
++ asymmetric_scenario = bfqq->wr_coeff > 1 ||
++ !bfq_symmetric_scenario(bfqd);
++
++ /*
++ * Finally, there is a case where maximizing throughput is the
++ * best choice even if it may cause unfairness toward
++ * bfqq. Such a case is when bfqq became active in a burst of
++ * queue activations. Queues that became active during a large
++ * burst benefit only from throughput, as discussed in the
++ * comments on bfq_handle_burst. Thus, if bfqq became active
++ * in a burst and not idling the device maximizes throughput,
++	 * then the device must not be idled, because not idling the
++ * device provides bfqq and all other queues in the burst with
++ * maximum benefit. Combining this and the above case, we can
++ * now establish when idling is actually needed to preserve
++ * service guarantees.
++ */
++ idling_needed_for_service_guarantees =
++ asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
++
++ /*
++ * We have now all the components we need to compute the
++ * return value of the function, which is true only if idling
++ * either boosts the throughput (without issues), or is
++ * necessary to preserve service guarantees.
++ */
++ bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d",
++ bfq_bfqq_sync(bfqq), idling_boosts_thr);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "may_idle: wr_busy %d boosts %d IO-bound %d guar %d",
++ bfqd->wr_busy_queues,
++ idling_boosts_thr_without_issues,
++ bfq_bfqq_IO_bound(bfqq),
++ idling_needed_for_service_guarantees);
++
++ return idling_boosts_thr_without_issues ||
++ idling_needed_for_service_guarantees;
++}
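++
++/*
++ * Two concrete scenarios for bfq_bfqq_may_idle() above (illustration
++ * only), assuming a sync, non-idle-class queue and slice_idle > 0: on
++ * an NCQ-capable non-rotational device, with no weight-raised busy
++ * queues and a symmetric scenario, both idling_boosts_thr_without_issues
++ * and idling_needed_for_service_guarantees are false, so the device is
++ * not idled and its internal queues are kept full. On a rotational
++ * device without NCQ, idling_boosts_thr is true and, as long as no busy
++ * queue is weight-raised, the device is idled for bfqq.
++ */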
++
++/*
++ * If the in-service queue is empty but the function bfq_bfqq_may_idle
++ * returns true, then:
++ * 1) the queue must remain in service and cannot be expired, and
++ * 2) the device must be idled to wait for the possible arrival of a new
++ * request for the queue.
++ * See the comments on the function bfq_bfqq_may_idle for the reasons
++ * why performing device idling is the best choice to boost the throughput
++ * and preserve service guarantees when bfq_bfqq_may_idle itself
++ * returns true.
++ */
++static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
++{
++ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
++}
++
++/*
++ * Select a queue for service. If we have a current queue in service,
++ * check whether to continue servicing it, or retrieve and set a new one.
++ */
++static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++ struct request *next_rq;
++ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++
++ bfqq = bfqd->in_service_queue;
++ if (!bfqq)
++ goto new_queue;
++
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++
++ if (bfq_may_expire_for_budg_timeout(bfqq) &&
++ !hrtimer_active(&bfqd->idle_slice_timer) &&
++ !bfq_bfqq_must_idle(bfqq))
++ goto expire;
++
++check_queue:
++ /*
++ * This loop is rarely executed more than once. Even when it
++ * happens, it is much more convenient to re-execute this loop
++ * than to return NULL and trigger a new dispatch to get a
++ * request served.
++ */
++ next_rq = bfqq->next_rq;
++ /*
++ * If bfqq has requests queued and it has enough budget left to
++ * serve them, keep the queue, otherwise expire it.
++ */
++ if (next_rq) {
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ if (bfq_serv_to_charge(next_rq, bfqq) >
++ bfq_bfqq_budget_left(bfqq)) {
++ /*
++ * Expire the queue for budget exhaustion,
++ * which makes sure that the next budget is
++ * enough to serve the next request, even if
++ * it comes from the fifo expired path.
++ */
++ reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
++ goto expire;
++ } else {
++ /*
++ * The idle timer may be pending because we may
++ * not disable disk idling even when a new request
++ * arrives.
++ */
++ if (bfq_bfqq_wait_request(bfqq)) {
++ BUG_ON(!hrtimer_active(&bfqd->idle_slice_timer));
++ /*
++				 * If we get here: 1) at least one new request
++				 * has arrived but we have not disabled the
++				 * timer because the request was too small, and
++				 * 2) the block layer has then unplugged
++ * the device, causing the dispatch to be
++ * invoked.
++ *
++ * Since the device is unplugged, now the
++ * requests are probably large enough to
++ * provide a reasonable throughput.
++ * So we disable idling.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++ }
++ goto keep_queue;
++ }
++ }
++
++ /*
++ * No requests pending. However, if the in-service queue is idling
++ * for a new request, or has requests waiting for a completion and
++ * may idle after their completion, then keep it anyway.
++ */
++ if (hrtimer_active(&bfqd->idle_slice_timer) ||
++ (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
++ bfqq = NULL;
++ goto keep_queue;
++ }
++
++ reason = BFQ_BFQQ_NO_MORE_REQUESTS;
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, false, reason);
++new_queue:
++ bfqq = bfq_set_in_service_queue(bfqd);
++ if (bfqq) {
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
++ goto check_queue;
++ }
++keep_queue:
++ if (bfqq)
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
++ else
++ bfq_log(bfqd, "select_queue: no queue returned");
++
++ return bfqq;
++}
++
++static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
++ BUG_ON(bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqq->wr_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
++ entity->orig_weight * bfqq->wr_coeff);
++ if (entity->prio_changed)
++ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
++
++ /*
++ * If the queue was activated in a burst, or too much
++ * time has elapsed from the beginning of this
++ * weight-raising period, then end weight raising.
++ */
++ if (bfq_bfqq_in_large_burst(bfqq))
++ bfq_bfqq_end_wr(bfqq);
++ else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time)) {
++ if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
++ time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
++ bfq_wr_duration(bfqd)))
++ bfq_bfqq_end_wr(bfqq);
++ else {
++ /* switch back to interactive wr */
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ bfqq->last_wr_start_finish =
++ bfqq->wr_start_at_switch_to_srt;
++ BUG_ON(time_is_after_jiffies(
++ bfqq->last_wr_start_finish));
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "back to interactive wr");
++ }
++ }
++ }
++ /*
++ * To improve latency (for this or other queues), immediately
++ * update weight both if it must be raised and if it must be
++	 * lowered. Since the entity may be on some active tree here, and
++	 * might have a pending change of its ioprio class, invoke the
++ * next function with the last parameter unset (see the
++ * comments on the function).
++ */
++ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
++ __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
++ entity, false);
++}
++
++/*
++ * Dispatch one request from bfqq, moving it to the request queue
++ * dispatch list.
++ */
++static int bfq_dispatch_request(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++ struct request *rq = bfqq->next_rq;
++ unsigned long service_to_charge;
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++ BUG_ON(!rq);
++ service_to_charge = bfq_serv_to_charge(rq, bfqq);
++
++ BUG_ON(service_to_charge > bfq_bfqq_budget_left(bfqq));
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ bfq_bfqq_served(bfqq, service_to_charge);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ bfq_dispatch_insert(bfqd->queue, rq);
++
++ /*
++	 * If weight raising has to terminate for bfqq, then the next
++	 * function causes an immediate update of bfqq's weight,
++	 * without waiting for the next activation. As a consequence, on
++	 * expiration, bfqq will be timestamped as if it had never been
++	 * weight-raised during this service slot, even if it has
++	 * received part or even most of the service as a
++	 * weight-raised queue. This inflates bfqq's timestamps, which
++	 * is beneficial, as bfqq is then more willing to leave the
++	 * device immediately to other possible weight-raised queues.
++ */
++ bfq_update_wr_data(bfqd, bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "dispatched %u sec req (%llu), budg left %d",
++ blk_rq_sectors(rq),
++ (unsigned long long) blk_rq_pos(rq),
++ bfq_bfqq_budget_left(bfqq));
++
++ dispatched++;
++
++ if (!bfqd->in_service_bic) {
++ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
++ bfqd->in_service_bic = RQ_BIC(rq);
++ }
++
++ if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
++ goto expire;
++
++ return dispatched;
++
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, false, BFQ_BFQQ_BUDGET_EXHAUSTED);
++ return dispatched;
++}
++
++static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++
++ while (bfqq->next_rq) {
++ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
++ dispatched++;
++ }
++
++ BUG_ON(!list_empty(&bfqq->fifo));
++ return dispatched;
++}
++
++/*
++ * Drain our current requests.
++ * Used for barriers and when switching io schedulers on-the-fly.
++ */
++static int bfq_forced_dispatch(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq, *n;
++ struct bfq_service_tree *st;
++ int dispatched = 0;
++
++ bfqq = bfqd->in_service_queue;
++ if (bfqq)
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ /*
++ * Loop through classes, and be careful to leave the scheduler
++ * in a consistent state, as feedback mechanisms and vtime
++ * updates cannot be disabled during the process.
++ */
++ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
++ st = bfq_entity_service_tree(&bfqq->entity);
++
++ dispatched += __bfq_forced_dispatch_bfqq(bfqq);
++
++ bfqq->max_budget = bfq_max_budget(bfqd);
++ bfq_forget_idle(st);
++ }
++
++ BUG_ON(bfqd->busy_queues != 0);
++
++ return dispatched;
++}
++
++static int bfq_dispatch_requests(struct request_queue *q, int force)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq;
++
++ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++
++ if (bfqd->busy_queues == 0)
++ return 0;
++
++ if (unlikely(force))
++ return bfq_forced_dispatch(bfqd);
++
++ /*
++ * Force device to serve one request at a time if
++ * strict_guarantees is true. Forcing this service scheme is
++ * currently the ONLY way to guarantee that the request
++ * service order enforced by the scheduler is respected by a
++ * queueing device. Otherwise the device is free even to make
++ * some unlucky request wait for as long as the device
++ * wishes.
++ *
++	 * Of course, serving one request at a time may cause loss of
++ * throughput.
++ */
++ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
++ return 0;
++
++ bfqq = bfq_select_queue(bfqd);
++ if (!bfqq)
++ return 0;
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ BUG_ON(bfq_bfqq_wait_request(bfqq));
++
++ if (!bfq_dispatch_request(bfqd, bfqq))
++ return 0;
++
++ bfq_log_bfqq(bfqd, bfqq, "dispatched %s request",
++ bfq_bfqq_sync(bfqq) ? "sync" : "async");
++
++ BUG_ON(bfqq->next_rq == NULL &&
++ bfqq->entity.budget < bfqq->entity.service);
++ return 1;
++}
++
++/*
++ * Task holds one reference to the queue, dropped when task exits. Each rq
++ * in-flight on this queue also holds a reference, dropped when rq is freed.
++ *
++ * Queue lock must be held here. Recall not to use bfqq after calling
++ * this function on it.
++ */
++static void bfq_put_queue(struct bfq_queue *bfqq)
++{
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ struct bfq_group *bfqg = bfqq_group(bfqq);
++#endif
++
++ BUG_ON(bfqq->ref <= 0);
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++ bfqq->ref--;
++ if (bfqq->ref)
++ return;
++
++ BUG_ON(rb_first(&bfqq->sort_list));
++ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
++ BUG_ON(bfqq->entity.tree);
++ BUG_ON(bfq_bfqq_busy(bfqq));
++
++ if (bfq_bfqq_sync(bfqq))
++ /*
++ * The fact that this queue is being destroyed does not
++ * invalidate the fact that this queue may have been
++ * activated during the current burst. As a consequence,
++		 * although the queue does not exist anymore, and hence
++		 * needs to be removed from the burst list if present,
++		 * the burst size must not be decremented.
++ */
++ hlist_del_init(&bfqq->burst_list_node);
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
++
++ kmem_cache_free(bfq_pool, bfqq);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ bfqg_put(bfqg);
++#endif
++}
++
++static void bfq_put_cooperator(struct bfq_queue *bfqq)
++{
++ struct bfq_queue *__bfqq, *next;
++
++ /*
++ * If this queue was scheduled to merge with another queue, be
++ * sure to drop the reference taken on that queue (and others in
++ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
++ */
++ __bfqq = bfqq->new_bfqq;
++ while (__bfqq) {
++ if (__bfqq == bfqq)
++ break;
++ next = __bfqq->new_bfqq;
++ bfq_put_queue(__bfqq);
++ __bfqq = next;
++ }
++}
++
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ if (bfqq == bfqd->in_service_queue) {
++ __bfq_bfqq_expire(bfqd, bfqq);
++ bfq_schedule_dispatch(bfqd);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq); /* release process reference */
++}
++
++static void bfq_init_icq(struct io_cq *icq)
++{
++ icq_to_bic(icq)->ttime.last_end_request = ktime_get_ns() - (1ULL<<32);
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++
++ if (bic_to_bfqq(bic, false)) {
++ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, false));
++ bic_set_bfqq(bic, NULL, false);
++ }
++
++ if (bic_to_bfqq(bic, true)) {
++ /*
++ * If the bic is using a shared queue, put the reference
++ * taken on the io_context when the bic started using a
++ * shared bfq_queue.
++ */
++ if (bfq_bfqq_coop(bic_to_bfqq(bic, true)))
++ put_io_context(icq->ioc);
++ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, true));
++ bic_set_bfqq(bic, NULL, true);
++ }
++}
++
++/*
++ * Update the entity prio values; note that the new values will not
++ * be used until the next (re)activation.
++ */
++static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ struct task_struct *tsk = current;
++ int ioprio_class;
++
++ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ switch (ioprio_class) {
++ default:
++ dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
++ "bfq: bad prio class %d\n", ioprio_class);
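++		/* fall through: treat an unknown class like IOPRIO_CLASS_NONE */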
++ case IOPRIO_CLASS_NONE:
++ /*
++ * No prio set, inherit CPU scheduling settings.
++ */
++ bfqq->new_ioprio = task_nice_ioprio(tsk);
++ bfqq->new_ioprio_class = task_nice_ioclass(tsk);
++ break;
++ case IOPRIO_CLASS_RT:
++ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
++ break;
++ case IOPRIO_CLASS_BE:
++ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
++ break;
++ case IOPRIO_CLASS_IDLE:
++ bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
++ bfqq->new_ioprio = 7;
++ break;
++ }
++
++ if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
++ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
++ bfqq->new_ioprio);
++ BUG();
++ }
++
++ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "set_next_ioprio_data: bic_class %d prio %d class %d",
++ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class);
++}
++
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_queue *bfqq;
++ unsigned long uninitialized_var(flags);
++ int ioprio = bic->icq.ioc->ioprio;
++
++ /*
++	 * This condition may trigger on a newly created bic; be sure to
++ * drop the lock before returning.
++ */
++ if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
++ return;
++
++ bic->ioprio = ioprio;
++
++ bfqq = bic_to_bfqq(bic, false);
++ if (bfqq) {
++ /* release process reference on this queue */
++ bfq_put_queue(bfqq);
++ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
++ bic_set_bfqq(bic, bfqq, false);
++ bfq_log_bfqq(bfqd, bfqq,
++ "check_ioprio_change: bfqq %p %d",
++ bfqq, bfqq->ref);
++ }
++
++ bfqq = bic_to_bfqq(bic, true);
++ if (bfqq)
++ bfq_set_next_ioprio_data(bfqq, bic);
++}
++
++static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic, pid_t pid, int is_sync)
++{
++ RB_CLEAR_NODE(&bfqq->entity.rb_node);
++ INIT_LIST_HEAD(&bfqq->fifo);
++ INIT_HLIST_NODE(&bfqq->burst_list_node);
++ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
++
++ bfqq->ref = 0;
++ bfqq->bfqd = bfqd;
++
++ if (bic)
++ bfq_set_next_ioprio_data(bfqq, bic);
++
++ if (is_sync) {
++ /*
++ * No need to mark as has_short_ttime if in
++ * idle_class, because no device idling is performed
++ * for queues in idle class
++ */
++ if (!bfq_class_idle(bfqq))
++ /* tentatively mark as has_short_ttime */
++ bfq_mark_bfqq_has_short_ttime(bfqq);
++ bfq_mark_bfqq_sync(bfqq);
++ bfq_mark_bfqq_just_created(bfqq);
++ } else
++ bfq_clear_bfqq_sync(bfqq);
++ bfq_mark_bfqq_IO_bound(bfqq);
++
++ /* Tentative initial value to trade off between thr and lat */
++ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
++ bfqq->pid = pid;
++
++ bfqq->wr_coeff = 1;
++ bfqq->last_wr_start_finish = jiffies;
++ bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
++ bfqq->budget_timeout = bfq_smallest_from_now();
++ bfqq->split_time = bfq_smallest_from_now();
++
++ /*
++ * Set to the value for which bfqq will not be deemed as
++ * soft rt when it becomes backlogged.
++ */
++ bfqq->soft_rt_next_start = bfq_greatest_from_now();
++
++ /* first request is almost certainly seeky */
++ bfqq->seek_history = 1;
++}
++
++static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int ioprio_class, int ioprio)
++{
++ switch (ioprio_class) {
++ case IOPRIO_CLASS_RT:
++ return &bfqg->async_bfqq[0][ioprio];
++ case IOPRIO_CLASS_NONE:
++ ioprio = IOPRIO_NORM;
++ /* fall through */
++ case IOPRIO_CLASS_BE:
++ return &bfqg->async_bfqq[1][ioprio];
++ case IOPRIO_CLASS_IDLE:
++ return &bfqg->async_idle_bfqq;
++ default:
++ BUG();
++ }
++}
++
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bio *bio, bool is_sync,
++ struct bfq_io_cq *bic)
++{
++ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ struct bfq_queue **async_bfqq = NULL;
++ struct bfq_queue *bfqq;
++ struct bfq_group *bfqg;
++
++ rcu_read_lock();
++
++ bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
++ if (!bfqg) {
++ bfqq = &bfqd->oom_bfqq;
++ goto out;
++ }
++
++ if (!is_sync) {
++ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
++ ioprio);
++ bfqq = *async_bfqq;
++ if (bfqq)
++ goto out;
++ }
++
++ bfqq = kmem_cache_alloc_node(bfq_pool,
++ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
++ bfqd->queue->node);
++
++ if (bfqq) {
++ bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
++ is_sync);
++ bfq_init_entity(&bfqq->entity, bfqg);
++ bfq_log_bfqq(bfqd, bfqq, "allocated");
++ } else {
++ bfqq = &bfqd->oom_bfqq;
++ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
++ goto out;
++ }
++
++ /*
++ * Pin the queue now that it's allocated, scheduler exit will
++ * prune it.
++ */
++ if (async_bfqq) {
++ bfqq->ref++; /*
++ * Extra group reference, w.r.t. sync
++ * queue. This extra reference is removed
++ * only if bfqq->bfqg disappears, to
++ * guarantee that this queue is not freed
++ * until its group goes away.
++ */
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfqq, bfqq->ref);
++ *async_bfqq = bfqq;
++ }
++
++out:
++ bfqq->ref++; /* get a process reference to this queue */
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
++ rcu_read_unlock();
++ return bfqq;
++}
++
++static void bfq_update_io_thinktime(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic)
++{
++ struct bfq_ttime *ttime = &bic->ttime;
++ u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request;
++
++ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle);
++
++ ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++ ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
++ ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
++ ttime->ttime_samples);
++}
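++
++/*
++ * Note on bfq_update_io_thinktime() above: ttime_samples converges to
++ * 256, so, once it has converged, the update amounts to an exponentially
++ * weighted moving average with decay factor 7/8:
++ * ttime_mean_new ~= (7 * ttime_mean_old + elapsed) / 8 (ignoring
++ * rounding), with each sample capped at twice bfq_slice_idle. For
++ * illustration (hypothetical numbers): with a previous mean of 4 ms and
++ * a new think time of 12 ms, the mean becomes (7 * 4 + 12) / 8 = 5 ms.
++ */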
++
++static void
++bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ bfqq->seek_history <<= 1;
++ bfqq->seek_history |=
++ get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
++ (!blk_queue_nonrot(bfqd->queue) ||
++ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
++}
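++
++/*
++ * Note on bfq_update_io_seektime() above: seek_history is a sliding
++ * bitmap of the most recent requests of bfqq. A bit is set when the
++ * request lies farther than BFQQ_SEEK_THR from the previous one and,
++ * on non-rotational devices, only if the request is also smaller than
++ * BFQQ_SECT_THR_NONROT (large requests are not penalised as seeks on
++ * such devices). BFQQ_SEEKY(), used, e.g., in bfq_bfqq_is_slow(), then
++ * classifies bfqq as seeky according to how many of these bits are set.
++ */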
++
++static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ bool has_short_ttime = true;
++
++ /*
++ * No need to update has_short_ttime if bfqq is async or in
++ * idle io prio class, or if bfq_slice_idle is zero, because
++ * no device idling is performed for bfqq in this case.
++ */
++ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
++ bfqd->bfq_slice_idle == 0)
++ return;
++
++ /* Idle window just restored, statistics are meaningless. */
++ if (time_is_after_eq_jiffies(bfqq->split_time +
++ bfqd->bfq_wr_min_idle_time))
++ return;
++
++	/*
++	 * Think time is infinite if no process is linked to
++	 * bfqq. Otherwise check average think time to
++	 * decide whether to mark as has_short_ttime
++	 */
++ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
++ (bfq_sample_valid(bic->ttime.ttime_samples) &&
++ bic->ttime.ttime_mean > bfqd->bfq_slice_idle))
++ has_short_ttime = false;
++
++ bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
++ has_short_ttime);
++
++ if (has_short_ttime)
++ bfq_mark_bfqq_has_short_ttime(bfqq);
++ else
++ bfq_clear_bfqq_has_short_ttime(bfqq);
++}
++
++/*
++ * Called when a new fs request (rq) is added to bfqq. Check if there's
++ * something we should do about it.
++ */
++static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ struct bfq_io_cq *bic = RQ_BIC(rq);
++
++ if (rq->cmd_flags & REQ_META)
++ bfqq->meta_pending++;
++
++ bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_has_short_ttime(bfqd, bfqq, bic);
++ bfq_update_io_seektime(bfqd, bfqq, rq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "rq_enqueued: has_short_ttime=%d (seeky %d)",
++ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
++
++ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
++
++ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
++ bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
++ blk_rq_sectors(rq) < 32;
++ bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
++
++ /*
++ * There is just this request queued: if the request
++ * is small and the queue is not to be expired, then
++ * just exit.
++ *
++ * In this way, if the device is being idled to wait
++ * for a new request from the in-service queue, we
++ * avoid unplugging the device and committing the
++ * device to serve just a small request. On the
++ * contrary, we wait for the block layer to decide
++ * when to unplug the device: hopefully, new requests
++ * will be merged to this one quickly, then the device
++ * will be unplugged and larger requests will be
++ * dispatched.
++ */
++ if (small_req && !budget_timeout)
++ return;
++
++ /*
++ * A large enough request arrived, or the queue is to
++ * be expired: in both cases disk idling is to be
++ * stopped, so clear wait_request flag and reset
++ * timer.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++
++ /*
++ * The queue is not empty, because a new request just
++ * arrived. Hence we can safely expire the queue, in
++ * case of budget timeout, without risking that the
++ * timestamps of the queue are not updated correctly.
++ * See [1] for more details.
++ */
++ if (budget_timeout)
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_BUDGET_TIMEOUT);
++
++ /*
++ * Let the request rip immediately, or let a new queue be
++ * selected if bfqq has just been expired.
++ */
++ __blk_run_queue(bfqd->queue);
++ }
++}
++
++static void bfq_insert_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ /*
++ * An unplug may trigger a requeue of a request from the device
++ * driver: make sure we are in process context while trying to
++ * merge two bfq_queues.
++ */
++ if (!in_interrupt()) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
++ if (new_bfqq) {
++ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
++ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
++ /*
++ * Release the request's reference to the old bfqq
++ * and make sure one is taken to the shared queue.
++ */
++ new_bfqq->allocated[rq_data_dir(rq)]++;
++ bfqq->allocated[rq_data_dir(rq)]--;
++ new_bfqq->ref++;
++ bfq_clear_bfqq_just_created(bfqq);
++ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
++ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
++ bfqq, new_bfqq);
++ /*
++ * rq is about to be enqueued into new_bfqq,
++ * release rq reference on bfqq
++ */
++ bfq_put_queue(bfqq);
++ rq->elv.priv[1] = new_bfqq;
++ bfqq = new_bfqq;
++ }
++ }
++
++ bfq_add_request(rq);
++
++ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
++ list_add_tail(&rq->queuelist, &bfqq->fifo);
++
++ bfq_rq_enqueued(bfqd, bfqq, rq);
++}
++
++static void bfq_update_hw_tag(struct bfq_data *bfqd)
++{
++ bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
++ bfqd->rq_in_driver);
++
++ if (bfqd->hw_tag == 1)
++ return;
++
++ /*
++ * This sample is valid if the number of outstanding requests
++ * is large enough to allow a queueing behavior. Note that the
++ * sum is not exact, as it's not taking into account deactivated
++ * requests.
++ */
++ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
++ return;
++
++ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
++ return;
++
++ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
++ bfqd->max_rq_in_driver = 0;
++ bfqd->hw_tag_samples = 0;
++}
++
++static void bfq_completed_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ u64 now_ns;
++ u32 delta_us;
++
++ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left",
++ blk_rq_sectors(rq));
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++ bfq_update_hw_tag(bfqd);
++
++ BUG_ON(!bfqd->rq_in_driver);
++ BUG_ON(!bfqq->dispatched);
++ bfqd->rq_in_driver--;
++ bfqq->dispatched--;
++ bfqg_stats_update_completion(bfqq_group(bfqq),
++ rq_start_time_ns(rq),
++ rq_io_start_time_ns(rq),
++ rq->cmd_flags);
++
++ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++ /*
++ * Set budget_timeout (which we overload to store the
++ * time at which the queue remains with no backlog and
++ * no outstanding request; used by the weight-raising
++ * mechanism).
++ */
++ bfqq->budget_timeout = jiffies;
++
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ }
++
++ now_ns = ktime_get_ns();
++
++ RQ_BIC(rq)->ttime.last_end_request = now_ns;
++
++ /*
++ * Using us instead of ns, to get a reasonable precision in
++ * computing rate in next check.
++ */
++ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
++
++ bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
++ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
++ (USEC_PER_SEC*
++ (u64)((bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us))
++ >>BFQ_RATE_SHIFT,
++ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT);
++
++ /*
++ * If the request took rather long to complete, and, according
++ * to the maximum request size recorded, this completion latency
++ * implies that the request was certainly served at a very low
++ * rate (less than 1M sectors/sec), then the whole observation
++ * interval that lasts up to this time instant cannot be a
++ * valid time interval for computing a new peak rate. Invoke
++ * bfq_update_rate_reset to have the following three steps
++ * taken:
++ * - close the observation interval at the last (previous)
++ * request dispatch or completion
++ * - compute rate, if possible, for that observation interval
++ * - reset to zero samples, which will trigger a proper
++ * re-initialization of the observation interval on next
++ * dispatch
++ */
++ if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
++ (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
++ 1UL<<(BFQ_RATE_SHIFT - 10))
++ bfq_update_rate_reset(bfqd, NULL);
++ bfqd->last_completion = now_ns;
++
++ /*
++ * If we are waiting to discover whether the request pattern
++ * of the task associated with the queue is actually
++ * isochronous, and both requisites for this condition to hold
++ * are now satisfied, then compute soft_rt_next_start (see the
++ * comments on the function bfq_bfqq_softrt_next_start()). We
++ * schedule this delayed check when bfqq expires, if it still
++ * has in-flight requests.
++ */
++ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list))
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++
++ /*
++ * If this is the in-service queue, check if it needs to be expired,
++ * or if we want to idle in case it has no pending requests.
++ */
++ if (bfqd->in_service_queue == bfqq) {
++ if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
++ bfq_arm_slice_timer(bfqd);
++ goto out;
++ } else if (bfq_may_expire_for_budg_timeout(bfqq))
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_BUDGET_TIMEOUT);
++ else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
++ (bfqq->dispatched == 0 ||
++ !bfq_bfqq_may_idle(bfqq)))
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_NO_MORE_REQUESTS);
++ }
++
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++
++out:
++ return;
++}
++
++static int __bfq_may_queue(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
++ bfq_clear_bfqq_must_alloc(bfqq);
++ return ELV_MQUEUE_MUST;
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++static int bfq_may_queue(struct request_queue *q, unsigned int op)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ /*
++ * Don't force setup of a queue from here, as a call to may_queue
++ * does not necessarily imply that a request actually will be
++ * queued. So just lookup a possibly existing queue, or return
++ * 'may queue' if that fails.
++ */
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (!bic)
++ return ELV_MQUEUE_MAY;
++
++ bfqq = bic_to_bfqq(bic, op_is_sync(op));
++ if (bfqq)
++ return __bfq_may_queue(bfqq);
++
++ return ELV_MQUEUE_MAY;
++}
++
++/*
++ * Queue lock held here.
++ */
++static void bfq_put_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ if (bfqq) {
++ const int rw = rq_data_dir(rq);
++
++ BUG_ON(!bfqq->allocated[rw]);
++ bfqq->allocated[rw]--;
++
++ rq->elv.priv[0] = NULL;
++ rq->elv.priv[1] = NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++ bfqq, bfqq->ref);
++ bfq_put_queue(bfqq);
++ }
++}
++
++/*
++ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
++ * was the last process referring to that bfqq.
++ */
++static struct bfq_queue *
++bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++
++ put_io_context(bic->icq.ioc);
++
++ if (bfqq_process_refs(bfqq) == 1) {
++ bfqq->pid = current->pid;
++ bfq_clear_bfqq_coop(bfqq);
++ bfq_clear_bfqq_split_coop(bfqq);
++ return bfqq;
++ }
++
++ bic_set_bfqq(bic, NULL, 1);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++ return NULL;
++}
++
++/*
++ * Allocate bfq data structures associated with this request.
++ */
++static int bfq_set_request(struct request_queue *q, struct request *rq,
++ struct bio *bio, gfp_t gfp_mask)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++ const int rw = rq_data_dir(rq);
++ const int is_sync = rq_is_sync(rq);
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ bool bfqq_already_existing = false, split = false;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++
++ if (!bic)
++ goto queue_fail;
++
++ bfq_check_ioprio_change(bic, bio);
++
++ bfq_bic_update_cgroup(bic, bio);
++
++new_queue:
++ bfqq = bic_to_bfqq(bic, is_sync);
++ if (!bfqq || bfqq == &bfqd->oom_bfqq) {
++ if (bfqq)
++ bfq_put_queue(bfqq);
++ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
++ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
++
++ bic_set_bfqq(bic, bfqq, is_sync);
++ if (split && is_sync) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: was_in_list %d "
++ "was_in_large_burst %d "
++ "large burst in progress %d",
++ bic->was_in_burst_list,
++ bic->saved_in_large_burst,
++ bfqd->large_burst);
++
++ if ((bic->was_in_burst_list && bfqd->large_burst) ||
++ bic->saved_in_large_burst) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: marking in "
++ "large burst");
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ } else {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: clearing in "
++ "large burst");
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ if (bic->was_in_burst_list)
++ hlist_add_head(&bfqq->burst_list_node,
++ &bfqd->burst_list);
++ }
++ bfqq->split_time = jiffies;
++ }
++ } else {
++ /* If the queue was seeky for too long, break it apart. */
++ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
++
++ /* Update bic before losing reference to bfqq */
++ if (bfq_bfqq_in_large_burst(bfqq))
++ bic->saved_in_large_burst = true;
++
++ bfqq = bfq_split_bfqq(bic, bfqq);
++ split = true;
++ if (!bfqq)
++ goto new_queue;
++ else
++ bfqq_already_existing = true;
++ }
++ }
++
++ bfqq->allocated[rw]++;
++ bfqq->ref++;
++ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref);
++
++ rq->elv.priv[0] = bic;
++ rq->elv.priv[1] = bfqq;
++
++ /*
++ * If a bfq_queue has only one process reference, it is owned
++ * by only one bfq_io_cq: we can set the bic field of the
++ * bfq_queue to the address of that structure. Also, if the
++ * queue has just been split, mark a flag so that the
++ * information is available to the other scheduler hooks.
++ */
++ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
++ bfqq->bic = bic;
++ if (split) {
++ /*
++ * If the queue has just been split from a shared
++ * queue, restore the idle window and the possible
++ * weight raising period.
++ */
++ bfq_bfqq_resume_state(bfqq, bfqd, bic,
++ bfqq_already_existing);
++ }
++ }
++
++ if (unlikely(bfq_bfqq_just_created(bfqq)))
++ bfq_handle_burst(bfqd, bfqq);
++
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 0;
++
++queue_fail:
++ bfq_schedule_dispatch(bfqd);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 1;
++}
++
++static void bfq_kick_queue(struct work_struct *work)
++{
++ struct bfq_data *bfqd =
++ container_of(work, struct bfq_data, unplug_work);
++ struct request_queue *q = bfqd->queue;
++
++ spin_lock_irq(q->queue_lock);
++ __blk_run_queue(q);
++ spin_unlock_irq(q->queue_lock);
++}
++
++/*
++ * Handler of the expiration of the timer running if the in-service queue
++ * is idling inside its time slice.
++ */
++static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
++{
++ struct bfq_data *bfqd = container_of(timer, struct bfq_data,
++ idle_slice_timer);
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ enum bfqq_expiration reason;
++
++ spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++
++ bfqq = bfqd->in_service_queue;
++ /*
++ * Theoretical race here: the in-service queue can be NULL or
++ * different from the queue that was idling if the timer handler
++ * spins on the queue_lock and a new request arrives for the
++ * current queue and there is a full dispatch cycle that changes
++ * the in-service queue. This can hardly happen, but in the worst
++ * case we just expire a queue too early.
++ */
++ if (bfqq) {
++ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++ bfq_clear_bfqq_wait_request(bfqq);
++
++ if (bfq_bfqq_budget_timeout(bfqq))
++ /*
++ * Also here the queue can be safely expired
++ * for budget timeout without wasting
++ * guarantees
++ */
++ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++ /*
++ * The queue may not be empty upon timer expiration,
++ * because we may not disable the timer when the
++ * first request of the in-service queue arrives
++ * during disk idling.
++ */
++ reason = BFQ_BFQQ_TOO_IDLE;
++ else
++ goto schedule_dispatch;
++
++ bfq_bfqq_expire(bfqd, bfqq, true, reason);
++ }
++
++schedule_dispatch:
++ bfq_schedule_dispatch(bfqd);
++
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
++ return HRTIMER_NORESTART;
++}
++
++static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
++{
++ hrtimer_cancel(&bfqd->idle_slice_timer);
++ cancel_work_sync(&bfqd->unplug_work);
++}
++
++static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
++ struct bfq_queue **bfqq_ptr)
++{
++ struct bfq_group *root_group = bfqd->root_group;
++ struct bfq_queue *bfqq = *bfqq_ptr;
++
++ bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ if (bfqq) {
++ bfq_bfqq_move(bfqd, bfqq, root_group);
++ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfqq, bfqq->ref);
++ bfq_put_queue(bfqq);
++ *bfqq_ptr = NULL;
++ }
++}
++
++/*
++ * Release all the bfqg references to its async queues. If we are
++ * deallocating the group, these queues may still contain requests, so
++ * we reparent them to the root cgroup (i.e., the only one that will
++ * exist for sure until all the requests on a device are gone).
++ */
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
++
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
++}
++
++static void bfq_exit_queue(struct elevator_queue *e)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ struct request_queue *q = bfqd->queue;
++ struct bfq_queue *bfqq, *n;
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ spin_lock_irq(q->queue_lock);
++
++ BUG_ON(bfqd->in_service_queue);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
++ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
++
++ spin_unlock_irq(q->queue_lock);
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
++#else
++ bfq_put_async_queues(bfqd, bfqd->root_group);
++ kfree(bfqd->root_group);
++#endif
++
++ kfree(bfqd);
++}
++
++static void bfq_init_root_group(struct bfq_group *root_group,
++ struct bfq_data *bfqd)
++{
++ int i;
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ root_group->entity.parent = NULL;
++ root_group->my_entity = NULL;
++ root_group->bfqd = bfqd;
++#endif
++ root_group->rq_pos_tree = RB_ROOT;
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++ root_group->sched_data.bfq_class_idle_last_service = jiffies;
++}
++
++static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
++{
++ struct bfq_data *bfqd;
++ struct elevator_queue *eq;
++
++ eq = elevator_alloc(q, e);
++ if (!eq)
++ return -ENOMEM;
++
++ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
++ if (!bfqd) {
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++ eq->elevator_data = bfqd;
++
++ /*
++ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
++ * Grab a permanent reference to it, so that the normal code flow
++ * will not attempt to free it.
++ */
++ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
++ bfqd->oom_bfqq.ref++;
++ bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
++ bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
++ bfqd->oom_bfqq.entity.new_weight =
++ bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
++
++	/* oom_bfqq does not participate in bursts */
++ bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
++ /*
++ * Trigger weight initialization, according to ioprio, at the
++ * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
++ * class won't be changed any more.
++ */
++ bfqd->oom_bfqq.entity.prio_changed = 1;
++
++ bfqd->queue = q;
++
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
++
++ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
++ if (!bfqd->root_group)
++ goto out_free;
++ bfq_init_root_group(bfqd->root_group, bfqd);
++ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
++
++ hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
++ HRTIMER_MODE_REL);
++ bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
++
++ bfqd->queue_weights_tree = RB_ROOT;
++ bfqd->group_weights_tree = RB_ROOT;
++
++ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
++
++ INIT_LIST_HEAD(&bfqd->active_list);
++ INIT_LIST_HEAD(&bfqd->idle_list);
++ INIT_HLIST_HEAD(&bfqd->burst_list);
++
++ bfqd->hw_tag = -1;
++
++ bfqd->bfq_max_budget = bfq_default_max_budget;
++
++ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
++ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
++ bfqd->bfq_back_max = bfq_back_max;
++ bfqd->bfq_back_penalty = bfq_back_penalty;
++ bfqd->bfq_slice_idle = bfq_slice_idle;
++ bfqd->bfq_timeout = bfq_timeout;
++
++ bfqd->bfq_requests_within_timer = 120;
++
++ bfqd->bfq_large_burst_thresh = 8;
++ bfqd->bfq_burst_interval = msecs_to_jiffies(180);
++
++ bfqd->low_latency = true;
++
++ /*
++ * Trade-off between responsiveness and fairness.
++ */
++ bfqd->bfq_wr_coeff = 30;
++ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
++ bfqd->bfq_wr_max_time = 0;
++ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
++ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
++ bfqd->bfq_wr_max_softrt_rate = 7000; /*
++ * Approximate rate required
++ * to playback or record a
++ * high-definition compressed
++ * video.
++ */
++ bfqd->wr_busy_queues = 0;
++
++ /*
++ * Begin by assuming, optimistically, that the device is a
++ * high-speed one, and that its peak rate is equal to 2/3 of
++ * the highest reference rate.
++ */
++ bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
++ T_fast[blk_queue_nonrot(bfqd->queue)];
++ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
++ bfqd->device_speed = BFQ_BFQD_FAST;
++
++ return 0;
++
++out_free:
++ kfree(bfqd);
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++}
++
++static void bfq_slab_kill(void)
++{
++ kmem_cache_destroy(bfq_pool);
++}
++
++static int __init bfq_slab_setup(void)
++{
++ bfq_pool = KMEM_CACHE(bfq_queue, 0);
++ if (!bfq_pool)
++ return -ENOMEM;
++ return 0;
++}
++
++static ssize_t bfq_var_show(unsigned int var, char *page)
++{
++ return sprintf(page, "%u\n", var);
++}
++
++static ssize_t bfq_var_store(unsigned long *var, const char *page,
++ size_t count)
++{
++ unsigned long new_val;
++ int ret = kstrtoul(page, 10, &new_val);
++
++ if (ret == 0)
++ *var = new_val;
++
++ return count;
++}
++
++static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++
++ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
++ jiffies_to_msecs(bfqd->bfq_wr_max_time) :
++ jiffies_to_msecs(bfq_wr_duration(bfqd)));
++}
++
++static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_queue *bfqq;
++ struct bfq_data *bfqd = e->elevator_data;
++ ssize_t num_char = 0;
++
++ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
++ bfqd->queued);
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ num_char += sprintf(page + num_char, "Active:\n");
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, nr_queued %d %d, ",
++ bfqq->pid,
++ bfqq->entity.weight,
++ bfqq->queued[0],
++ bfqq->queued[1]);
++ num_char += sprintf(page + num_char,
++ "dur %d/%u\n",
++ jiffies_to_msecs(
++ jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ num_char += sprintf(page + num_char, "Idle:\n");
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ jiffies_to_msecs(jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++
++ return num_char;
++}
++
++#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ u64 __data = __VAR; \
++ if (__CONV == 1) \
++ __data = jiffies_to_msecs(__data); \
++ else if (__CONV == 2) \
++ __data = div_u64(__data, NSEC_PER_MSEC); \
++ return bfq_var_show(__data, (page)); \
++}
++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
++SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
++SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
++SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
++SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
++SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
++SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
++SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
++SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
++SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
++SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
++ 1);
++SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
++#undef SHOW_FUNCTION
++
++#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ u64 __data = __VAR; \
++ __data = div_u64(__data, NSEC_PER_USEC); \
++ return bfq_var_show(__data, (page)); \
++}
++USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
++#undef USEC_SHOW_FUNCTION
++
++#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
++static ssize_t \
++__FUNC(struct elevator_queue *e, const char *page, size_t count) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ if (__CONV == 1) \
++ *(__PTR) = msecs_to_jiffies(__data); \
++ else if (__CONV == 2) \
++ *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
++ else \
++ *(__PTR) = __data; \
++ return ret; \
++}
++STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
++ INT_MAX, 2);
++STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
++ INT_MAX, 2);
++STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
++STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
++ INT_MAX, 0);
++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
++STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
++ 1);
++STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
++ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
++ INT_MAX, 0);
++#undef STORE_FUNCTION
++
++#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
++static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ *(__PTR) = (u64)__data * NSEC_PER_USEC; \
++ return ret; \
++}
++USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
++ UINT_MAX);
++#undef USEC_STORE_FUNCTION
++
++/* do nothing for the moment */
++static ssize_t bfq_weights_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ return count;
++}
++
++static ssize_t bfq_max_budget_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data == 0)
++ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
++ else {
++ if (__data > INT_MAX)
++ __data = INT_MAX;
++ bfqd->bfq_max_budget = __data;
++ }
++
++ bfqd->bfq_user_max_budget = __data;
++
++ return ret;
++}
++
++/*
++ * Leaving this name to preserve name compatibility with cfq
++ * parameters, but this timeout is used for both sync and async.
++ */
++static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data < 1)
++ __data = 1;
++ else if (__data > INT_MAX)
++ __data = INT_MAX;
++
++ bfqd->bfq_timeout = msecs_to_jiffies(__data);
++ if (bfqd->bfq_user_max_budget == 0)
++ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
++
++ return ret;
++}
++
++static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (!bfqd->strict_guarantees && __data == 1
++ && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
++ bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
++
++ bfqd->strict_guarantees = __data;
++
++ return ret;
++}
++
++static ssize_t bfq_low_latency_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (__data == 0 && bfqd->low_latency != 0)
++ bfq_end_wr(bfqd);
++ bfqd->low_latency = __data;
++
++ return ret;
++}
++
++#define BFQ_ATTR(name) \
++ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
++
++static struct elv_fs_entry bfq_attrs[] = {
++ BFQ_ATTR(fifo_expire_sync),
++ BFQ_ATTR(fifo_expire_async),
++ BFQ_ATTR(back_seek_max),
++ BFQ_ATTR(back_seek_penalty),
++ BFQ_ATTR(slice_idle),
++ BFQ_ATTR(slice_idle_us),
++ BFQ_ATTR(max_budget),
++ BFQ_ATTR(timeout_sync),
++ BFQ_ATTR(strict_guarantees),
++ BFQ_ATTR(low_latency),
++ BFQ_ATTR(wr_coeff),
++ BFQ_ATTR(wr_max_time),
++ BFQ_ATTR(wr_rt_max_time),
++ BFQ_ATTR(wr_min_idle_time),
++ BFQ_ATTR(wr_min_inter_arr_async),
++ BFQ_ATTR(wr_max_softrt_rate),
++ BFQ_ATTR(weights),
++ __ATTR_NULL
++};
++
++static struct elevator_type iosched_bfq = {
++ .ops.sq = {
++ .elevator_merge_fn = bfq_merge,
++ .elevator_merged_fn = bfq_merged_request,
++ .elevator_merge_req_fn = bfq_merged_requests,
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ .elevator_bio_merged_fn = bfq_bio_merged,
++#endif
++ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge,
++ .elevator_allow_rq_merge_fn = bfq_allow_rq_merge,
++ .elevator_dispatch_fn = bfq_dispatch_requests,
++ .elevator_add_req_fn = bfq_insert_request,
++ .elevator_activate_req_fn = bfq_activate_request,
++ .elevator_deactivate_req_fn = bfq_deactivate_request,
++ .elevator_completed_req_fn = bfq_completed_request,
++ .elevator_former_req_fn = elv_rb_former_request,
++ .elevator_latter_req_fn = elv_rb_latter_request,
++ .elevator_init_icq_fn = bfq_init_icq,
++ .elevator_exit_icq_fn = bfq_exit_icq,
++ .elevator_set_req_fn = bfq_set_request,
++ .elevator_put_req_fn = bfq_put_request,
++ .elevator_may_queue_fn = bfq_may_queue,
++ .elevator_init_fn = bfq_init_queue,
++ .elevator_exit_fn = bfq_exit_queue,
++ },
++ .icq_size = sizeof(struct bfq_io_cq),
++ .icq_align = __alignof__(struct bfq_io_cq),
++ .elevator_attrs = bfq_attrs,
++ .elevator_name = "bfq-sq",
++ .elevator_owner = THIS_MODULE,
++};
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static struct blkcg_policy blkcg_policy_bfq = {
++ .dfl_cftypes = bfq_blkg_files,
++ .legacy_cftypes = bfq_blkcg_legacy_files,
++
++ .cpd_alloc_fn = bfq_cpd_alloc,
++ .cpd_init_fn = bfq_cpd_init,
++ .cpd_bind_fn = bfq_cpd_init,
++ .cpd_free_fn = bfq_cpd_free,
++
++ .pd_alloc_fn = bfq_pd_alloc,
++ .pd_init_fn = bfq_pd_init,
++ .pd_offline_fn = bfq_pd_offline,
++ .pd_free_fn = bfq_pd_free,
++ .pd_reset_stats_fn = bfq_pd_reset_stats,
++};
++#endif
++
++static int __init bfq_init(void)
++{
++ int ret;
++ char msg[60] = "BFQ I/O-scheduler: v8r12";
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ ret = blkcg_policy_register(&blkcg_policy_bfq);
++ if (ret)
++ return ret;
++#endif
++
++ ret = -ENOMEM;
++ if (bfq_slab_setup())
++ goto err_pol_unreg;
++
++ /*
++ * Times to load large popular applications for the typical
++ * systems installed on the reference devices (see the
++ * comments before the definitions of the next two
++ * arrays). Actually, we use slightly slower values, as the
++ * estimated peak rate tends to be smaller than the actual
++ * peak rate. The reason for this last fact is that estimates
++ * are computed over much shorter time intervals than the long
++ * intervals typically used for benchmarking. Why? First, to
++ * adapt more quickly to variations. Second, because an I/O
++ * scheduler cannot rely on a peak-rate-evaluation workload to
++ * be run for a long time.
++ */
++ T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
++ T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
++ T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
++ T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */
++
++ /*
++ * Thresholds that determine the switch between speed classes
++ * (see the comments before the definition of the array
++ * device_speed_thresh). These thresholds are biased towards
++ * transitions to the fast class. This is safer than the
++ * opposite bias. In fact, a wrong transition to the slow
++ * class results in short weight-raising periods, because the
++	 * speed of the device then tends to be higher than the
++ * reference peak rate. On the opposite end, a wrong
++ * transition to the fast class tends to increase
++ * weight-raising periods, because of the opposite reason.
++ */
++ device_speed_thresh[0] = (4 * R_slow[0]) / 3;
++ device_speed_thresh[1] = (4 * R_slow[1]) / 3;
++
++ ret = elv_register(&iosched_bfq);
++ if (ret)
++ goto err_pol_unreg;
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ strcat(msg, " (with cgroups support)");
++#endif
++ pr_info("%s", msg);
++
++ return 0;
++
++err_pol_unreg:
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_policy_unregister(&blkcg_policy_bfq);
++#endif
++ return ret;
++}
++
++static void __exit bfq_exit(void)
++{
++ elv_unregister(&iosched_bfq);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_policy_unregister(&blkcg_policy_bfq);
++#endif
++ bfq_slab_kill();
++}
++
++module_init(bfq_init);
++module_exit(bfq_exit);
++
++MODULE_AUTHOR("Arianna Avanzini, Fabio Checconi, Paolo Valente");
++MODULE_LICENSE("GPL");
+
+From e24d2e6461479dbd13d58be2dc44b23b5e24487c Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 19 Dec 2016 17:13:39 +0100
+Subject: [PATCH 07/51] Add config and build bits for bfq-mq-iosched
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/Kconfig.iosched | 10 +++++++++
+ block/Makefile | 1 +
+ block/bfq-cgroup-included.c | 4 ++--
+ block/bfq-mq-iosched.c | 25 ++++++++++++-----------
+ block/bfq-sched.c | 50 ++++++++++++++++++++++-----------------------
+ block/bfq-sq-iosched.c | 24 +++++++++++-----------
+ block/bfq.h | 36 +++++++++++++++++++++-----------
+ 8 files changed, 88 insertions(+), 64 deletions(-)
+
+diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
+index 9e3f4c2f7390..2d94af3d8b0a 100644
+--- a/block/Kconfig.iosched
++++ b/block/Kconfig.iosched
+@@ -96,6 +96,16 @@ config DEFAULT_IOSCHED
+ default "bfq-sq" if DEFAULT_BFQ_SQ
+ default "noop" if DEFAULT_NOOP
+
++config MQ_IOSCHED_BFQ
++ tristate "BFQ-MQ I/O Scheduler"
++ default y
++ ---help---
++ BFQ I/O scheduler for BLK-MQ. BFQ-MQ distributes bandwidth
++ among all processes according to their weights, regardless of
++ the device parameters and with any workload. It also
++ guarantees a low latency to interactive and soft real-time
++ applications. Details in Documentation/block/bfq-iosched.txt
++
+ config MQ_IOSCHED_DEADLINE
+ tristate "MQ deadline I/O scheduler"
+ default y
+diff --git a/block/Makefile b/block/Makefile
+index 59026b425791..a571329c23f0 100644
+--- a/block/Makefile
++++ b/block/Makefile
+@@ -25,6 +25,7 @@ obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
+ bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
+ obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
+ obj-$(CONFIG_IOSCHED_BFQ_SQ) += bfq-sq-iosched.o
++obj-$(CONFIG_MQ_IOSCHED_BFQ) += bfq-mq-iosched.o
+
+ obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
+ obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index af7c216a3540..9c483b658179 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -15,7 +15,7 @@
+ * file.
+ */
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+
+ /* bfqg stats flags */
+ enum bfqg_stats_flags {
+@@ -1116,7 +1116,7 @@ static struct cftype bfq_blkg_files[] = {
+ {} /* terminate */
+ };
+
+-#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
+ struct bfq_queue *bfqq, unsigned int op) { }
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 30d019fc67e0..e88e00f1e0a7 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -82,6 +82,7 @@
+ #include <linux/rbtree.h>
+ #include <linux/ioprio.h>
+ #include "blk.h"
++#undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */
+ #include "bfq.h"
+
+ /* Expiration time of sync (0) and async (1) requests, in ns. */
+@@ -387,7 +388,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
+ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
+ (bfqd->queue_weights_tree.rb_node->rb_left ||
+ bfqd->queue_weights_tree.rb_node->rb_right)
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ ) ||
+ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
+ (bfqd->group_weights_tree.rb_node->rb_left ||
+@@ -1672,7 +1673,7 @@ static void bfq_merged_request(struct request_queue *q, struct request *req,
+ }
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static void bfq_bio_merged(struct request_queue *q, struct request *req,
+ struct bio *bio)
+ {
+@@ -3879,7 +3880,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
+ */
+ static void bfq_put_queue(struct bfq_queue *bfqq)
+ {
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ struct bfq_group *bfqg = bfqq_group(bfqq);
+ #endif
+
+@@ -3909,7 +3910,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+
+ kmem_cache_free(bfq_pool, bfqq);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ bfqg_put(bfqg);
+ #endif
+ }
+@@ -4835,7 +4836,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
+ #else
+ bfq_put_async_queues(bfqd, bfqd->root_group);
+@@ -4850,7 +4851,7 @@ static void bfq_init_root_group(struct bfq_group *root_group,
+ {
+ int i;
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ root_group->entity.parent = NULL;
+ root_group->my_entity = NULL;
+ root_group->bfqd = bfqd;
+@@ -5265,7 +5266,7 @@ static struct elevator_type iosched_bfq = {
+ .elevator_merge_fn = bfq_merge,
+ .elevator_merged_fn = bfq_merged_request,
+ .elevator_merge_req_fn = bfq_merged_requests,
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ .elevator_bio_merged_fn = bfq_bio_merged,
+ #endif
+ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge,
+@@ -5292,7 +5293,7 @@ static struct elevator_type iosched_bfq = {
+ .elevator_owner = THIS_MODULE,
+ };
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct blkcg_policy blkcg_policy_bfq = {
+ .dfl_cftypes = bfq_blkg_files,
+ .legacy_cftypes = bfq_blkcg_legacy_files,
+@@ -5315,7 +5316,7 @@ static int __init bfq_init(void)
+ int ret;
+ char msg[60] = "BFQ I/O-scheduler: v8r12";
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ ret = blkcg_policy_register(&blkcg_policy_bfq);
+ if (ret)
+ return ret;
+@@ -5362,7 +5363,7 @@ static int __init bfq_init(void)
+ if (ret)
+ goto err_pol_unreg;
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ strcat(msg, " (with cgroups support)");
+ #endif
+ pr_info("%s", msg);
+@@ -5370,7 +5371,7 @@ static int __init bfq_init(void)
+ return 0;
+
+ err_pol_unreg:
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ return ret;
+@@ -5379,7 +5380,7 @@ static int __init bfq_init(void)
+ static void __exit bfq_exit(void)
+ {
+ elv_unregister(&iosched_bfq);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ bfq_slab_kill();
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 5c0f9290a79c..b54a638186e3 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -136,7 +136,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "update_next_in_service: chosen this queue");
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(next_in_service,
+@@ -149,7 +149,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ return parent_sched_may_change;
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ /* both next loops stop at one of the child entities of the root group */
+ #define for_each_entity(entity) \
+ for (; entity ; entity = entity->parent)
+@@ -243,7 +243,7 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
+ return false;
+ }
+
+-#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
+ #define for_each_entity(entity) \
+ for (; entity ; entity = NULL)
+
+@@ -260,7 +260,7 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
+ return true;
+ }
+
+-#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ /*
+ * Shift for timestamp calculations. This actually limits the maximum
+@@ -323,7 +323,7 @@ static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "calc_finish: start %llu, finish %llu, delta %llu",
+ start, finish, delta);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -473,7 +473,7 @@ static void bfq_update_active_node(struct rb_node *node)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "update_active_node: new min_start %llu",
+ ((entity->min_start>>10)*1000)>>12);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -540,7 +540,7 @@ static void bfq_active_insert(struct bfq_service_tree *st,
+ {
+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
+ struct rb_node *node = &entity->rb_node;
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ struct bfq_sched_data *sd = NULL;
+ struct bfq_group *bfqg = NULL;
+ struct bfq_data *bfqd = NULL;
+@@ -555,7 +555,7 @@ static void bfq_active_insert(struct bfq_service_tree *st,
+
+ bfq_update_active_tree(node);
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ sd = entity->sched_data;
+ bfqg = container_of(sd, struct bfq_group, sched_data);
+ BUG_ON(!bfqg);
+@@ -563,7 +563,7 @@ static void bfq_active_insert(struct bfq_service_tree *st,
+ #endif
+ if (bfqq)
+ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else { /* bfq_group */
+ BUG_ON(!bfqd);
+ bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
+@@ -652,7 +652,7 @@ static void bfq_active_extract(struct bfq_service_tree *st,
+ {
+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
+ struct rb_node *node;
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ struct bfq_sched_data *sd = NULL;
+ struct bfq_group *bfqg = NULL;
+ struct bfq_data *bfqd = NULL;
+@@ -664,7 +664,7 @@ static void bfq_active_extract(struct bfq_service_tree *st,
+ if (node)
+ bfq_update_active_tree(node);
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ sd = entity->sched_data;
+ bfqg = container_of(sd, struct bfq_group, sched_data);
+ BUG_ON(!bfqg);
+@@ -672,7 +672,7 @@ static void bfq_active_extract(struct bfq_service_tree *st,
+ #endif
+ if (bfqq)
+ list_del(&bfqq->bfqq_list);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else { /* bfq_group */
+ BUG_ON(!bfqd);
+ bfq_weights_tree_remove(bfqd, entity,
+@@ -809,14 +809,14 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
+ unsigned int prev_weight, new_weight;
+ struct bfq_data *bfqd = NULL;
+ struct rb_root *root;
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ struct bfq_sched_data *sd;
+ struct bfq_group *bfqg;
+ #endif
+
+ if (bfqq)
+ bfqd = bfqq->bfqd;
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ sd = entity->my_sched_data;
+ bfqg = container_of(sd, struct bfq_group, sched_data);
+@@ -907,7 +907,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
+ return new_st;
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
+ #endif
+
+@@ -936,7 +936,7 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
+ st->vtime += bfq_delta(served, st->wsum);
+ bfq_forget_idle(st);
+ }
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
+ #endif
+ st = bfq_entity_service_tree(&bfqq->entity);
+@@ -1060,7 +1060,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "__activate_entity: new queue finish %llu",
+ ((entity->finish>>10)*1000)>>12);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -1078,7 +1078,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "__activate_entity: queue %seligible in st %p",
+ entity->start <= st->vtime ? "" : "non ", st);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -1153,7 +1153,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
+
+ BUG_ON(entity->on_st && bfqq);
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ if (entity->on_st && !bfqq) {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group,
+@@ -1485,7 +1485,7 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+				"invoking update_next for this queue");
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(entity,
+@@ -1525,7 +1525,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "calc_vtime_jump: new value %llu",
+ root_entity->min_start);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(root_entity, struct bfq_group,
+@@ -1661,7 +1661,7 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service
+ "__lookup_next: start %llu vtime %llu st %p",
+ ((entity->start>>10)*1000)>>12,
+ ((new_vtime>>10)*1000)>>12, st);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -1735,7 +1735,7 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "chosen from st %p %d",
+ st + class_idx, class_idx);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -1777,7 +1777,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ */
+ sd = &bfqd->root_group->sched_data;
+ for (; sd ; sd = entity->my_sched_data) {
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ if (entity) {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -1867,7 +1867,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ bfq_log_bfqq(bfqd, bfqq,
+ "get_next_queue: this queue, finish %llu",
+ (((entity->finish>>10)*1000)>>10)>>2);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 30d019fc67e0..25da0d1c0622 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -387,7 +387,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
+ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
+ (bfqd->queue_weights_tree.rb_node->rb_left ||
+ bfqd->queue_weights_tree.rb_node->rb_right)
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ ) ||
+ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
+ (bfqd->group_weights_tree.rb_node->rb_left ||
+@@ -1672,7 +1672,7 @@ static void bfq_merged_request(struct request_queue *q, struct request *req,
+ }
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static void bfq_bio_merged(struct request_queue *q, struct request *req,
+ struct bio *bio)
+ {
+@@ -3879,7 +3879,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
+ */
+ static void bfq_put_queue(struct bfq_queue *bfqq)
+ {
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ struct bfq_group *bfqg = bfqq_group(bfqq);
+ #endif
+
+@@ -3909,7 +3909,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+
+ kmem_cache_free(bfq_pool, bfqq);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ bfqg_put(bfqg);
+ #endif
+ }
+@@ -4835,7 +4835,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
+ #else
+ bfq_put_async_queues(bfqd, bfqd->root_group);
+@@ -4850,7 +4850,7 @@ static void bfq_init_root_group(struct bfq_group *root_group,
+ {
+ int i;
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ root_group->entity.parent = NULL;
+ root_group->my_entity = NULL;
+ root_group->bfqd = bfqd;
+@@ -5265,7 +5265,7 @@ static struct elevator_type iosched_bfq = {
+ .elevator_merge_fn = bfq_merge,
+ .elevator_merged_fn = bfq_merged_request,
+ .elevator_merge_req_fn = bfq_merged_requests,
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ .elevator_bio_merged_fn = bfq_bio_merged,
+ #endif
+ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge,
+@@ -5292,7 +5292,7 @@ static struct elevator_type iosched_bfq = {
+ .elevator_owner = THIS_MODULE,
+ };
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct blkcg_policy blkcg_policy_bfq = {
+ .dfl_cftypes = bfq_blkg_files,
+ .legacy_cftypes = bfq_blkcg_legacy_files,
+@@ -5315,7 +5315,7 @@ static int __init bfq_init(void)
+ int ret;
+ char msg[60] = "BFQ I/O-scheduler: v8r12";
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ ret = blkcg_policy_register(&blkcg_policy_bfq);
+ if (ret)
+ return ret;
+@@ -5362,7 +5362,7 @@ static int __init bfq_init(void)
+ if (ret)
+ goto err_pol_unreg;
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ strcat(msg, " (with cgroups support)");
+ #endif
+ pr_info("%s", msg);
+@@ -5370,7 +5370,7 @@ static int __init bfq_init(void)
+ return 0;
+
+ err_pol_unreg:
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ return ret;
+@@ -5379,7 +5379,7 @@ static int __init bfq_init(void)
+ static void __exit bfq_exit(void)
+ {
+ elv_unregister(&iosched_bfq);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ bfq_slab_kill();
+diff --git a/block/bfq.h b/block/bfq.h
+index 34fc4697fd89..53954d1b87f8 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -19,6 +19,18 @@
+ #include <linux/hrtimer.h>
+ #include <linux/blk-cgroup.h>
+
++/*
++ * Define an alternative macro to compile cgroups support. This is one
++ * of the steps needed to let bfq-mq share the files bfq-sched.c and
++ * bfq-cgroup.c with bfq-sq. For bfq-mq, the macro
++ * BFQ_GROUP_IOSCHED_ENABLED will be defined as a function of whether
++ * the configuration option CONFIG_BFQ_MQ_GROUP_IOSCHED, and not
++ * CONFIG_BFQ_GROUP_IOSCHED, is defined.
++ */
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#define BFQ_GROUP_IOSCHED_ENABLED
++#endif
++
+ #define BFQ_IOPRIO_CLASSES 3
+ #define BFQ_CL_IDLE_TIMEOUT (HZ/5)
+
+@@ -344,7 +356,7 @@ struct bfq_io_cq {
+ struct bfq_ttime ttime;
+ /* per (request_queue, blkcg) ioprio */
+ int ioprio;
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ uint64_t blkcg_serial_nr; /* the current blkcg serial */
+ #endif
+
+@@ -671,7 +683,7 @@ static const char *checked_dev_name(const struct device *dev)
+ return nodev;
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+@@ -696,7 +708,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ __pbuf, ##args); \
+ } while (0)
+
+-#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+ pr_crit("%s bfq%d%c " fmt "\n", \
+@@ -705,7 +717,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+-#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+ pr_crit("%s bfq " fmt "\n", \
+@@ -713,7 +725,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ ##args)
+
+ #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+@@ -735,7 +747,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
+ } while (0)
+
+-#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
+@@ -743,7 +755,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+-#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
+@@ -763,7 +775,7 @@ enum bfqq_expiration {
+
+
+ struct bfqg_stats {
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ /* number of ios merged */
+ struct blkg_rwstat merged;
+ /* total time spent on device in ns, may not be accurate w/ queueing */
+@@ -794,7 +806,7 @@ struct bfqg_stats {
+ #endif
+ };
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ /*
+ * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
+ *
+@@ -895,7 +907,7 @@ bfq_entity_service_tree(struct bfq_entity *entity)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "entity_service_tree %p %d",
+ sched_data->service_tree + idx, idx);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -924,7 +936,7 @@ static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
+ return bic->icq.q->elevator->elevator_data;
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+
+ static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
+ {
+@@ -953,7 +965,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
+ struct bfq_io_cq *bic);
+ static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
+ struct bfq_group *bfqg);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
+ #endif
+ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
+
+From add91dbd756cf8ca3aa3add9a19eef742d5fca6b Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 20 Jan 2017 09:18:25 +0100
+Subject: [PATCH 08/51] Increase max policies for io controller
+
+To let the bfq-mq policy be plugged in too (however, cgroups
+support is not yet functional in bfq-mq).
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ include/linux/blkdev.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index bf000c58644b..10f892ca585d 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -54,7 +54,7 @@ struct blk_stat_callback;
+ * Maximum number of blkcg policies allowed to be registered concurrently.
+ * Defined here to simplify include dependency.
+ */
+-#define BLKCG_MAX_POLS 4
++#define BLKCG_MAX_POLS 5
+
+ typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+
+
+From 2c39a1d9ab4516d44e01e96f19f578b927e7f2e9 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 19 Dec 2016 18:11:33 +0100
+Subject: [PATCH 09/51] Copy header file bfq.h as bfq-mq.h
+
+This commit introduces the header file bfq-mq.h, which will play
+for bfq-mq-iosched.c the same role that bfq.h plays for bfq-iosched.c.
+
+For the moment, the file bfq-mq.h is just a copy of bfq.h.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ block/bfq-mq.h | 973 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 974 insertions(+), 1 deletion(-)
+ create mode 100644 block/bfq-mq.h
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index e88e00f1e0a7..d1125aee658c 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -83,7 +83,7 @@
+ #include <linux/ioprio.h>
+ #include "blk.h"
+ #undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */
+-#include "bfq.h"
++#include "bfq-mq.h"
+
+ /* Expiration time of sync (0) and async (1) requests, in ns. */
+ static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+new file mode 100644
+index 000000000000..53954d1b87f8
+--- /dev/null
++++ b/block/bfq-mq.h
+@@ -0,0 +1,973 @@
++/*
++ * BFQ v8r12 for 4.11.0: data structures and common functions prototypes.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
++ */
++
++#ifndef _BFQ_H
++#define _BFQ_H
++
++#include <linux/blktrace_api.h>
++#include <linux/hrtimer.h>
++#include <linux/blk-cgroup.h>
++
++/*
++ * Define an alternative macro to compile cgroups support. This is one
++ * of the steps needed to let bfq-mq share the files bfq-sched.c and
++ * bfq-cgroup.c with bfq-sq. For bfq-mq, the macro
++ * BFQ_GROUP_IOSCHED_ENABLED will be defined as a function of whether
++ * the configuration option CONFIG_BFQ_MQ_GROUP_IOSCHED, and not
++ * CONFIG_BFQ_GROUP_IOSCHED, is defined.
++ */
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#define BFQ_GROUP_IOSCHED_ENABLED
++#endif
++
++#define BFQ_IOPRIO_CLASSES 3
++#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
++
++#define BFQ_MIN_WEIGHT 1
++#define BFQ_MAX_WEIGHT 1000
++#define BFQ_WEIGHT_CONVERSION_COEFF 10
++
++#define BFQ_DEFAULT_QUEUE_IOPRIO 4
++
++#define BFQ_WEIGHT_LEGACY_DFL 100
++#define BFQ_DEFAULT_GRP_IOPRIO 0
++#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
++
++/*
++ * Soft real-time applications are far more latency-sensitive
++ * than interactive ones. Over-raise the weight of the former to
++ * privilege them against the latter.
++ */
++#define BFQ_SOFTRT_WEIGHT_FACTOR 100
++
++struct bfq_entity;
++
++/**
++ * struct bfq_service_tree - per ioprio_class service tree.
++ *
++ * Each service tree represents a B-WF2Q+ scheduler on its own. Each
++ * ioprio_class has its own independent scheduler, and so its own
++ * bfq_service_tree. All the fields are protected by the queue lock
++ * of the containing bfqd.
++ */
++struct bfq_service_tree {
++ /* tree for active entities (i.e., those backlogged) */
++ struct rb_root active;
++ /* tree for idle entities (i.e., not backlogged, with V <= F_i)*/
++ struct rb_root idle;
++
++ struct bfq_entity *first_idle; /* idle entity with minimum F_i */
++ struct bfq_entity *last_idle; /* idle entity with maximum F_i */
++
++ u64 vtime; /* scheduler virtual time */
++ /* scheduler weight sum; active and idle entities contribute to it */
++ unsigned long wsum;
++};
++
++/**
++ * struct bfq_sched_data - multi-class scheduler.
++ *
++ * bfq_sched_data is the basic scheduler queue. It supports three
++ * ioprio_classes, and can be used either as a toplevel queue or as an
++ * intermediate queue in a hierarchical setup.
++ *
++ * The supported ioprio_classes are the same as in CFQ, in descending
++ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
++ * Requests from higher priority queues are served before all the
++ * requests from lower priority queues; requests within the same
++ * queue are served according to B-WF2Q+.
++ *
++ * The schedule is implemented by the service trees, plus the field
++ * @next_in_service, which points to the entity on the active trees
++ * that will be served next, if 1) no changes in the schedule occur
++ * before the current in-service entity is expired, 2) the in-service
++ * queue becomes idle when it expires, and 3) if the entity pointed by
++ * in_service_entity is not a queue, then the in-service child entity
++ * of the entity pointed by in_service_entity becomes idle on
++ * expiration. This peculiar definition allows for the following
++ * optimization, not yet exploited: while a given entity is still in
++ * service, we already know which is the best candidate for next
++ * service among the other active entities in the same parent
++ * entity. We can then quickly compare the timestamps of the
++ * in-service entity with those of such best candidate.
++ *
++ * All the fields are protected by the queue lock of the containing
++ * bfqd.
++ */
++struct bfq_sched_data {
++ struct bfq_entity *in_service_entity; /* entity in service */
++ /* head-of-the-line entity in the scheduler (see comments above) */
++ struct bfq_entity *next_in_service;
++ /* array of service trees, one per ioprio_class */
++ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
++ /* last time CLASS_IDLE was served */
++ unsigned long bfq_class_idle_last_service;
++
++};
++
++/**
++ * struct bfq_weight_counter - counter of the number of all active entities
++ * with a given weight.
++ */
++struct bfq_weight_counter {
++ unsigned int weight; /* weight of the entities this counter refers to */
++ unsigned int num_active; /* nr of active entities with this weight */
++ /*
++ * Weights tree member (see bfq_data's @queue_weights_tree and
++ * @group_weights_tree)
++ */
++ struct rb_node weights_node;
++};
++
++/**
++ * struct bfq_entity - schedulable entity.
++ *
++ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
++ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
++ * entity belongs to the sched_data of the parent group in the cgroup
++ * hierarchy. Non-leaf entities have also their own sched_data, stored
++ * in @my_sched_data.
++ *
++ * Each entity stores independently its priority values; this would
++ * allow different weights on different devices, but this
++ * functionality is not exported to userspace by now. Priorities and
++ * weights are updated lazily, first storing the new values into the
++ * new_* fields, then setting the @prio_changed flag. As soon as
++ * there is a transition in the entity state that allows the priority
++ * update to take place the effective and the requested priority
++ * values are synchronized.
++ *
++ * Unless cgroups are used, the weight value is calculated from the
++ * ioprio to export the same interface as CFQ. When dealing with
++ * ``well-behaved'' queues (i.e., queues that do not spend too much
++ * time to consume their budget and have true sequential behavior, and
++ * when there are no external factors breaking anticipation) the
++ * relative weights at each level of the cgroups hierarchy should be
++ * guaranteed. All the fields are protected by the queue lock of the
++ * containing bfqd.
++ */
++struct bfq_entity {
++ struct rb_node rb_node; /* service_tree member */
++ /* pointer to the weight counter associated with this entity */
++ struct bfq_weight_counter *weight_counter;
++
++ /*
++ * Flag, true if the entity is on a tree (either the active or
++ * the idle one of its service_tree) or is in service.
++ */
++ bool on_st;
++
++ u64 finish; /* B-WF2Q+ finish timestamp (aka F_i) */
++ u64 start; /* B-WF2Q+ start timestamp (aka S_i) */
++
++ /* tree the entity is enqueued into; %NULL if not on a tree */
++ struct rb_root *tree;
++
++ /*
++ * minimum start time of the (active) subtree rooted at this
++ * entity; used for O(log N) lookups into active trees
++ */
++ u64 min_start;
++
++ /* amount of service received during the last service slot */
++ int service;
++
++ /* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
++ int budget;
++
++ unsigned int weight; /* weight of the queue */
++ unsigned int new_weight; /* next weight if a change is in progress */
++
++ /* original weight, used to implement weight boosting */
++ unsigned int orig_weight;
++
++ /* parent entity, for hierarchical scheduling */
++ struct bfq_entity *parent;
++
++ /*
++ * For non-leaf nodes in the hierarchy, the associated
++ * scheduler queue, %NULL on leaf nodes.
++ */
++ struct bfq_sched_data *my_sched_data;
++ /* the scheduler queue this entity belongs to */
++ struct bfq_sched_data *sched_data;
++
++ /* flag, set to request a weight, ioprio or ioprio_class change */
++ int prio_changed;
++};
++
++struct bfq_group;
++
++/**
++ * struct bfq_queue - leaf schedulable entity.
++ *
++ * A bfq_queue is a leaf request queue; it can be associated with an
++ * io_context or more, if it is async or shared between cooperating
++ * processes. @cgroup holds a reference to the cgroup, to be sure that it
++ * does not disappear while a bfqq still references it (mostly to avoid
++ * races between request issuing and task migration followed by cgroup
++ * destruction).
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_queue {
++ /* reference counter */
++ int ref;
++ /* parent bfq_data */
++ struct bfq_data *bfqd;
++
++ /* current ioprio and ioprio class */
++ unsigned short ioprio, ioprio_class;
++ /* next ioprio and ioprio class if a change is in progress */
++ unsigned short new_ioprio, new_ioprio_class;
++
++ /*
++ * Shared bfq_queue if queue is cooperating with one or more
++ * other queues.
++ */
++ struct bfq_queue *new_bfqq;
++ /* request-position tree member (see bfq_group's @rq_pos_tree) */
++ struct rb_node pos_node;
++ /* request-position tree root (see bfq_group's @rq_pos_tree) */
++ struct rb_root *pos_root;
++
++ /* sorted list of pending requests */
++ struct rb_root sort_list;
++ /* if fifo isn't expired, next request to serve */
++ struct request *next_rq;
++ /* number of sync and async requests queued */
++ int queued[2];
++ /* number of sync and async requests currently allocated */
++ int allocated[2];
++ /* number of pending metadata requests */
++ int meta_pending;
++ /* fifo list of requests in sort_list */
++ struct list_head fifo;
++
++ /* entity representing this queue in the scheduler */
++ struct bfq_entity entity;
++
++ /* maximum budget allowed from the feedback mechanism */
++ int max_budget;
++ /* budget expiration (in jiffies) */
++ unsigned long budget_timeout;
++
++ /* number of requests on the dispatch list or inside driver */
++ int dispatched;
++
++ unsigned int flags; /* status flags.*/
++
++ /* node for active/idle bfqq list inside parent bfqd */
++ struct list_head bfqq_list;
++
++ /* bit vector: a 1 for each seeky requests in history */
++ u32 seek_history;
++
++ /* node for the device's burst list */
++ struct hlist_node burst_list_node;
++
++ /* position of the last request enqueued */
++ sector_t last_request_pos;
++
++ /* Number of consecutive pairs of request completion and
++ * arrival, such that the queue becomes idle after the
++ * completion, but the next request arrives within an idle
++ * time slice; used only if the queue's IO_bound flag has been
++ * cleared.
++ */
++ unsigned int requests_within_timer;
++
++ /* pid of the process owning the queue, used for logging purposes */
++ pid_t pid;
++
++ /*
++ * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL
++ * if the queue is shared.
++ */
++ struct bfq_io_cq *bic;
++
++ /* current maximum weight-raising time for this queue */
++ unsigned long wr_cur_max_time;
++ /*
++ * Minimum time instant such that, only if a new request is
++ * enqueued after this time instant in an idle @bfq_queue with
++ * no outstanding requests, then the task associated with the
++ * queue is deemed as soft real-time (see the comments on
++ * the function bfq_bfqq_softrt_next_start())
++ */
++ unsigned long soft_rt_next_start;
++ /*
++ * Start time of the current weight-raising period if
++ * the @bfq-queue is being weight-raised, otherwise
++ * finish time of the last weight-raising period.
++ */
++ unsigned long last_wr_start_finish;
++ /* factor by which the weight of this queue is multiplied */
++ unsigned int wr_coeff;
++ /*
++ * Time of the last transition of the @bfq_queue from idle to
++ * backlogged.
++ */
++ unsigned long last_idle_bklogged;
++ /*
++ * Cumulative service received from the @bfq_queue since the
++ * last transition from idle to backlogged.
++ */
++ unsigned long service_from_backlogged;
++ /*
++ * Value of wr start time when switching to soft rt
++ */
++ unsigned long wr_start_at_switch_to_srt;
++
++ unsigned long split_time; /* time of last split */
++};
++
++/**
++ * struct bfq_ttime - per process thinktime stats.
++ */
++struct bfq_ttime {
++ u64 last_end_request; /* completion time of last request */
++
++ u64 ttime_total; /* total process thinktime */
++ unsigned long ttime_samples; /* number of thinktime samples */
++ u64 ttime_mean; /* average process thinktime */
++
++};
++
++/**
++ * struct bfq_io_cq - per (request_queue, io_context) structure.
++ */
++struct bfq_io_cq {
++ /* associated io_cq structure */
++ struct io_cq icq; /* must be the first member */
++ /* array of two process queues, the sync and the async */
++ struct bfq_queue *bfqq[2];
++ /* associated @bfq_ttime struct */
++ struct bfq_ttime ttime;
++ /* per (request_queue, blkcg) ioprio */
++ int ioprio;
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ uint64_t blkcg_serial_nr; /* the current blkcg serial */
++#endif
++
++ /*
++ * Snapshot of the has_short_time flag before merging; taken
++ * to remember its value while the queue is merged, so as to
++ * be able to restore it in case of split.
++ */
++ bool saved_has_short_ttime;
++ /*
++ * Same purpose as the previous two fields for the I/O bound
++ * classification of a queue.
++ */
++ bool saved_IO_bound;
++
++ /*
++ * Same purpose as the previous fields for the value of the
++ * field keeping the queue's belonging to a large burst
++ */
++ bool saved_in_large_burst;
++ /*
++ * True if the queue belonged to a burst list before its merge
++ * with another cooperating queue.
++ */
++ bool was_in_burst_list;
++
++ /*
++ * Similar to previous fields: save wr information.
++ */
++ unsigned long saved_wr_coeff;
++ unsigned long saved_last_wr_start_finish;
++ unsigned long saved_wr_start_at_switch_to_srt;
++ unsigned int saved_wr_cur_max_time;
++};
++
++enum bfq_device_speed {
++ BFQ_BFQD_FAST,
++ BFQ_BFQD_SLOW,
++};
++
++/**
++ * struct bfq_data - per-device data structure.
++ *
++ * All the fields are protected by the @queue lock.
++ */
++struct bfq_data {
++ /* request queue for the device */
++ struct request_queue *queue;
++
++ /* root bfq_group for the device */
++ struct bfq_group *root_group;
++
++ /*
++ * rbtree of weight counters of @bfq_queues, sorted by
++ * weight. Used to keep track of whether all @bfq_queues have
++ * the same weight. The tree contains one counter for each
++ * distinct weight associated to some active and not
++ * weight-raised @bfq_queue (see the comments to the functions
++ * bfq_weights_tree_[add|remove] for further details).
++ */
++ struct rb_root queue_weights_tree;
++ /*
++ * rbtree of non-queue @bfq_entity weight counters, sorted by
++ * weight. Used to keep track of whether all @bfq_groups have
++ * the same weight. The tree contains one counter for each
++ * distinct weight associated to some active @bfq_group (see
++ * the comments to the functions bfq_weights_tree_[add|remove]
++ * for further details).
++ */
++ struct rb_root group_weights_tree;
++
++ /*
++ * Number of bfq_queues containing requests (including the
++ * queue in service, even if it is idling).
++ */
++ int busy_queues;
++ /* number of weight-raised busy @bfq_queues */
++ int wr_busy_queues;
++ /* number of queued requests */
++ int queued;
++ /* number of requests dispatched and waiting for completion */
++ int rq_in_driver;
++
++ /*
++ * Maximum number of requests in driver in the last
++ * @hw_tag_samples completed requests.
++ */
++ int max_rq_in_driver;
++ /* number of samples used to calculate hw_tag */
++ int hw_tag_samples;
++ /* flag set to one if the driver is showing a queueing behavior */
++ int hw_tag;
++
++ /* number of budgets assigned */
++ int budgets_assigned;
++
++ /*
++ * Timer set when idling (waiting) for the next request from
++ * the queue in service.
++ */
++ struct hrtimer idle_slice_timer;
++ /* delayed work to restart dispatching on the request queue */
++ struct work_struct unplug_work;
++
++ /* bfq_queue in service */
++ struct bfq_queue *in_service_queue;
++ /* bfq_io_cq (bic) associated with the @in_service_queue */
++ struct bfq_io_cq *in_service_bic;
++
++ /* on-disk position of the last served request */
++ sector_t last_position;
++
++ /* time of last request completion (ns) */
++ u64 last_completion;
++
++ /* time of first rq dispatch in current observation interval (ns) */
++ u64 first_dispatch;
++ /* time of last rq dispatch in current observation interval (ns) */
++ u64 last_dispatch;
++
++ /* beginning of the last budget */
++ ktime_t last_budget_start;
++ /* beginning of the last idle slice */
++ ktime_t last_idling_start;
++
++ /* number of samples in current observation interval */
++ int peak_rate_samples;
++ /* num of samples of seq dispatches in current observation interval */
++ u32 sequential_samples;
++ /* total num of sectors transferred in current observation interval */
++ u64 tot_sectors_dispatched;
++ /* max rq size seen during current observation interval (sectors) */
++ u32 last_rq_max_size;
++ /* time elapsed from first dispatch in current observ. interval (us) */
++ u64 delta_from_first;
++ /* current estimate of device peak rate */
++ u32 peak_rate;
++
++ /* maximum budget allotted to a bfq_queue before rescheduling */
++ int bfq_max_budget;
++
++ /* list of all the bfq_queues active on the device */
++ struct list_head active_list;
++ /* list of all the bfq_queues idle on the device */
++ struct list_head idle_list;
++
++ /*
++ * Timeout for async/sync requests; when it fires, requests
++ * are served in fifo order.
++ */
++ u64 bfq_fifo_expire[2];
++ /* weight of backward seeks wrt forward ones */
++ unsigned int bfq_back_penalty;
++ /* maximum allowed backward seek */
++ unsigned int bfq_back_max;
++ /* maximum idling time */
++ u32 bfq_slice_idle;
++
++ /* user-configured max budget value (0 for auto-tuning) */
++ int bfq_user_max_budget;
++ /*
++ * Timeout for bfq_queues to consume their budget; used to
++ * prevent seeky queues from imposing long latencies to
++ * sequential or quasi-sequential ones (this also implies that
++ * seeky queues cannot receive guarantees in the service
++ * domain; after a timeout they are charged for the time they
++ * have been in service, to preserve fairness among them, but
++ * without service-domain guarantees).
++ */
++ unsigned int bfq_timeout;
++
++ /*
++ * Number of consecutive requests that must be issued within
++ * the idle time slice to set again idling to a queue which
++ * was marked as non-I/O-bound (see the definition of the
++ * IO_bound flag for further details).
++ */
++ unsigned int bfq_requests_within_timer;
++
++ /*
++ * Force device idling whenever needed to provide accurate
++ * service guarantees, without caring about throughput
++ * issues. CAVEAT: this may even increase latencies, in case
++ * of useless idling for processes that did stop doing I/O.
++ */
++ bool strict_guarantees;
++
++ /*
++ * Last time at which a queue entered the current burst of
++ * queues being activated shortly after each other; for more
++ * details about this and the following parameters related to
++ * a burst of activations, see the comments on the function
++ * bfq_handle_burst.
++ */
++ unsigned long last_ins_in_burst;
++ /*
++ * Reference time interval used to decide whether a queue has
++ * been activated shortly after @last_ins_in_burst.
++ */
++ unsigned long bfq_burst_interval;
++ /* number of queues in the current burst of queue activations */
++ int burst_size;
++
++ /* common parent entity for the queues in the burst */
++ struct bfq_entity *burst_parent_entity;
++ /* Maximum burst size above which the current queue-activation
++ * burst is deemed as 'large'.
++ */
++ unsigned long bfq_large_burst_thresh;
++ /* true if a large queue-activation burst is in progress */
++ bool large_burst;
++ /*
++ * Head of the burst list (as for the above fields, more
++ * details in the comments on the function bfq_handle_burst).
++ */
++ struct hlist_head burst_list;
++
++ /* if set to true, low-latency heuristics are enabled */
++ bool low_latency;
++ /*
++ * Maximum factor by which the weight of a weight-raised queue
++ * is multiplied.
++ */
++ unsigned int bfq_wr_coeff;
++ /* maximum duration of a weight-raising period (jiffies) */
++ unsigned int bfq_wr_max_time;
++
++ /* Maximum weight-raising duration for soft real-time processes */
++ unsigned int bfq_wr_rt_max_time;
++ /*
++ * Minimum idle period after which weight-raising may be
++ * reactivated for a queue (in jiffies).
++ */
++ unsigned int bfq_wr_min_idle_time;
++ /*
++ * Minimum period between request arrivals after which
++ * weight-raising may be reactivated for an already busy async
++ * queue (in jiffies).
++ */
++ unsigned long bfq_wr_min_inter_arr_async;
++
++ /* Max service-rate for a soft real-time queue, in sectors/sec */
++ unsigned int bfq_wr_max_softrt_rate;
++ /*
++ * Cached value of the product R*T, used for computing the
++ * maximum duration of weight raising automatically.
++ */
++ u64 RT_prod;
++ /* device-speed class for the low-latency heuristic */
++ enum bfq_device_speed device_speed;
++
++ /* fallback dummy bfqq for extreme OOM conditions */
++ struct bfq_queue oom_bfqq;
++};
++
++enum bfqq_state_flags {
++ BFQ_BFQQ_FLAG_just_created = 0, /* queue just allocated */
++ BFQ_BFQQ_FLAG_busy, /* has requests or is in service */
++ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */
++ BFQ_BFQQ_FLAG_non_blocking_wait_rq, /*
++ * waiting for a request
++ * without idling the device
++ */
++ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
++ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
++ BFQ_BFQQ_FLAG_has_short_ttime, /* queue has a short think time */
++ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
++ BFQ_BFQQ_FLAG_IO_bound, /*
++ * bfqq has timed-out at least once
++ * having consumed at most 2/10 of
++ * its budget
++ */
++ BFQ_BFQQ_FLAG_in_large_burst, /*
++ * bfqq activated in a large burst,
++ * see comments to bfq_handle_burst.
++ */
++ BFQ_BFQQ_FLAG_softrt_update, /*
++ * may need softrt-next-start
++ * update
++ */
++ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
++ BFQ_BFQQ_FLAG_split_coop /* shared bfqq will be split */
++};
++
++#define BFQ_BFQQ_FNS(name) \
++static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
++{ \
++ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \
++}
++
++BFQ_BFQQ_FNS(just_created);
++BFQ_BFQQ_FNS(busy);
++BFQ_BFQQ_FNS(wait_request);
++BFQ_BFQQ_FNS(non_blocking_wait_rq);
++BFQ_BFQQ_FNS(must_alloc);
++BFQ_BFQQ_FNS(fifo_expire);
++BFQ_BFQQ_FNS(has_short_ttime);
++BFQ_BFQQ_FNS(sync);
++BFQ_BFQQ_FNS(IO_bound);
++BFQ_BFQQ_FNS(in_large_burst);
++BFQ_BFQQ_FNS(coop);
++BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(softrt_update);
++#undef BFQ_BFQQ_FNS
++
++/* Logging facilities. */
++#ifdef CONFIG_BFQ_REDIRECT_TO_CONSOLE
++
++static const char *checked_dev_name(const struct device *dev)
++{
++ static const char nodev[] = "nodev";
++
++ if (dev)
++ return dev_name(dev);
++
++ return nodev;
++}
++
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ assert_spin_locked((bfqd)->queue->queue_lock); \
++ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
++ pr_crit("%s bfq%d%c %s " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ __pbuf, ##args); \
++} while (0)
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
++ pr_crit("%s %s " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ __pbuf, ##args); \
++} while (0)
++
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ pr_crit("%s bfq%d%c " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ ##args)
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
++
++#endif /* BFQ_GROUP_IOSCHED_ENABLED */
++
++#define bfq_log(bfqd, fmt, args...) \
++ pr_crit("%s bfq " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ ##args)
++
++#else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ assert_spin_locked((bfqd)->queue->queue_lock); \
++ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
++ (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ __pbuf, ##args); \
++} while (0)
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
++ blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
++} while (0)
++
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ ##args)
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
++
++#endif /* BFQ_GROUP_IOSCHED_ENABLED */
++
++#define bfq_log(bfqd, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++#endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++
++/* Expiration reasons. */
++enum bfqq_expiration {
++ BFQ_BFQQ_TOO_IDLE = 0, /*
++ * queue has been idling for
++ * too long
++ */
++ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */
++ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */
++ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */
++ BFQ_BFQQ_PREEMPTED /* preemption in progress */
++};
++
++
++struct bfqg_stats {
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ /* number of ios merged */
++ struct blkg_rwstat merged;
++ /* total time spent on device in ns, may not be accurate w/ queueing */
++ struct blkg_rwstat service_time;
++ /* total time spent waiting in scheduler queue in ns */
++ struct blkg_rwstat wait_time;
++ /* number of IOs queued up */
++ struct blkg_rwstat queued;
++ /* total disk time and nr sectors dispatched by this group */
++ struct blkg_stat time;
++ /* sum of number of ios queued across all samples */
++ struct blkg_stat avg_queue_size_sum;
++ /* count of samples taken for average */
++ struct blkg_stat avg_queue_size_samples;
++ /* how many times this group has been removed from service tree */
++ struct blkg_stat dequeue;
++ /* total time spent waiting for it to be assigned a timeslice. */
++ struct blkg_stat group_wait_time;
++ /* time spent idling for this blkcg_gq */
++ struct blkg_stat idle_time;
++ /* total time with empty current active q with other requests queued */
++ struct blkg_stat empty_time;
++ /* fields after this shouldn't be cleared on stat reset */
++ uint64_t start_group_wait_time;
++ uint64_t start_idle_time;
++ uint64_t start_empty_time;
++ uint16_t flags;
++#endif
++};
++
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++/*
++ * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
++ *
++ * @pd: the blkcg_policy_data that this structure embeds (first member)
++ * @weight: weight of the bfq_group
++ */
++struct bfq_group_data {
++ /* must be the first member */
++ struct blkcg_policy_data pd;
++
++ unsigned int weight;
++};
++
++/**
++ * struct bfq_group - per (device, cgroup) data structure.
++ * @entity: schedulable entity to insert into the parent group sched_data.
++ * @sched_data: own sched_data, to contain child entities (they may be
++ * both bfq_queues and bfq_groups).
++ * @bfqd: the bfq_data for the device this group acts upon.
++ * @async_bfqq: array of async queues for all the tasks belonging to
++ * the group, one queue per ioprio value per ioprio_class,
++ * except for the idle class that has only one queue.
++ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
++ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
++ * to avoid too many special cases during group creation/
++ * migration.
++ * @active_entities: number of active entities belonging to the group;
++ * unused for the root group. Used to know whether there
++ * are groups with more than one active @bfq_entity
++ * (see the comments to the function
++ * bfq_bfqq_may_idle()).
++ * @rq_pos_tree: rbtree sorted by next_request position, used when
++ * determining if two or more queues have interleaving
++ * requests (see bfq_find_close_cooperator()).
++ *
++ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
++ * there is a set of bfq_groups, each one collecting the lower-level
++ * entities belonging to the group that are acting on the same device.
++ *
++ * Locking works as follows:
++ * o @bfqd is protected by the queue lock, RCU is used to access it
++ * from the readers.
++ * o All the other fields are protected by the @bfqd queue lock.
++ */
++struct bfq_group {
++ /* must be the first member */
++ struct blkg_policy_data pd;
++
++ struct bfq_entity entity;
++ struct bfq_sched_data sched_data;
++
++ void *bfqd;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct bfq_entity *my_entity;
++
++ int active_entities;
++
++ struct rb_root rq_pos_tree;
++
++ struct bfqg_stats stats;
++};
++
++#else
++struct bfq_group {
++ struct bfq_sched_data sched_data;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct rb_root rq_pos_tree;
++};
++#endif
++
++static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
++
++static unsigned int bfq_class_idx(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ return bfqq ? bfqq->ioprio_class - 1 :
++ BFQ_DEFAULT_GRP_CLASS - 1;
++}
++
++static struct bfq_service_tree *
++bfq_entity_service_tree(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sched_data = entity->sched_data;
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ unsigned int idx = bfq_class_idx(entity);
++
++ BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
++ BUG_ON(sched_data == NULL);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "entity_service_tree %p %d",
++ sched_data->service_tree + idx, idx);
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "entity_service_tree %p %d",
++ sched_data->service_tree + idx, idx);
++ }
++#endif
++ return sched_data->service_tree + idx;
++}
++
++static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
++{
++ return bic->bfqq[is_sync];
++}
++
++static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq,
++ bool is_sync)
++{
++ bic->bfqq[is_sync] = bfqq;
++}
++
++static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
++{
++ return bic->icq.q->elevator->elevator_data;
++}
++
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++
++static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *group_entity = bfqq->entity.parent;
++
++ if (!group_entity)
++ group_entity = &bfqq->bfqd->root_group->entity;
++
++ return container_of(group_entity, struct bfq_group, entity);
++}
++
++#else
++
++static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
++{
++ return bfqq->bfqd->root_group;
++}
++
++#endif
++
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio);
++static void bfq_put_queue(struct bfq_queue *bfqq);
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bio *bio, bool is_sync,
++ struct bfq_io_cq *bic);
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg);
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
++#endif
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
++
++#endif /* _BFQ_H */
+
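One idiom worth noting in the header just added is the BFQ_BFQQ_FNS() macro near its end: a single invocation per state flag generates the matching mark/clear/test helper triplet by token pasting, instead of three hand-written functions per bit. The self-contained sketch below reproduces that pattern outside the kernel; the flag names, struct queue and QUEUE_FNS() are made-up stand-ins for BFQ_BFQQ_FLAG_*, struct bfq_queue and BFQ_BFQQ_FNS().

#include <stdio.h>

/* Stand-alone model of the flag-helper macro idiom. */
enum queue_flags {
        FLAG_busy = 0,
        FLAG_sync,
};

struct queue {
        unsigned int flags;
};

#define QUEUE_FNS(name)                                         \
static void mark_queue_##name(struct queue *q)                  \
{                                                               \
        q->flags |= (1U << FLAG_##name);                        \
}                                                               \
static void clear_queue_##name(struct queue *q)                 \
{                                                               \
        q->flags &= ~(1U << FLAG_##name);                       \
}                                                               \
static int queue_##name(const struct queue *q)                  \
{                                                               \
        return (q->flags & (1U << FLAG_##name)) != 0;           \
}

QUEUE_FNS(busy)
QUEUE_FNS(sync)
#undef QUEUE_FNS

int main(void)
{
        struct queue q = { 0 };

        mark_queue_busy(&q);
        mark_queue_sync(&q);
        printf("busy=%d sync=%d\n", queue_busy(&q), queue_sync(&q));
        clear_queue_busy(&q);
        clear_queue_sync(&q);
        printf("busy=%d sync=%d after clearing\n", queue_busy(&q), queue_sync(&q));
        return 0;
}

In the header, BFQ_BFQQ_FNS(busy) expands in the same way into bfq_mark_bfqq_busy(), bfq_clear_bfqq_busy() and bfq_bfqq_busy().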
+From 0bd96428e086fd28800efdf5f0a5f62869af6e30 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Sat, 21 Jan 2017 12:41:14 +0100
+Subject: [PATCH 10/51] Move thinktime from bic to bfqq
+
+Prep change to make it possible to protect this field with a
+scheduler lock.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 28 ++++++++++++++--------------
+ block/bfq-mq.h | 30 ++++++++++++++++--------------
+ 2 files changed, 30 insertions(+), 28 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index d1125aee658c..65f5dfb79417 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -698,6 +698,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ if (unlikely(busy))
+ old_wr_coeff = bfqq->wr_coeff;
+
++ bfqq->ttime = bic->saved_ttime;
+ bfqq->wr_coeff = bic->saved_wr_coeff;
+ bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
+ BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt));
+@@ -1287,7 +1288,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
+ * details on the usage of the next variable.
+ */
+ arrived_in_time = ktime_get_ns() <=
+- RQ_BIC(rq)->ttime.last_end_request +
++ bfqq->ttime.last_end_request +
+ bfqd->bfq_slice_idle * 3;
+
+ bfq_log_bfqq(bfqd, bfqq,
+@@ -2048,6 +2049,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ if (!bic)
+ return;
+
++ bic->saved_ttime = bfqq->ttime;
+ bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+@@ -3948,11 +3950,6 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_put_queue(bfqq); /* release process reference */
+ }
+
+-static void bfq_init_icq(struct io_cq *icq)
+-{
+- icq_to_bic(icq)->ttime.last_end_request = ktime_get_ns() - (1ULL<<32);
+-}
+-
+ static void bfq_exit_icq(struct io_cq *icq)
+ {
+ struct bfq_io_cq *bic = icq_to_bic(icq);
+@@ -4084,6 +4081,9 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_mark_bfqq_just_created(bfqq);
+ } else
+ bfq_clear_bfqq_sync(bfqq);
++
++ bfqq->ttime.last_end_request = ktime_get_ns() - (1ULL<<32);
++
+ bfq_mark_bfqq_IO_bound(bfqq);
+
+ /* Tentative initial value to trade off between thr and lat */
+@@ -4191,14 +4191,14 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
+ }
+
+ static void bfq_update_io_thinktime(struct bfq_data *bfqd,
+- struct bfq_io_cq *bic)
++ struct bfq_queue *bfqq)
+ {
+- struct bfq_ttime *ttime = &bic->ttime;
+- u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request;
++ struct bfq_ttime *ttime = &bfqq->ttime;
++ u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
+
+ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle);
+
+- ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++ ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
+ ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
+ ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
+ ttime->ttime_samples);
+@@ -4240,8 +4240,8 @@ static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
+ * decide whether to mark as has_short_ttime
+ */
+ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
+- (bfq_sample_valid(bic->ttime.ttime_samples) &&
+- bic->ttime.ttime_mean > bfqd->bfq_slice_idle))
++ (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
++ bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
+ has_short_ttime = false;
+
+ bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
+@@ -4265,7 +4265,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (rq->cmd_flags & REQ_META)
+ bfqq->meta_pending++;
+
+- bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_io_thinktime(bfqd, bfqq);
+ bfq_update_has_short_ttime(bfqd, bfqq, bic);
+ bfq_update_io_seektime(bfqd, bfqq, rq);
+
+@@ -4436,7 +4436,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+
+ now_ns = ktime_get_ns();
+
+- RQ_BIC(rq)->ttime.last_end_request = now_ns;
++ bfqq->ttime.last_end_request = now_ns;
+
+ /*
+ * Using us instead of ns, to get a reasonable precision in
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 53954d1b87f8..0f51f270469c 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -210,6 +210,18 @@ struct bfq_entity {
+ struct bfq_group;
+
+ /**
++ * struct bfq_ttime - per process thinktime stats.
++ */
++struct bfq_ttime {
++ u64 last_end_request; /* completion time of last request */
++
++ u64 ttime_total; /* total process thinktime */
++ unsigned long ttime_samples; /* number of thinktime samples */
++ u64 ttime_mean; /* average process thinktime */
++
++};
++
++/**
+ * struct bfq_queue - leaf schedulable entity.
+ *
+ * A bfq_queue is a leaf request queue; it can be associated with an
+@@ -270,6 +282,9 @@ struct bfq_queue {
+ /* node for active/idle bfqq list inside parent bfqd */
+ struct list_head bfqq_list;
+
++ /* associated @bfq_ttime struct */
++ struct bfq_ttime ttime;
++
+ /* bit vector: a 1 for each seeky requests in history */
+ u32 seek_history;
+
+@@ -333,18 +348,6 @@ struct bfq_queue {
+ };
+
+ /**
+- * struct bfq_ttime - per process thinktime stats.
+- */
+-struct bfq_ttime {
+- u64 last_end_request; /* completion time of last request */
+-
+- u64 ttime_total; /* total process thinktime */
+- unsigned long ttime_samples; /* number of thinktime samples */
+- u64 ttime_mean; /* average process thinktime */
+-
+-};
+-
+-/**
+ * struct bfq_io_cq - per (request_queue, io_context) structure.
+ */
+ struct bfq_io_cq {
+@@ -352,8 +355,6 @@ struct bfq_io_cq {
+ struct io_cq icq; /* must be the first member */
+ /* array of two process queues, the sync and the async */
+ struct bfq_queue *bfqq[2];
+- /* associated @bfq_ttime struct */
+- struct bfq_ttime ttime;
+ /* per (request_queue, blkcg) ioprio */
+ int ioprio;
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+@@ -390,6 +391,7 @@ struct bfq_io_cq {
+ unsigned long saved_last_wr_start_finish;
+ unsigned long saved_wr_start_at_switch_to_srt;
+ unsigned int saved_wr_cur_max_time;
++ struct bfq_ttime saved_ttime;
+ };
+
+ enum bfq_device_speed {
+
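The hunk above also shows the arithmetic that bfq_update_io_thinktime() now applies to the per-queue state: the elapsed time since the last completion is clamped to twice the idle slice, the sample count and the cumulative think time are decayed by 7/8 with each new sample added at a fixed weight, and the mean is the rounded ratio of the two. The plain-C model below mirrors only that arithmetic; struct ttime copies the fields shown above, while SLICE_IDLE_NS and the simulated gaps are assumptions chosen just to make the sketch runnable.

#include <stdio.h>
#include <stdint.h>

#define SLICE_IDLE_NS (8ULL * 1000 * 1000)      /* 8 ms, illustrative value */

struct ttime {
        uint64_t last_end_request;      /* completion time of last request */
        uint64_t ttime_total;           /* decayed total think time */
        unsigned long ttime_samples;    /* decayed sample count */
        uint64_t ttime_mean;            /* average think time */
};

static void update_io_thinktime(struct ttime *t, uint64_t now_ns)
{
        uint64_t elapsed = now_ns - t->last_end_request;

        /* Clamp, so one long idle period cannot dominate the average. */
        if (elapsed > 2 * SLICE_IDLE_NS)
                elapsed = 2 * SLICE_IDLE_NS;

        /* Exponential decay by 7/8; each sample enters with weight 256/8. */
        t->ttime_samples = (7 * t->ttime_samples + 256) / 8;
        t->ttime_total = (7 * t->ttime_total + 256 * elapsed) / 8;
        t->ttime_mean = (t->ttime_total + 128) / t->ttime_samples;
}

int main(void)
{
        struct ttime t = { .last_end_request = 0 };
        uint64_t now = 0;

        /* Requests arriving 1 ms, 2 ms and 20 ms after the previous
         * completion; the 20 ms gap gets clamped to 2 * SLICE_IDLE_NS. */
        uint64_t gaps_ns[] = { 1000000, 2000000, 20000000 };

        for (int i = 0; i < 3; i++) {
                now += gaps_ns[i];
                update_io_thinktime(&t, now);
                t.last_end_request = now;       /* model: completion at "now" */
                printf("mean think time: %llu ns (samples %lu)\n",
                       (unsigned long long)t.ttime_mean, t.ttime_samples);
        }
        return 0;
}

Because of the decay, recent inter-request gaps dominate the mean, which is what bfq_update_has_short_ttime() compares against bfq_slice_idle in the hunk above.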
+From 351a9aea7c0c9c30edacdbf2a3c0d089470de1e8 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 18 Jan 2017 11:42:22 +0100
+Subject: [PATCH 11/51] Embed bfq-ioc.c and add locking on request queue
+
+The version of bfq-ioc.c used by bfq-iosched.c is no longer correct for
+bfq-mq, because, in bfq-mq, the request queue lock is not held when
+bfq_bic_lookup is invoked. That function must therefore take that lock
+on its own. This commit removes the inclusion of bfq-ioc.c, copies the
+content of bfq-ioc.c into bfq-mq-iosched.c, and adds code to grab the
+lock.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 39 ++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 36 insertions(+), 3 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 65f5dfb79417..756a618d5902 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -195,7 +195,39 @@ static int device_speed_thresh[2];
+
+ static void bfq_schedule_dispatch(struct bfq_data *bfqd);
+
+-#include "bfq-ioc.c"
++/**
++ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
++ * @icq: the iocontext queue.
++ */
++static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
++{
++ /* bic->icq is the first member, %NULL will convert to %NULL */
++ return container_of(icq, struct bfq_io_cq, icq);
++}
++
++/**
++ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
++ * @bfqd: the lookup key.
++ * @ioc: the io_context of the process doing I/O.
++ * @q: the request queue.
++ */
++static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
++ struct io_context *ioc,
++ struct request_queue *q)
++{
++ if (ioc) {
++ struct bfq_io_cq *icq;
++
++ spin_lock_irq(q->queue_lock);
++ icq = icq_to_bic(ioc_lookup_icq(ioc, q));
++ spin_unlock_irq(q->queue_lock);
++
++ return icq;
++ }
++
++ return NULL;
++}
++
+ #include "bfq-sched.c"
+ #include "bfq-cgroup-included.c"
+
+@@ -1520,13 +1552,14 @@ static void bfq_add_request(struct request *rq)
+ }
+
+ static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
+- struct bio *bio)
++ struct bio *bio,
++ struct request_queue *q)
+ {
+ struct task_struct *tsk = current;
+ struct bfq_io_cq *bic;
+ struct bfq_queue *bfqq;
+
+- bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ bic = bfq_bic_lookup(bfqd, tsk->io_context, q);
+ if (!bic)
+ return NULL;
+
+
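The icq_to_bic() helper embedded above relies on struct io_cq being the first member of struct bfq_io_cq: container_of() then reduces to a zero-offset cast, so a NULL io_cq pointer converts to a NULL bfq_io_cq pointer, as the comment notes. The short userspace sketch below reproduces that embed-as-first-member idiom; struct base, struct wrapper and base_to_wrapper() are invented names standing in for io_cq, bfq_io_cq and icq_to_bic().

#include <stdio.h>
#include <stddef.h>

/* Hand-rolled container_of for the sketch. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct base {                   /* plays the role of struct io_cq */
        int id;
};

struct wrapper {                /* plays the role of struct bfq_io_cq */
        struct base icq;        /* must be the first member */
        int extra;
};

static struct wrapper *base_to_wrapper(struct base *b)
{
        /* offsetof(struct wrapper, icq) == 0, so NULL stays NULL */
        return container_of(b, struct wrapper, icq);
}

int main(void)
{
        struct wrapper w = { .icq = { .id = 7 }, .extra = 42 };
        struct wrapper *back = base_to_wrapper(&w.icq);

        printf("id=%d extra=%d same_object=%d\n",
               back->icq.id, back->extra, back == &w);
        return 0;
}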
+From ed0d64e27b2308813a2a846139e405e0479f0849 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 20 Dec 2016 09:07:19 +0100
+Subject: [PATCH 12/51] Modify interface and operation to comply with
+ blk-mq-sched
+
+As for changes in operation, the major ones are the introduction of a
+scheduler lock, and the deferral of the body of the exit_icq hook to a
+work item. The latter change avoids deadlocks caused by the combination
+of the following facts: 1) that body takes the scheduler lock, and, if
+not deferred, 2) it does so from inside the exit_icq hook, which is
+invoked with the queue lock held, and 3) there is at least one code
+path, namely the one starting from bfq_bio_merge, which takes these two
+locks in the opposite order.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
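
The deadlock described above is a classic AB-BA inversion: the bio-merge path takes the scheduler lock and then the queue lock, while an inline exit_icq body would run with the queue lock held and then try to take the scheduler lock. Deferring that body to a work item means the scheduler lock is only ever taken on that path without the queue lock held. The pthread sketch below models just that shape; queue_lock, sched_lock, exit_icq_hook() and exit_icq_work() are illustrative names, and only the locking structure is taken from the message above (in the patch itself, kblockd_schedule_work() plays the role of the thread creation).

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

/* Deferred body: runs later, with no caller-held queue_lock, so taking
 * sched_lock here can no longer invert the bio-merge ordering
 * (sched_lock -> queue_lock). */
static void *exit_icq_work(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&sched_lock);
        puts("deferred work: queue cleanup under sched_lock");
        pthread_mutex_unlock(&sched_lock);
        return NULL;
}

/* Hook called with queue_lock held: it must not take sched_lock inline,
 * so it only schedules the deferred work. */
static pthread_t exit_icq_hook(void)
{
        pthread_t worker;

        pthread_create(&worker, NULL, exit_icq_work, NULL);
        return worker;
}

int main(void)
{
        pthread_t w;

        pthread_mutex_lock(&queue_lock);        /* hook context: queue_lock held */
        w = exit_icq_hook();                    /* no sched_lock acquired here */
        pthread_mutex_unlock(&queue_lock);

        pthread_join(w, NULL);
        return 0;
}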
+---
+ block/bfq-cgroup-included.c | 4 -
+ block/bfq-mq-iosched.c | 695 ++++++++++++++++++++++++--------------------
+ block/bfq-mq.h | 35 +--
+ 3 files changed, 394 insertions(+), 340 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 9c483b658179..8a73de76f32b 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -472,8 +472,6 @@ static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
+ struct bfq_group *bfqg, *parent;
+ struct bfq_entity *entity;
+
+- assert_spin_locked(bfqd->queue->queue_lock);
+-
+ bfqg = bfq_lookup_bfqg(bfqd, blkcg);
+
+ if (unlikely(!bfqg))
+@@ -602,8 +600,6 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
+ struct bfq_group *bfqg;
+ struct bfq_entity *entity;
+
+- lockdep_assert_held(bfqd->queue->queue_lock);
+-
+ bfqg = bfq_find_set_group(bfqd, blkcg);
+
+ if (unlikely(!bfqg))
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 756a618d5902..c963d92a32c2 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -81,7 +81,13 @@
+ #include <linux/jiffies.h>
+ #include <linux/rbtree.h>
+ #include <linux/ioprio.h>
++#include <linux/sbitmap.h>
++#include <linux/delay.h>
++
+ #include "blk.h"
++#include "blk-mq.h"
++#include "blk-mq-tag.h"
++#include "blk-mq-sched.h"
+ #undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */
+ #include "bfq-mq.h"
+
+@@ -193,8 +199,6 @@ static int device_speed_thresh[2];
+ #define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
+ #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
+
+-static void bfq_schedule_dispatch(struct bfq_data *bfqd);
+-
+ /**
+ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
+ * @icq: the iocontext queue.
+@@ -216,11 +220,12 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
+ struct request_queue *q)
+ {
+ if (ioc) {
++ unsigned long flags;
+ struct bfq_io_cq *icq;
+
+- spin_lock_irq(q->queue_lock);
++ spin_lock_irqsave(q->queue_lock, flags);
+ icq = icq_to_bic(ioc_lookup_icq(ioc, q));
+- spin_unlock_irq(q->queue_lock);
++ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return icq;
+ }
+@@ -244,7 +249,7 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ {
+ if (bfqd->queued != 0) {
+ bfq_log(bfqd, "schedule dispatch");
+- kblockd_schedule_work(&bfqd->unplug_work);
++ blk_mq_run_hw_queues(bfqd->queue, true);
+ }
+ }
+
+@@ -768,9 +773,7 @@ static int bfqq_process_refs(struct bfq_queue *bfqq)
+ {
+ int process_refs, io_refs;
+
+- lockdep_assert_held(bfqq->bfqd->queue->queue_lock);
+-
+- io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++ io_refs = bfqq->allocated;
+ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st;
+ BUG_ON(process_refs < 0);
+ return process_refs;
+@@ -1584,6 +1587,7 @@ static sector_t get_sdist(sector_t last_pos, struct request *rq)
+ return sdist;
+ }
+
++#if 0 /* Still not clear if we can do without next two functions */
+ static void bfq_activate_request(struct request_queue *q, struct request *rq)
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+@@ -1597,8 +1601,10 @@ static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
+ BUG_ON(bfqd->rq_in_driver == 0);
+ bfqd->rq_in_driver--;
+ }
++#endif
+
+-static void bfq_remove_request(struct request *rq)
++static void bfq_remove_request(struct request_queue *q,
++ struct request *rq)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+ struct bfq_data *bfqd = bfqq->bfqd;
+@@ -1619,6 +1625,10 @@ static void bfq_remove_request(struct request *rq)
+ bfqd->queued--;
+ elv_rb_del(&bfqq->sort_list, rq);
+
++ elv_rqhash_del(q, rq);
++ if (q->last_merge == rq)
++ q->last_merge = NULL;
++
+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
+ bfqq->next_rq = NULL;
+
+@@ -1659,13 +1669,36 @@ static void bfq_remove_request(struct request *rq)
+ bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
+ }
+
+-static enum elv_merge bfq_merge(struct request_queue *q, struct request **req,
+- struct bio *bio)
++static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
++{
++ struct request_queue *q = hctx->queue;
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct request *free = NULL;
++ bool ret;
++
++ spin_lock_irq(&bfqd->lock);
++ ret = blk_mq_sched_try_merge(q, bio, &free);
++
++ /*
++ * XXX Not yet freeing without lock held, to avoid an
++ * inconsistency with respect to the lock-protected invocation
++ * of blk_mq_sched_try_insert_merge in bfq_bio_merge. Waiting
++ * for clarifications from Jens.
++ */
++ if (free)
++ blk_mq_free_request(free);
++ spin_unlock_irq(&bfqd->lock);
++
++ return ret;
++}
++
++static int bfq_request_merge(struct request_queue *q, struct request **req,
++ struct bio *bio)
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct request *__rq;
+
+- __rq = bfq_find_rq_fmerge(bfqd, bio);
++ __rq = bfq_find_rq_fmerge(bfqd, bio, q);
+ if (__rq && elv_bio_merge_ok(__rq, bio)) {
+ *req = __rq;
+ return ELEVATOR_FRONT_MERGE;
+@@ -1674,7 +1707,7 @@ static enum elv_merge bfq_merge(struct request_queue *q, struct request **req,
+ return ELEVATOR_NO_MERGE;
+ }
+
+-static void bfq_merged_request(struct request_queue *q, struct request *req,
++static void bfq_request_merged(struct request_queue *q, struct request *req,
+ enum elv_merge type)
+ {
+ if (type == ELEVATOR_FRONT_MERGE &&
+@@ -1689,6 +1722,8 @@ static void bfq_merged_request(struct request_queue *q, struct request *req,
+ /* Reposition request in its sort_list */
+ elv_rb_del(&bfqq->sort_list, req);
+ elv_rb_add(&bfqq->sort_list, req);
++
++ spin_lock_irq(&bfqd->lock);
+ /* Choose next request to be served for bfqq */
+ prev = bfqq->next_rq;
+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
+@@ -1704,22 +1739,19 @@ static void bfq_merged_request(struct request_queue *q, struct request *req,
+ bfq_updated_next_req(bfqd, bfqq);
+ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
++ spin_unlock_irq(&bfqd->lock);
+ }
+ }
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+-static void bfq_bio_merged(struct request_queue *q, struct request *req,
+- struct bio *bio)
+-{
+- bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf);
+-}
+-#endif
+-
+-static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+ struct request *next)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
+
++ if (!RB_EMPTY_NODE(&rq->rb_node))
++ goto end;
++ spin_lock_irq(&bfqq->bfqd->lock);
++
+ /*
+ * If next and rq belong to the same bfq_queue and next is older
+ * than rq, then reposition rq in the fifo (by substituting next
+@@ -1740,7 +1772,10 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq,
+ if (bfqq->next_rq == next)
+ bfqq->next_rq = rq;
+
+- bfq_remove_request(next);
++ bfq_remove_request(q, next);
++
++ spin_unlock_irq(&bfqq->bfqd->lock);
++end:
+ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
+ }
+
+@@ -1786,7 +1821,7 @@ static void bfq_end_wr(struct bfq_data *bfqd)
+ {
+ struct bfq_queue *bfqq;
+
+- spin_lock_irq(bfqd->queue->queue_lock);
++ spin_lock_irq(&bfqd->lock);
+
+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
+ bfq_bfqq_end_wr(bfqq);
+@@ -1794,7 +1829,7 @@ static void bfq_end_wr(struct bfq_data *bfqd)
+ bfq_bfqq_end_wr(bfqq);
+ bfq_end_wr_async(bfqd);
+
+- spin_unlock_irq(bfqd->queue->queue_lock);
++ spin_unlock_irq(&bfqd->lock);
+ }
+
+ static sector_t bfq_io_struct_pos(void *io_struct, bool request)
+@@ -2184,8 +2219,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+ bfq_put_queue(bfqq);
+ }
+
+-static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+- struct bio *bio)
++static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
++ struct bio *bio)
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ bool is_sync = op_is_sync(bio->bi_opf);
+@@ -2203,7 +2238,7 @@ static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ * merge only if rq is queued there.
+ * Queue lock is held here.
+ */
+- bic = bfq_bic_lookup(bfqd, current->io_context);
++ bic = bfq_bic_lookup(bfqd, current->io_context, q);
+ if (!bic)
+ return false;
+
+@@ -2228,12 +2263,6 @@ static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ return bfqq == RQ_BFQQ(rq);
+ }
+
+-static int bfq_allow_rq_merge(struct request_queue *q, struct request *rq,
+- struct request *next)
+-{
+- return RQ_BFQQ(rq) == RQ_BFQQ(next);
+-}
+-
+ /*
+ * Set the maximum time for the in-service queue to consume its
+ * budget. This prevents seeky processes from lowering the throughput.
+@@ -2264,7 +2293,6 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+ {
+ if (bfqq) {
+ bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
+- bfq_mark_bfqq_must_alloc(bfqq);
+ bfq_clear_bfqq_fifo_expire(bfqq);
+
+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
+@@ -2703,27 +2731,28 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ }
+
+ /*
+- * Move request from internal lists to the dispatch list of the request queue
++ * Remove request from internal lists.
+ */
+-static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+
+ /*
+- * For consistency, the next instruction should have been executed
+- * after removing the request from the queue and dispatching it.
+- * We execute instead this instruction before bfq_remove_request()
+- * (and hence introduce a temporary inconsistency), for efficiency.
+- * In fact, in a forced_dispatch, this prevents two counters related
+- * to bfqq->dispatched to risk to be uselessly decremented if bfqq
+- * is not in service, and then to be incremented again after
+- * incrementing bfqq->dispatched.
++ * For consistency, the next instruction should have been
++ * executed after removing the request from the queue and
++ * dispatching it. We execute instead this instruction before
++ * bfq_remove_request() (and hence introduce a temporary
++ * inconsistency), for efficiency. In fact, should this
++ * dispatch occur for a non in-service bfqq, this anticipated
++ * increment prevents two counters related to bfqq->dispatched
++ * from risking to be, first, uselessly decremented, and then
++ * incremented again when the (new) value of bfqq->dispatched
++ * happens to be taken into account.
+ */
+ bfqq->dispatched++;
+ bfq_update_peak_rate(q->elevator->elevator_data, rq);
+
+- bfq_remove_request(rq);
+- elv_dispatch_sort(q, rq);
++ bfq_remove_request(q, rq);
+ }
+
+ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+@@ -3605,7 +3634,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
+
+ if (bfq_may_expire_for_budg_timeout(bfqq) &&
+- !hrtimer_active(&bfqd->idle_slice_timer) &&
++ !bfq_bfqq_wait_request(bfqq) &&
+ !bfq_bfqq_must_idle(bfqq))
+ goto expire;
+
+@@ -3641,7 +3670,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ * arrives.
+ */
+ if (bfq_bfqq_wait_request(bfqq)) {
+- BUG_ON(!hrtimer_active(&bfqd->idle_slice_timer));
+ /*
+ * If we get here: 1) at least a new request
+ * has arrived but we have not disabled the
+@@ -3668,7 +3696,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ * for a new request, or has requests waiting for a completion and
+ * may idle after their completion, then keep it anyway.
+ */
+- if (hrtimer_active(&bfqd->idle_slice_timer) ||
++ if (bfq_bfqq_wait_request(bfqq) ||
+ (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
+ bfqq = NULL;
+ goto keep_queue;
+@@ -3753,13 +3781,11 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ }
+
+ /*
+- * Dispatch one request from bfqq, moving it to the request queue
+- * dispatch list.
++ * Dispatch next request from bfqq.
+ */
+-static int bfq_dispatch_request(struct bfq_data *bfqd,
+- struct bfq_queue *bfqq)
++static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
+ {
+- int dispatched = 0;
+ struct request *rq = bfqq->next_rq;
+ unsigned long service_to_charge;
+
+@@ -3775,7 +3801,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd,
+
+ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
+
+- bfq_dispatch_insert(bfqd->queue, rq);
++ bfq_dispatch_remove(bfqd->queue, rq);
+
+ /*
+ * If weight raising has to terminate for bfqq, then next
+@@ -3791,86 +3817,61 @@ static int bfq_dispatch_request(struct bfq_data *bfqd,
+ bfq_update_wr_data(bfqd, bfqq);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "dispatched %u sec req (%llu), budg left %d",
++ "dispatched %u sec req (%llu), budg left %d, new disp_nr %d",
+ blk_rq_sectors(rq),
+ (unsigned long long) blk_rq_pos(rq),
+- bfq_bfqq_budget_left(bfqq));
+-
+- dispatched++;
++ bfq_bfqq_budget_left(bfqq),
++ bfqq->dispatched);
+
+ if (!bfqd->in_service_bic) {
+ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
+ bfqd->in_service_bic = RQ_BIC(rq);
+ }
+
++ /*
++ * Expire bfqq, pretending that its budget expired, if bfqq
++ * belongs to CLASS_IDLE and other queues are waiting for
++ * service.
++ */
+ if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
+ goto expire;
+
+- return dispatched;
++ return rq;
+
+ expire:
+ bfq_bfqq_expire(bfqd, bfqq, false, BFQ_BFQQ_BUDGET_EXHAUSTED);
+- return dispatched;
+-}
+-
+-static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
+-{
+- int dispatched = 0;
+-
+- while (bfqq->next_rq) {
+- bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
+- dispatched++;
+- }
+-
+- BUG_ON(!list_empty(&bfqq->fifo));
+- return dispatched;
++ return rq;
+ }
+
+-/*
+- * Drain our current requests.
+- * Used for barriers and when switching io schedulers on-the-fly.
+- */
+-static int bfq_forced_dispatch(struct bfq_data *bfqd)
++static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
+ {
+- struct bfq_queue *bfqq, *n;
+- struct bfq_service_tree *st;
+- int dispatched = 0;
+-
+- bfqq = bfqd->in_service_queue;
+- if (bfqq)
+- __bfq_bfqq_expire(bfqd, bfqq);
++ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+
+ /*
+- * Loop through classes, and be careful to leave the scheduler
+- * in a consistent state, as feedback mechanisms and vtime
+- * updates cannot be disabled during the process.
++ * Avoiding lock: a race on bfqd->busy_queues should cause at
++ * most a call to dispatch for nothing
+ */
+- list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
+- st = bfq_entity_service_tree(&bfqq->entity);
+-
+- dispatched += __bfq_forced_dispatch_bfqq(bfqq);
+-
+- bfqq->max_budget = bfq_max_budget(bfqd);
+- bfq_forget_idle(st);
+- }
+-
+- BUG_ON(bfqd->busy_queues != 0);
+-
+- return dispatched;
++ return !list_empty_careful(&bfqd->dispatch) ||
++ bfqd->busy_queues > 0;
+ }
+
+-static int bfq_dispatch_requests(struct request_queue *q, int force)
++static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+- struct bfq_data *bfqd = q->elevator->elevator_data;
+- struct bfq_queue *bfqq;
++ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
++ struct request *rq = NULL;
++ struct bfq_queue *bfqq = NULL;
++
++ if (!list_empty(&bfqd->dispatch)) {
++ rq = list_first_entry(&bfqd->dispatch, struct request,
++ queuelist);
++ list_del_init(&rq->queuelist);
++ goto exit;
++ }
+
+ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
+
+ if (bfqd->busy_queues == 0)
+- return 0;
+-
+- if (unlikely(force))
+- return bfq_forced_dispatch(bfqd);
++ goto exit;
+
+ /*
+ * Force device to serve one request at a time if
+@@ -3885,25 +3886,39 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
+ * throughput.
+ */
+ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
+- return 0;
++ goto exit;
+
+ bfqq = bfq_select_queue(bfqd);
+ if (!bfqq)
+- return 0;
++ goto exit;
+
+ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
+
+ BUG_ON(bfq_bfqq_wait_request(bfqq));
+
+- if (!bfq_dispatch_request(bfqd, bfqq))
+- return 0;
+-
+- bfq_log_bfqq(bfqd, bfqq, "dispatched %s request",
+- bfq_bfqq_sync(bfqq) ? "sync" : "async");
++ rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
+
+ BUG_ON(bfqq->next_rq == NULL &&
+ bfqq->entity.budget < bfqq->entity.service);
+- return 1;
++exit:
++ if (rq) {
++ rq->rq_flags |= RQF_STARTED;
++ bfqd->rq_in_driver++;
++ }
++
++ return rq;
++}
++
++static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
++{
++ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
++ struct request *rq;
++
++ spin_lock_irq(&bfqd->lock);
++ rq = __bfq_dispatch_request(hctx);
++ spin_unlock_irq(&bfqd->lock);
++
++ return rq;
+ }
+
+ /*
+@@ -3921,13 +3936,14 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+
+ BUG_ON(bfqq->ref <= 0);
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++ if (bfqq->bfqd)
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++
+ bfqq->ref--;
+ if (bfqq->ref)
+ return;
+
+ BUG_ON(rb_first(&bfqq->sort_list));
+- BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+@@ -3942,7 +3958,8 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ */
+ hlist_del_init(&bfqq->burst_list_node);
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
++ if (bfqq->bfqd)
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+
+ kmem_cache_free(bfq_pool, bfqq);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+@@ -3983,29 +4000,52 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_put_queue(bfqq); /* release process reference */
+ }
+
+-static void bfq_exit_icq(struct io_cq *icq)
++static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ {
+- struct bfq_io_cq *bic = icq_to_bic(icq);
+- struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
++ struct bfq_data *bfqd;
+
+- if (bic_to_bfqq(bic, false)) {
+- bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, false));
+- bic_set_bfqq(bic, NULL, false);
+- }
++ if (bfqq)
++ bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
+
+- if (bic_to_bfqq(bic, true)) {
++ if (bfqq && bfqd) {
++ spin_lock_irq(&bfqd->lock);
+ /*
+ * If the bic is using a shared queue, put the reference
+ * taken on the io_context when the bic started using a
+ * shared bfq_queue.
+ */
+- if (bfq_bfqq_coop(bic_to_bfqq(bic, true)))
+- put_io_context(icq->ioc);
+- bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, true));
+- bic_set_bfqq(bic, NULL, true);
++ if (is_sync && bfq_bfqq_coop(bfqq))
++ put_io_context(bic->icq.ioc);
++ bfq_exit_bfqq(bfqd, bfqq);
++ bic_set_bfqq(bic, NULL, is_sync);
++ spin_unlock_irq(&bfqd->lock);
+ }
+ }
+
++static void bfq_exit_icq_body(struct work_struct *work)
++{
++ struct bfq_io_cq *bic =
++ container_of(work, struct bfq_io_cq, exit_icq_work);
++
++ bfq_exit_icq_bfqq(bic, true);
++ bfq_exit_icq_bfqq(bic, false);
++}
++
++static void bfq_init_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++
++ INIT_WORK(&bic->exit_icq_work, bfq_exit_icq_body);
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++
++ kblockd_schedule_work(&bic->exit_icq_work);
++}
++
+ /*
+ * Update the entity prio values; note that the new values will not
+ * be used until the next (re)activation.
+@@ -4015,6 +4055,10 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
+ {
+ struct task_struct *tsk = current;
+ int ioprio_class;
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ if (!bfqd)
++ return;
+
+ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
+ switch (ioprio_class) {
+@@ -4095,6 +4139,8 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ INIT_HLIST_NODE(&bfqq->burst_list_node);
+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
+
++ spin_lock_init(&bfqq->lock);
++
+ bfqq->ref = 0;
+ bfqq->bfqd = bfqd;
+
+@@ -4351,22 +4397,13 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (budget_timeout)
+ bfq_bfqq_expire(bfqd, bfqq, false,
+ BFQ_BFQQ_BUDGET_TIMEOUT);
+-
+- /*
+- * Let the request rip immediately, or let a new queue be
+- * selected if bfqq has just been expired.
+- */
+- __blk_run_queue(bfqd->queue);
+ }
+ }
+
+-static void bfq_insert_request(struct request_queue *q, struct request *rq)
++static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ {
+- struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
+
+- assert_spin_locked(bfqd->queue->queue_lock);
+-
+ /*
+ * An unplug may trigger a requeue of a request from the device
+ * driver: make sure we are in process context while trying to
+@@ -4381,8 +4418,8 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq)
+ * Release the request's reference to the old bfqq
+ * and make sure one is taken to the shared queue.
+ */
+- new_bfqq->allocated[rq_data_dir(rq)]++;
+- bfqq->allocated[rq_data_dir(rq)]--;
++ new_bfqq->allocated++;
++ bfqq->allocated--;
+ new_bfqq->ref++;
+ bfq_clear_bfqq_just_created(bfqq);
+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+@@ -4406,6 +4443,55 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq)
+ bfq_rq_enqueued(bfqd, bfqq, rq);
+ }
+
++static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
++ bool at_head)
++{
++ struct request_queue *q = hctx->queue;
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ spin_lock_irq(&bfqd->lock);
++ if (blk_mq_sched_try_insert_merge(q, rq))
++ goto done;
++ spin_unlock_irq(&bfqd->lock);
++
++ blk_mq_sched_request_inserted(rq);
++
++ spin_lock_irq(&bfqd->lock);
++ if (at_head || blk_rq_is_passthrough(rq)) {
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ if (at_head)
++ list_add(&rq->queuelist, &bfqd->dispatch);
++ else
++ list_add_tail(&rq->queuelist, &bfqd->dispatch);
++
++ if (bfqq)
++ bfqq->dispatched++;
++ } else {
++ __bfq_insert_request(bfqd, rq);
++
++ if (rq_mergeable(rq)) {
++ elv_rqhash_add(q, rq);
++ if (!q->last_merge)
++ q->last_merge = rq;
++ }
++ }
++done:
++ spin_unlock_irq(&bfqd->lock);
++}
++
++static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
++ struct list_head *list, bool at_head)
++{
++ while (!list_empty(list)) {
++ struct request *rq;
++
++ rq = list_first_entry(list, struct request, queuelist);
++ list_del_init(&rq->queuelist);
++ bfq_insert_request(hctx, rq, at_head);
++ }
++}
++
+ static void bfq_update_hw_tag(struct bfq_data *bfqd)
+ {
+ bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
+@@ -4431,27 +4517,17 @@ static void bfq_update_hw_tag(struct bfq_data *bfqd)
+ bfqd->hw_tag_samples = 0;
+ }
+
+-static void bfq_completed_request(struct request_queue *q, struct request *rq)
++static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ {
+- struct bfq_queue *bfqq = RQ_BFQQ(rq);
+- struct bfq_data *bfqd = bfqq->bfqd;
+ u64 now_ns;
+ u32 delta_us;
+
+- bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left",
+- blk_rq_sectors(rq));
+-
+- assert_spin_locked(bfqd->queue->queue_lock);
+ bfq_update_hw_tag(bfqd);
+
+ BUG_ON(!bfqd->rq_in_driver);
+ BUG_ON(!bfqq->dispatched);
+ bfqd->rq_in_driver--;
+ bfqq->dispatched--;
+- bfqg_stats_update_completion(bfqq_group(bfqq),
+- rq_start_time_ns(rq),
+- rq_io_start_time_ns(rq),
+- rq->cmd_flags);
+
+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
+@@ -4477,7 +4553,8 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+ */
+ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
+
+- bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
++ bfq_log_bfqq(bfqd, bfqq,
++ "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
+ (USEC_PER_SEC*
+ (u64)((bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us))
+@@ -4527,7 +4604,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+ if (bfqd->in_service_queue == bfqq) {
+ if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
+ bfq_arm_slice_timer(bfqd);
+- goto out;
++ return;
+ } else if (bfq_may_expire_for_budg_timeout(bfqq))
+ bfq_bfqq_expire(bfqd, bfqq, false,
+ BFQ_BFQQ_BUDGET_TIMEOUT);
+@@ -4537,68 +4614,55 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+ bfq_bfqq_expire(bfqd, bfqq, false,
+ BFQ_BFQQ_NO_MORE_REQUESTS);
+ }
+-
+- if (!bfqd->rq_in_driver)
+- bfq_schedule_dispatch(bfqd);
+-
+-out:
+- return;
+ }
+
+-static int __bfq_may_queue(struct bfq_queue *bfqq)
++static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
+ {
+- if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
+- bfq_clear_bfqq_must_alloc(bfqq);
+- return ELV_MQUEUE_MUST;
+- }
++ bfqq->allocated--;
+
+- return ELV_MQUEUE_MAY;
++ bfq_put_queue(bfqq);
+ }
+
+-static int bfq_may_queue(struct request_queue *q, unsigned int op)
++static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ {
+- struct bfq_data *bfqd = q->elevator->elevator_data;
+- struct task_struct *tsk = current;
+- struct bfq_io_cq *bic;
+- struct bfq_queue *bfqq;
+-
+- /*
+- * Don't force setup of a queue from here, as a call to may_queue
+- * does not necessarily imply that a request actually will be
+- * queued. So just lookup a possibly existing queue, or return
+- * 'may queue' if that fails.
+- */
+- bic = bfq_bic_lookup(bfqd, tsk->io_context);
+- if (!bic)
+- return ELV_MQUEUE_MAY;
+-
+- bfqq = bic_to_bfqq(bic, op_is_sync(op));
+- if (bfqq)
+- return __bfq_may_queue(bfqq);
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
+
+- return ELV_MQUEUE_MAY;
+-}
++ if (rq->rq_flags & RQF_STARTED)
++ bfqg_stats_update_completion(bfqq_group(bfqq),
++ rq_start_time_ns(rq),
++ rq_io_start_time_ns(rq),
++ rq->cmd_flags);
+
+-/*
+- * Queue lock held here.
+- */
+-static void bfq_put_request(struct request *rq)
+-{
+- struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ if (likely(rq->rq_flags & RQF_STARTED)) {
++ unsigned long flags;
+
+- if (bfqq) {
+- const int rw = rq_data_dir(rq);
++ spin_lock_irqsave(&bfqd->lock, flags);
+
+- BUG_ON(!bfqq->allocated[rw]);
+- bfqq->allocated[rw]--;
++ bfq_completed_request(bfqq, bfqd);
++ bfq_put_rq_priv_body(bfqq);
+
+- rq->elv.priv[0] = NULL;
+- rq->elv.priv[1] = NULL;
++ spin_unlock_irqrestore(&bfqd->lock, flags);
++ } else {
++ /*
++ * Request rq may be still/already in the scheduler,
++ * in which case we need to remove it. And we cannot
++ * defer such a check and removal, to avoid
++ * inconsistencies in the time interval from the end
++ * of this function to the start of the deferred work.
++ * Fortunately, this situation occurs only in process
++ * context, so taking the scheduler lock does not
++ * cause any deadlock, even if other locks are already
++ * (correctly) held by this process.
++ */
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
+- bfqq, bfqq->ref);
+- bfq_put_queue(bfqq);
++ if (!RB_EMPTY_NODE(&rq->rb_node))
++ bfq_remove_request(q, rq);
++ bfq_put_rq_priv_body(bfqq);
+ }
++
++ rq->elv.priv[0] = NULL;
++ rq->elv.priv[1] = NULL;
+ }
+
+ /*
+@@ -4630,18 +4694,16 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ /*
+ * Allocate bfq data structures associated with this request.
+ */
+-static int bfq_set_request(struct request_queue *q, struct request *rq,
+- struct bio *bio, gfp_t gfp_mask)
++static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
++ struct bio *bio)
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
+- const int rw = rq_data_dir(rq);
+ const int is_sync = rq_is_sync(rq);
+ struct bfq_queue *bfqq;
+- unsigned long flags;
+ bool bfqq_already_existing = false, split = false;
+
+- spin_lock_irqsave(q->queue_lock, flags);
++ spin_lock_irq(&bfqd->lock);
+
+ if (!bic)
+ goto queue_fail;
+@@ -4661,7 +4723,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ bic_set_bfqq(bic, bfqq, is_sync);
+ if (split && is_sync) {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: was_in_list %d "
++ "get_request: was_in_list %d "
+ "was_in_large_burst %d "
+ "large burst in progress %d",
+ bic->was_in_burst_list,
+@@ -4671,12 +4733,12 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ if ((bic->was_in_burst_list && bfqd->large_burst) ||
+ bic->saved_in_large_burst) {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: marking in "
++ "get_request: marking in "
+ "large burst");
+ bfq_mark_bfqq_in_large_burst(bfqq);
+ } else {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: clearing in "
++ "get_request: clearing in "
+ "large burst");
+ bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list)
+@@ -4703,9 +4765,12 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ }
+ }
+
+- bfqq->allocated[rw]++;
++ bfqq->allocated++;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "get_request: new allocated %d", bfqq->allocated);
++
+ bfqq->ref++;
+- bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_request: bfqq %p, %d", bfqq, bfqq->ref);
+
+ rq->elv.priv[0] = bic;
+ rq->elv.priv[1] = bfqq;
+@@ -4733,26 +4798,53 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ if (unlikely(bfq_bfqq_just_created(bfqq)))
+ bfq_handle_burst(bfqd, bfqq);
+
+- spin_unlock_irqrestore(q->queue_lock, flags);
++ spin_unlock_irq(&bfqd->lock);
+
+ return 0;
+
+ queue_fail:
+- bfq_schedule_dispatch(bfqd);
+- spin_unlock_irqrestore(q->queue_lock, flags);
++ spin_unlock_irq(&bfqd->lock);
+
+ return 1;
+ }
+
+-static void bfq_kick_queue(struct work_struct *work)
++static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+ {
+- struct bfq_data *bfqd =
+- container_of(work, struct bfq_data, unplug_work);
+- struct request_queue *q = bfqd->queue;
++ struct bfq_data *bfqd = bfqq->bfqd;
++ enum bfqq_expiration reason;
++ unsigned long flags;
++
++ spin_lock_irqsave(&bfqd->lock, flags);
++ bfq_clear_bfqq_wait_request(bfqq);
+
+- spin_lock_irq(q->queue_lock);
+- __blk_run_queue(q);
+- spin_unlock_irq(q->queue_lock);
++ if (bfqq != bfqd->in_service_queue) {
++ spin_unlock_irqrestore(&bfqd->lock, flags);
++ return;
++ }
++
++ if (bfq_bfqq_budget_timeout(bfqq))
++ /*
++ * Also here the queue can be safely expired
++ * for budget timeout without wasting
++ * guarantees
++ */
++ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++ /*
++ * The queue may not be empty upon timer expiration,
++ * because we may not disable the timer when the
++ * first request of the in-service queue arrives
++ * during disk idling.
++ */
++ reason = BFQ_BFQQ_TOO_IDLE;
++ else
++ goto schedule_dispatch;
++
++ bfq_bfqq_expire(bfqd, bfqq, true, reason);
++
++schedule_dispatch:
++ spin_unlock_irqrestore(&bfqd->lock, flags);
++ bfq_schedule_dispatch(bfqd);
+ }
+
+ /*
+@@ -4763,59 +4855,22 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
+ {
+ struct bfq_data *bfqd = container_of(timer, struct bfq_data,
+ idle_slice_timer);
+- struct bfq_queue *bfqq;
+- unsigned long flags;
+- enum bfqq_expiration reason;
+-
+- spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
+
+- bfqq = bfqd->in_service_queue;
+ /*
+ * Theoretical race here: the in-service queue can be NULL or
+- * different from the queue that was idling if the timer handler
+- * spins on the queue_lock and a new request arrives for the
+- * current queue and there is a full dispatch cycle that changes
+- * the in-service queue. This can hardly happen, but in the worst
+- * case we just expire a queue too early.
++ * different from the queue that was idling if a new request
++ * arrives for the current queue and there is a full dispatch
++ * cycle that changes the in-service queue. This can hardly
++ * happen, but in the worst case we just expire a queue too
++ * early.
+ */
+- if (bfqq) {
+- bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
+- bfq_clear_bfqq_wait_request(bfqq);
+-
+- if (bfq_bfqq_budget_timeout(bfqq))
+- /*
+- * Also here the queue can be safely expired
+- * for budget timeout without wasting
+- * guarantees
+- */
+- reason = BFQ_BFQQ_BUDGET_TIMEOUT;
+- else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
+- /*
+- * The queue may not be empty upon timer expiration,
+- * because we may not disable the timer when the
+- * first request of the in-service queue arrives
+- * during disk idling.
+- */
+- reason = BFQ_BFQQ_TOO_IDLE;
+- else
+- goto schedule_dispatch;
+-
+- bfq_bfqq_expire(bfqd, bfqq, true, reason);
+- }
+-
+-schedule_dispatch:
+- bfq_schedule_dispatch(bfqd);
++ if (bfqq)
++ bfq_idle_slice_timer_body(bfqq);
+
+- spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
+ return HRTIMER_NORESTART;
+ }
+
+-static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
+-{
+- hrtimer_cancel(&bfqd->idle_slice_timer);
+- cancel_work_sync(&bfqd->unplug_work);
+-}
+-
+ static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
+ struct bfq_queue **bfqq_ptr)
+ {
+@@ -4852,28 +4907,40 @@ static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
+ static void bfq_exit_queue(struct elevator_queue *e)
+ {
+ struct bfq_data *bfqd = e->elevator_data;
+- struct request_queue *q = bfqd->queue;
+ struct bfq_queue *bfqq, *n;
+
+- bfq_shutdown_timer_wq(bfqd);
+-
+- spin_lock_irq(q->queue_lock);
++ hrtimer_cancel(&bfqd->idle_slice_timer);
+
+ BUG_ON(bfqd->in_service_queue);
+- list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
+- bfq_deactivate_bfqq(bfqd, bfqq, false, false);
+
+- spin_unlock_irq(q->queue_lock);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) {
++ if (bfqq->bic) /* bfqqs without bic are handled below */
++ cancel_work_sync(&bfqq->bic->exit_icq_work);
++ }
++
++ spin_lock_irq(&bfqd->lock);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) {
++ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
++ /*
++ * Make sure that deferred exit_icq_work completes
++ * without errors for bfq_queues without bic
++ */
++ if (!bfqq->bic)
++ bfqq->bfqd = NULL;
++ }
++ spin_unlock_irq(&bfqd->lock);
+
+- bfq_shutdown_timer_wq(bfqd);
++ hrtimer_cancel(&bfqd->idle_slice_timer);
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+- blkcg_deactivate_policy(q, &blkcg_policy_bfq);
++ blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
+ #else
++ spin_lock_irq(&bfqd->lock);
+ bfq_put_async_queues(bfqd, bfqd->root_group);
+ kfree(bfqd->root_group);
++ spin_unlock_irq(&bfqd->lock);
+ #endif
+
+ kfree(bfqd);
+@@ -4934,10 +5001,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+
+ bfqd->queue = q;
+
+- spin_lock_irq(q->queue_lock);
+- q->elevator = eq;
+- spin_unlock_irq(q->queue_lock);
+-
+ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
+ if (!bfqd->root_group)
+ goto out_free;
+@@ -4951,8 +5014,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ bfqd->queue_weights_tree = RB_ROOT;
+ bfqd->group_weights_tree = RB_ROOT;
+
+- INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
+-
+ INIT_LIST_HEAD(&bfqd->active_list);
+ INIT_LIST_HEAD(&bfqd->idle_list);
+ INIT_HLIST_HEAD(&bfqd->burst_list);
+@@ -5001,6 +5062,11 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
+ bfqd->device_speed = BFQ_BFQD_FAST;
+
++ spin_lock_init(&bfqd->lock);
++ INIT_LIST_HEAD(&bfqd->dispatch);
++
++ q->elevator = eq;
++
+ return 0;
+
+ out_free:
+@@ -5057,7 +5123,7 @@ static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
+ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
+ bfqd->queued);
+
+- spin_lock_irq(bfqd->queue->queue_lock);
++ spin_lock_irq(&bfqd->lock);
+
+ num_char += sprintf(page + num_char, "Active:\n");
+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
+@@ -5086,7 +5152,7 @@ static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
+ jiffies_to_msecs(bfqq->wr_cur_max_time));
+ }
+
+- spin_unlock_irq(bfqd->queue->queue_lock);
++ spin_unlock_irq(&bfqd->lock);
+
+ return num_char;
+ }
+@@ -5294,35 +5360,31 @@ static struct elv_fs_entry bfq_attrs[] = {
+ __ATTR_NULL
+ };
+
+-static struct elevator_type iosched_bfq = {
+- .ops.sq = {
+- .elevator_merge_fn = bfq_merge,
+- .elevator_merged_fn = bfq_merged_request,
+- .elevator_merge_req_fn = bfq_merged_requests,
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+- .elevator_bio_merged_fn = bfq_bio_merged,
+-#endif
+- .elevator_allow_bio_merge_fn = bfq_allow_bio_merge,
+- .elevator_allow_rq_merge_fn = bfq_allow_rq_merge,
+- .elevator_dispatch_fn = bfq_dispatch_requests,
+- .elevator_add_req_fn = bfq_insert_request,
+- .elevator_activate_req_fn = bfq_activate_request,
+- .elevator_deactivate_req_fn = bfq_deactivate_request,
+- .elevator_completed_req_fn = bfq_completed_request,
+- .elevator_former_req_fn = elv_rb_former_request,
+- .elevator_latter_req_fn = elv_rb_latter_request,
+- .elevator_init_icq_fn = bfq_init_icq,
+- .elevator_exit_icq_fn = bfq_exit_icq,
+- .elevator_set_req_fn = bfq_set_request,
+- .elevator_put_req_fn = bfq_put_request,
+- .elevator_may_queue_fn = bfq_may_queue,
+- .elevator_init_fn = bfq_init_queue,
+- .elevator_exit_fn = bfq_exit_queue,
++static struct elevator_type iosched_bfq_mq = {
++ .ops.mq = {
++ .get_rq_priv = bfq_get_rq_private,
++ .put_rq_priv = bfq_put_rq_private,
++ .init_icq = bfq_init_icq,
++ .exit_icq = bfq_exit_icq,
++ .insert_requests = bfq_insert_requests,
++ .dispatch_request = bfq_dispatch_request,
++ .next_request = elv_rb_latter_request,
++ .former_request = elv_rb_former_request,
++ .allow_merge = bfq_allow_bio_merge,
++ .bio_merge = bfq_bio_merge,
++ .request_merge = bfq_request_merge,
++ .requests_merged = bfq_requests_merged,
++ .request_merged = bfq_request_merged,
++ .has_work = bfq_has_work,
++ .init_sched = bfq_init_queue,
++ .exit_sched = bfq_exit_queue,
+ },
++
++ .uses_mq = true,
+ .icq_size = sizeof(struct bfq_io_cq),
+ .icq_align = __alignof__(struct bfq_io_cq),
+ .elevator_attrs = bfq_attrs,
+- .elevator_name = "bfq-sq",
++ .elevator_name = "bfq-mq",
+ .elevator_owner = THIS_MODULE,
+ };
+
+@@ -5392,7 +5454,7 @@ static int __init bfq_init(void)
+ device_speed_thresh[0] = (4 * R_slow[0]) / 3;
+ device_speed_thresh[1] = (4 * R_slow[1]) / 3;
+
+- ret = elv_register(&iosched_bfq);
++ ret = elv_register(&iosched_bfq_mq);
+ if (ret)
+ goto err_pol_unreg;
+
+@@ -5412,8 +5474,8 @@ static int __init bfq_init(void)
+
+ static void __exit bfq_exit(void)
+ {
+- elv_unregister(&iosched_bfq);
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ elv_unregister(&iosched_bfq_mq);
++#ifdef CONFIG_BFQ_GROUP_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ bfq_slab_kill();
+@@ -5422,5 +5484,6 @@ static void __exit bfq_exit(void)
+ module_init(bfq_init);
+ module_exit(bfq_exit);
+
+-MODULE_AUTHOR("Arianna Avanzini, Fabio Checconi, Paolo Valente");
++MODULE_AUTHOR("Paolo Valente");
+ MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 0f51f270469c..c3fcd5ebd735 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -19,15 +19,8 @@
+ #include <linux/hrtimer.h>
+ #include <linux/blk-cgroup.h>
+
+-/*
+- * Define an alternative macro to compile cgroups support. This is one
+- * of the steps needed to let bfq-mq share the files bfq-sched.c and
+- * bfq-cgroup.c with bfq-sq. For bfq-mq, the macro
+- * BFQ_GROUP_IOSCHED_ENABLED will be defined as a function of whether
+- * the configuration option CONFIG_BFQ_MQ_GROUP_IOSCHED, and not
+- * CONFIG_BFQ_GROUP_IOSCHED, is defined.
+- */
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++/* see comments on CONFIG_BFQ_GROUP_IOSCHED in bfq.h */
++#ifdef CONFIG_BFQ_MQ_GROUP_IOSCHED
+ #define BFQ_GROUP_IOSCHED_ENABLED
+ #endif
+
+@@ -259,8 +252,8 @@ struct bfq_queue {
+ struct request *next_rq;
+ /* number of sync and async requests queued */
+ int queued[2];
+- /* number of sync and async requests currently allocated */
+- int allocated[2];
++ /* number of requests currently allocated */
++ int allocated;
+ /* number of pending metadata requests */
+ int meta_pending;
+ /* fifo list of requests in sort_list */
+@@ -345,6 +338,8 @@ struct bfq_queue {
+ unsigned long wr_start_at_switch_to_srt;
+
+ unsigned long split_time; /* time of last split */
++
++ spinlock_t lock;
+ };
+
+ /**
+@@ -361,6 +356,9 @@ struct bfq_io_cq {
+ uint64_t blkcg_serial_nr; /* the current blkcg serial */
+ #endif
+
++	/* delayed work to exec the body of the exit_icq handler */
++ struct work_struct exit_icq_work;
++
+ /*
+ * Snapshot of the has_short_time flag before merging; taken
+ * to remember its value while the queue is merged, so as to
+@@ -402,11 +400,13 @@ enum bfq_device_speed {
+ /**
+ * struct bfq_data - per-device data structure.
+ *
+- * All the fields are protected by the @queue lock.
++ * All the fields are protected by @lock.
+ */
+ struct bfq_data {
+- /* request queue for the device */
++ /* device request queue */
+ struct request_queue *queue;
++ /* dispatch queue */
++ struct list_head dispatch;
+
+ /* root bfq_group for the device */
+ struct bfq_group *root_group;
+@@ -460,8 +460,6 @@ struct bfq_data {
+ * the queue in service.
+ */
+ struct hrtimer idle_slice_timer;
+- /* delayed work to restart dispatching on the request queue */
+- struct work_struct unplug_work;
+
+ /* bfq_queue in service */
+ struct bfq_queue *in_service_queue;
+@@ -612,6 +610,8 @@ struct bfq_data {
+
+ /* fallback dummy bfqq for extreme OOM conditions */
+ struct bfq_queue oom_bfqq;
++
++ spinlock_t lock;
+ };
+
+ enum bfqq_state_flags {
+@@ -622,7 +622,6 @@ enum bfqq_state_flags {
+ * waiting for a request
+ * without idling the device
+ */
+- BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
+ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
+ BFQ_BFQQ_FLAG_has_short_ttime, /* queue has a short think time */
+ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
+@@ -661,7 +660,6 @@ BFQ_BFQQ_FNS(just_created);
+ BFQ_BFQQ_FNS(busy);
+ BFQ_BFQQ_FNS(wait_request);
+ BFQ_BFQQ_FNS(non_blocking_wait_rq);
+-BFQ_BFQQ_FNS(must_alloc);
+ BFQ_BFQQ_FNS(fifo_expire);
+ BFQ_BFQQ_FNS(has_short_ttime);
+ BFQ_BFQQ_FNS(sync);
+@@ -692,7 +690,6 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+- assert_spin_locked((bfqd)->queue->queue_lock); \
+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+ pr_crit("%s bfq%d%c %s " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+@@ -734,7 +731,6 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+- assert_spin_locked((bfqd)->queue->queue_lock); \
+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
+ (bfqq)->pid, \
+@@ -961,7 +957,6 @@ static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
+
+ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio);
+ static void bfq_put_queue(struct bfq_queue *bfqq);
+-static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
+ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
+ struct bio *bio, bool is_sync,
+ struct bfq_io_cq *bic);
+
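/*
 * Editor's sketch (not part of the patch series).  The hunks above
 * convert BFQ from the legacy single-queue elevator API to the
 * blk-mq ops.mq hooks: the request_queue lock is replaced by a
 * private per-device spinlock (bfqd->lock), and a bfqd->dispatch
 * list absorbs at-head and passthrough requests.  The skeleton below
 * shows the minimal shape of such a scheduler, reusing only hook
 * signatures that appear in the patch; every "sketch_" identifier is
 * hypothetical.
 */
struct sketch_data {
	spinlock_t lock;		/* plays the role of bfqd->lock */
	struct list_head dispatch;	/* plays the role of bfqd->dispatch */
};

static struct request *sketch_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct sketch_data *sd = hctx->queue->elevator->elevator_data;
	struct request *rq = NULL;

	spin_lock_irq(&sd->lock);	/* private lock, not q->queue_lock */
	if (!list_empty(&sd->dispatch)) {
		rq = list_first_entry(&sd->dispatch, struct request,
				      queuelist);
		list_del_init(&rq->queuelist);
	}
	spin_unlock_irq(&sd->lock);

	return rq;	/* one request per call, or NULL if idle */
}

static bool sketch_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct sketch_data *sd = hctx->queue->elevator->elevator_data;

	/* lockless peek: a false positive only costs an empty dispatch */
	return !list_empty_careful(&sd->dispatch);
}
/*
 * These two callbacks would be wired into an elevator_type through
 * .ops.mq.dispatch_request and .ops.mq.has_work, exactly as
 * iosched_bfq_mq does above, and registered with elv_register().
 */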
+From bde5235de2241502c1c00337bd51c96d9b60b6df Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 3 Mar 2017 08:52:40 +0100
+Subject: [PATCH 13/51] Add checks and extra log messages - Part I
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 112 +++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 109 insertions(+), 3 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index c963d92a32c2..40eadb3f7073 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -773,6 +773,8 @@ static int bfqq_process_refs(struct bfq_queue *bfqq)
+ {
+ int process_refs, io_refs;
+
++ lockdep_assert_held(&bfqq->bfqd->lock);
++
+ io_refs = bfqq->allocated;
+ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st;
+ BUG_ON(process_refs < 0);
+@@ -1483,6 +1485,8 @@ static void bfq_add_request(struct request *rq)
+ bfqq->queued[rq_is_sync(rq)]++;
+ bfqd->queued++;
+
++ BUG_ON(!RQ_BFQQ(rq));
++ BUG_ON(RQ_BFQQ(rq) != bfqq);
+ elv_rb_add(&bfqq->sort_list, rq);
+
+ /*
+@@ -1491,6 +1495,8 @@ static void bfq_add_request(struct request *rq)
+ prev = bfqq->next_rq;
+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
+ BUG_ON(!next_rq);
++ BUG_ON(!RQ_BFQQ(next_rq));
++ BUG_ON(RQ_BFQQ(next_rq) != bfqq);
+ bfqq->next_rq = next_rq;
+
+ /*
+@@ -1615,6 +1621,19 @@ static void bfq_remove_request(struct request_queue *q,
+
+ if (bfqq->next_rq == rq) {
+ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++ if (bfqq->next_rq && !RQ_BFQQ(bfqq->next_rq)) {
++ pr_crit("no bfqq! for next rq %p bfqq %p\n",
++ bfqq->next_rq, bfqq);
++ }
++
++ BUG_ON(bfqq->next_rq && !RQ_BFQQ(bfqq->next_rq));
++ if (bfqq->next_rq && RQ_BFQQ(bfqq->next_rq) != bfqq) {
++ pr_crit(
++ "wrong bfqq! for next rq %p, rq_bfqq %p bfqq %p\n",
++ bfqq->next_rq, RQ_BFQQ(bfqq->next_rq), bfqq);
++ }
++ BUG_ON(bfqq->next_rq && RQ_BFQQ(bfqq->next_rq) != bfqq);
++
+ bfq_updated_next_req(bfqd, bfqq);
+ }
+
+@@ -1701,6 +1720,8 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
+ __rq = bfq_find_rq_fmerge(bfqd, bio, q);
+ if (__rq && elv_bio_merge_ok(__rq, bio)) {
+ *req = __rq;
++ bfq_log(bfqd, "request_merge: req %p", __rq);
++
+ return ELEVATOR_FRONT_MERGE;
+ }
+
+@@ -1721,6 +1742,8 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+
+ /* Reposition request in its sort_list */
+ elv_rb_del(&bfqq->sort_list, req);
++ BUG_ON(!RQ_BFQQ(req));
++ BUG_ON(RQ_BFQQ(req) != bfqq);
+ elv_rb_add(&bfqq->sort_list, req);
+
+ spin_lock_irq(&bfqd->lock);
+@@ -1729,7 +1752,13 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
+ bfqd->last_position);
+ BUG_ON(!next_rq);
++
+ bfqq->next_rq = next_rq;
++
++ bfq_log_bfqq(bfqd, bfqq,
++		     "request_merged: req %p prev %p next_rq %p bfqq %p",
++ req, prev, next_rq, bfqq);
++
+ /*
+ * If next_rq changes, update both the queue's budget to
+ * fit the new request and the queue's position in its
+@@ -1748,8 +1777,16 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
+
++ BUG_ON(!RQ_BFQQ(rq));
++ BUG_ON(!RQ_BFQQ(next));
++
+ if (!RB_EMPTY_NODE(&rq->rb_node))
+ goto end;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "requests_merged: rq %p next %p bfqq %p next_bfqq %p",
++ rq, next, bfqq, next_bfqq);
++
+ spin_lock_irq(&bfqq->bfqd->lock);
+
+ /*
+@@ -3847,6 +3884,9 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+
++ bfq_log(bfqd, "has_work, dispatch_non_empty %d busy_queues %d",
++ !list_empty_careful(&bfqd->dispatch), bfqd->busy_queues > 0);
++
+ /*
+ * Avoiding lock: a race on bfqd->busy_queues should cause at
+ * most a call to dispatch for nothing
+@@ -3865,6 +3905,8 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ rq = list_first_entry(&bfqd->dispatch, struct request,
+ queuelist);
+ list_del_init(&rq->queuelist);
++ bfq_log(bfqd,
++ "dispatch requests: picked %p from dispatch list", rq);
+ goto exit;
+ }
+
+@@ -3904,7 +3946,20 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ if (rq) {
+ rq->rq_flags |= RQF_STARTED;
+ bfqd->rq_in_driver++;
+- }
++ if (bfqq)
++ bfq_log_bfqq(bfqd, bfqq,
++ "dispatched %s request %p, rq_in_driver %d",
++ bfq_bfqq_sync(bfqq) ? "sync" : "async",
++ rq,
++ bfqd->rq_in_driver);
++ else
++ bfq_log(bfqd,
++ "dispatched request %p from dispatch list, rq_in_driver %d",
++ rq, bfqd->rq_in_driver);
++ } else
++ bfq_log(bfqd,
++ "returned NULL request, rq_in_driver %d",
++ bfqd->rq_in_driver);
+
+ return rq;
+ }
+@@ -3944,6 +3999,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ return;
+
+ BUG_ON(rb_first(&bfqq->sort_list));
++ BUG_ON(bfqq->allocated != 0);
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+@@ -4043,6 +4099,7 @@ static void bfq_exit_icq(struct io_cq *icq)
+ {
+ struct bfq_io_cq *bic = icq_to_bic(icq);
+
++ BUG_ON(!bic);
+ kblockd_schedule_work(&bic->exit_icq_work);
+ }
+
+@@ -4057,6 +4114,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
+ int ioprio_class;
+ struct bfq_data *bfqd = bfqq->bfqd;
+
++ WARN_ON(!bfqd);
+ if (!bfqd)
+ return;
+
+@@ -4404,6 +4462,10 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
+
++ assert_spin_locked(&bfqd->lock);
++
++ bfq_log_bfqq(bfqd, bfqq, "__insert_req: rq %p bfqq %p", rq, bfqq);
++
+ /*
+ * An unplug may trigger a requeue of a request from the device
+ * driver: make sure we are in process context while trying to
+@@ -4420,6 +4482,12 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ */
+ new_bfqq->allocated++;
+ bfqq->allocated--;
++ bfq_log_bfqq(bfqd, bfqq,
++ "insert_request: new allocated %d", bfqq->allocated);
++ bfq_log_bfqq(bfqd, new_bfqq,
++ "insert_request: new_bfqq new allocated %d",
++			new_bfqq->allocated);
++
+ new_bfqq->ref++;
+ bfq_clear_bfqq_just_created(bfqq);
+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+@@ -4529,6 +4597,10 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ bfqd->rq_in_driver--;
+ bfqq->dispatched--;
+
++ bfq_log_bfqq(bfqd, bfqq,
++ "completed_requests: new disp %d, new rq_in_driver %d",
++ bfqq->dispatched, bfqd->rq_in_driver);
++
+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
+ /*
+@@ -4618,6 +4690,9 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+
+ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
+ {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "put_request_body: allocated %d", bfqq->allocated);
++ BUG_ON(!bfqq->allocated);
+ bfqq->allocated--;
+
+ bfq_put_queue(bfqq);
+@@ -4625,8 +4700,27 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
+
+ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ {
+- struct bfq_queue *bfqq = RQ_BFQQ(rq);
+- struct bfq_data *bfqd = bfqq->bfqd;
++ struct bfq_queue *bfqq;
++ struct bfq_data *bfqd;
++ struct bfq_io_cq *bic;
++
++ BUG_ON(!rq);
++ bfqq = RQ_BFQQ(rq);
++ BUG_ON(!bfqq);
++
++ bic = RQ_BIC(rq);
++ BUG_ON(!bic);
++
++ bfqd = bfqq->bfqd;
++ BUG_ON(!bfqd);
++
++ BUG_ON(rq->rq_flags & RQF_QUEUED);
++ BUG_ON(!(rq->rq_flags & RQF_ELVPRIV));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "putting rq %p with %u sects left, STARTED %d",
++ rq, blk_rq_sectors(rq),
++ rq->rq_flags & RQF_STARTED);
+
+ if (rq->rq_flags & RQF_STARTED)
+ bfqg_stats_update_completion(bfqq_group(bfqq),
+@@ -4634,6 +4728,8 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ rq_io_start_time_ns(rq),
+ rq->cmd_flags);
+
++ BUG_ON(blk_rq_sectors(rq) == 0 && !(rq->rq_flags & RQF_STARTED));
++
+ if (likely(rq->rq_flags & RQF_STARTED)) {
+ unsigned long flags;
+
+@@ -4655,7 +4751,9 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ * cause any deadlock, even if other locks are already
+ * (correctly) held by this process.
+ */
++ BUG_ON(in_interrupt());
+
++ assert_spin_locked(&bfqd->lock);
+ if (!RB_EMPTY_NODE(&rq->rb_node))
+ bfq_remove_request(q, rq);
+ bfq_put_rq_priv_body(bfqq);
+@@ -4814,7 +4912,9 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+ enum bfqq_expiration reason;
+ unsigned long flags;
+
++ BUG_ON(!bfqd);
+ spin_lock_irqsave(&bfqd->lock, flags);
++ bfq_log_bfqq(bfqd, bfqq, "handling slice_timer expiration");
+ bfq_clear_bfqq_wait_request(bfqq);
+
+ if (bfqq != bfqd->in_service_queue) {
+@@ -4857,6 +4957,8 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
+ idle_slice_timer);
+ struct bfq_queue *bfqq = bfqd->in_service_queue;
+
++ bfq_log(bfqd, "slice_timer expired");
++
+ /*
+ * Theoretical race here: the in-service queue can be NULL or
+ * different from the queue that was idling if a new request
+@@ -4909,9 +5011,12 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ struct bfq_data *bfqd = e->elevator_data;
+ struct bfq_queue *bfqq, *n;
+
++ bfq_log(bfqd, "exit_queue: starting ...");
++
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+
+ BUG_ON(bfqd->in_service_queue);
++ BUG_ON(!list_empty(&bfqd->active_list));
+
+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) {
+ if (bfqq->bic) /* bfqqs without bic are handled below */
+@@ -4943,6 +5048,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ spin_unlock_irq(&bfqd->lock);
+ #endif
+
++ bfq_log(bfqd, "exit_queue: finished ...");
+ kfree(bfqd);
+ }
+
+
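/*
 * Editor's sketch (not part of the patch series).  The debug patch
 * above relies on two idioms to validate the new locking scheme
 * while it matures: lockdep assertions on the private scheduler lock
 * and loud consistency checks on the rq <-> bfqq back-pointers.  A
 * condensed, hypothetical example of the same pattern ("sketch_" is
 * a made-up name):
 */
static void sketch_check_rq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			    struct request *rq)
{
	/* With CONFIG_LOCKDEP, splats if the caller forgot bfqd->lock */
	lockdep_assert_held(&bfqd->lock);

	/* Print the details first, so the log explains the BUG_ON below */
	if (RQ_BFQQ(rq) != bfqq)
		pr_crit("wrong bfqq for rq %p: rq_bfqq %p bfqq %p\n",
			rq, RQ_BFQQ(rq), bfqq);
	BUG_ON(RQ_BFQQ(rq) != bfqq);
}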
+From 7f59486861e368d25f59d4136cf8e51a75b7edf9 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 9 Feb 2017 10:36:27 +0100
+Subject: [PATCH 14/51] Add lock check in bfq_allow_bio_merge
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 40eadb3f7073..21b876aeba16 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -2279,6 +2279,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ if (!bic)
+ return false;
+
++ assert_spin_locked(&bfqd->lock);
+ bfqq = bic_to_bfqq(bic, is_sync);
+ /*
+ * We take advantage of this function to perform an early merge
+
+From a2dd19a4d95cf401268c144c79ce549c7fc4bbca Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 7 Feb 2017 15:14:29 +0100
+Subject: [PATCH 15/51] bfq-mq: execute exit_icq operations immediately
+
+Exploiting Omar's patch, which removes the taking of the queue lock in
+put_io_context_active, this patch moves the operation of the bfq_exit_icq
+hook back from a deferred work item into the body of the function.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 34 +++-------------------------------
+ block/bfq-mq.h | 3 ---
+ 2 files changed, 3 insertions(+), 34 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 21b876aeba16..1deb79a47181 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4080,28 +4080,13 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ }
+ }
+
+-static void bfq_exit_icq_body(struct work_struct *work)
+-{
+- struct bfq_io_cq *bic =
+- container_of(work, struct bfq_io_cq, exit_icq_work);
+-
+- bfq_exit_icq_bfqq(bic, true);
+- bfq_exit_icq_bfqq(bic, false);
+-}
+-
+-static void bfq_init_icq(struct io_cq *icq)
+-{
+- struct bfq_io_cq *bic = icq_to_bic(icq);
+-
+- INIT_WORK(&bic->exit_icq_work, bfq_exit_icq_body);
+-}
+-
+ static void bfq_exit_icq(struct io_cq *icq)
+ {
+ struct bfq_io_cq *bic = icq_to_bic(icq);
+
+ BUG_ON(!bic);
+- kblockd_schedule_work(&bic->exit_icq_work);
++ bfq_exit_icq_bfqq(bic, true);
++ bfq_exit_icq_bfqq(bic, false);
+ }
+
+ /*
+@@ -5019,21 +5004,9 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ BUG_ON(bfqd->in_service_queue);
+ BUG_ON(!list_empty(&bfqd->active_list));
+
+- list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) {
+- if (bfqq->bic) /* bfqqs without bic are handled below */
+- cancel_work_sync(&bfqq->bic->exit_icq_work);
+- }
+-
+ spin_lock_irq(&bfqd->lock);
+- list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) {
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
+ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
+- /*
+- * Make sure that deferred exit_icq_work completes
+- * without errors for bfq_queues without bic
+- */
+- if (!bfqq->bic)
+- bfqq->bfqd = NULL;
+- }
+ spin_unlock_irq(&bfqd->lock);
+
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+@@ -5471,7 +5444,6 @@ static struct elevator_type iosched_bfq_mq = {
+ .ops.mq = {
+ .get_rq_priv = bfq_get_rq_private,
+ .put_rq_priv = bfq_put_rq_private,
+- .init_icq = bfq_init_icq,
+ .exit_icq = bfq_exit_icq,
+ .insert_requests = bfq_insert_requests,
+ .dispatch_request = bfq_dispatch_request,
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index c3fcd5ebd735..23744b246db6 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -356,9 +356,6 @@ struct bfq_io_cq {
+ uint64_t blkcg_serial_nr; /* the current blkcg serial */
+ #endif
+
+-	/* delayed work to exec the body of the exit_icq handler */
+- struct work_struct exit_icq_work;
+-
+ /*
+ * Snapshot of the has_short_time flag before merging; taken
+ * to remember its value while the queue is merged, so as to
+
+From ab7e78a0ff095101de74e700f8743295a500bb20 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 21 Feb 2017 10:26:22 +0100
+Subject: [PATCH 16/51] Unnest request-queue and ioc locks from scheduler locks
+
+In some bio-merging functions, the request-queue lock needs to be
+taken to look up the bic associated with the process that issued
+the bio that may need to be merged. In addition, put_io_context must
+be invoked in some other functions, and put_io_context may cause the
+lock of the involved ioc to be taken. In both cases, these extra
+request-queue or ioc locks are taken, or might be taken, while the
+scheduler lock is being held. Meanwhile, there are other code paths,
+in part external to bfq-mq, in which the same locks are taken
+(nested) in the opposite order, i.e., the scheduler lock is taken
+while the request-queue or the ioc lock is being held. This leads to
+circular deadlocks.
+
+This commit addresses the issue by modifying the logic of the above
+functions, so that the lookup and put_io_context are performed, and
+thus the extra locks are taken, outside the critical sections
+protected by the scheduler lock.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 9 ++
+ block/bfq-mq-iosched.c | 264 ++++++++++++++++++++++++++++----------------
+ block/bfq-mq.h | 25 ++++-
+ block/bfq-sched.c | 11 ++
+ 4 files changed, 213 insertions(+), 96 deletions(-)
+
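/*
 * Editor's sketch (not part of the patch).  The commit message above
 * describes a lock-ordering fix: any operation that might take the
 * request-queue or ioc lock is moved outside the bfqd->lock critical
 * section.  For put_io_context this takes the form of a "record now,
 * put after unlocking" helper, mirrored by bfq_unlock_put_ioc() in
 * the diff below; the function name here is hypothetical.
 */
static void sketch_unlock_and_put_ioc(struct bfq_data *bfqd)
{
	struct io_context *ioc = bfqd->ioc_to_put;

	bfqd->ioc_to_put = NULL;	/* consume while bfqd->lock is held */
	spin_unlock_irq(&bfqd->lock);

	if (ioc)
		put_io_context(ioc);	/* may take ioc->lock: safe now */
}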
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 8a73de76f32b..cf59eeb7f08e 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -716,6 +716,9 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ struct bfq_group *bfqg;
+ struct bfq_data *bfqd;
+ struct bfq_entity *entity;
++#ifdef BFQ_MQ
++ unsigned long flags;
++#endif
+ int i;
+
+ BUG_ON(!pd);
+@@ -729,6 +732,9 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ if (!entity) /* root group */
+ return;
+
++#ifdef BFQ_MQ
++ spin_lock_irqsave(&bfqd->lock, flags);
++#endif
+ /*
+ * Empty all service_trees belonging to this group before
+ * deactivating the group itself.
+@@ -766,6 +772,9 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ __bfq_deactivate_entity(entity, false);
+ bfq_put_async_queues(bfqd, bfqg);
+
++#ifdef BFQ_MQ
++ bfq_unlock_put_ioc_restore(bfqd, flags);
++#endif
+ /*
+ * @blkg is going offline and will be ignored by
+ * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 1deb79a47181..69ef3761c95d 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -233,6 +233,7 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
+ return NULL;
+ }
+
++#define BFQ_MQ
+ #include "bfq-sched.c"
+ #include "bfq-cgroup-included.c"
+
+@@ -1564,15 +1565,9 @@ static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
+ struct bio *bio,
+ struct request_queue *q)
+ {
+- struct task_struct *tsk = current;
+- struct bfq_io_cq *bic;
+- struct bfq_queue *bfqq;
++ struct bfq_queue *bfqq = bfqd->bio_bfqq;
+
+- bic = bfq_bic_lookup(bfqd, tsk->io_context, q);
+- if (!bic)
+- return NULL;
+
+- bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
+ if (bfqq)
+ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
+
+@@ -1693,9 +1688,26 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct request *free = NULL;
++ /*
++ * bfq_bic_lookup grabs the queue_lock: invoke it now and
++ * store its return value for later use, to avoid nesting
++ * queue_lock inside the bfqd->lock. We assume that the bic
++ * returned by bfq_bic_lookup does not go away before
++ * bfqd->lock is taken.
++ */
++ struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
+ bool ret;
+
+ spin_lock_irq(&bfqd->lock);
++
++ if (bic)
++ bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
++ else
++ bfqd->bio_bfqq = NULL;
++ bfqd->bio_bic = bic;
++ /* Set next flag just for testing purposes */
++ bfqd->bio_bfqq_set = true;
++
+ ret = blk_mq_sched_try_merge(q, bio, &free);
+
+ /*
+@@ -1706,6 +1718,7 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+ */
+ if (free)
+ blk_mq_free_request(free);
++ bfqd->bio_bfqq_set = false;
+ spin_unlock_irq(&bfqd->lock);
+
+ return ret;
+@@ -2261,8 +2274,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ bool is_sync = op_is_sync(bio->bi_opf);
+- struct bfq_io_cq *bic;
+- struct bfq_queue *bfqq, *new_bfqq;
++ struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
+
+ /*
+ * Disallow merge of a sync bio into an async request.
+@@ -2273,31 +2285,40 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ /*
+ * Lookup the bfqq that this bio will be queued with. Allow
+ * merge only if rq is queued there.
+- * Queue lock is held here.
+ */
+- bic = bfq_bic_lookup(bfqd, current->io_context, q);
+- if (!bic)
++ if (!bfqq)
+ return false;
+
+- assert_spin_locked(&bfqd->lock);
+- bfqq = bic_to_bfqq(bic, is_sync);
+ /*
+ * We take advantage of this function to perform an early merge
+ * of the queues of possible cooperating processes.
+ */
+- if (bfqq) {
+- new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
+- if (new_bfqq) {
+- bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
+- /*
+- * If we get here, the bio will be queued in the
+- * shared queue, i.e., new_bfqq, so use new_bfqq
+- * to decide whether bio and rq can be merged.
+- */
+- bfqq = new_bfqq;
+- }
+- }
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++ if (new_bfqq) {
++ /*
++		 * bic still points to bfqq, so it has not yet been
++		 * redirected to some other bfq_queue, and a queue
++		 * merge between bfqq and new_bfqq can be safely
++		 * fulfilled, i.e., bic can be redirected to new_bfqq
++ * and bfqq can be put.
++ */
++ bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
++ new_bfqq);
++ /*
++ * If we get here, bio will be queued into new_queue,
++ * so use new_bfqq to decide whether bio and rq can be
++ * merged.
++ */
++ bfqq = new_bfqq;
+
++ /*
++		 * Change also bfqd->bio_bfqq, as
++		 * bfqd->bio_bic now points to new_bfqq, and
++		 * this function may be invoked again (and then may
++		 * use bfqd->bio_bfqq again).
++ */
++ bfqd->bio_bfqq = bfqq;
++ }
+ return bfqq == RQ_BFQQ(rq);
+ }
+
+@@ -3965,14 +3986,43 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ return rq;
+ }
+
++/*
++ * The next two functions release bfqd->lock and put the io context
++ * pointed to by bfqd->ioc_to_put. The put is delayed so as not to
++ * risk taking an ioc->lock while the scheduler lock is being held.
++ */
++static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
++{
++ struct io_context *ioc_to_put = bfqd->ioc_to_put;
++
++ bfqd->ioc_to_put = NULL;
++ spin_unlock_irq(&bfqd->lock);
++
++ if (ioc_to_put)
++ put_io_context(ioc_to_put);
++}
++
++static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd,
++ unsigned long flags)
++{
++ struct io_context *ioc_to_put = bfqd->ioc_to_put;
++
++ bfqd->ioc_to_put = NULL;
++ spin_unlock_irqrestore(&bfqd->lock, flags);
++
++ if (ioc_to_put)
++ put_io_context(ioc_to_put);
++}
++
+ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+ struct request *rq;
+
+ spin_lock_irq(&bfqd->lock);
++
+ rq = __bfq_dispatch_request(hctx);
+- spin_unlock_irq(&bfqd->lock);
++ bfq_unlock_put_ioc(bfqd);
+
+ return rq;
+ }
+@@ -3981,7 +4031,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ * Task holds one reference to the queue, dropped when task exits. Each rq
+ * in-flight on this queue also holds a reference, dropped when rq is freed.
+ *
+- * Queue lock must be held here. Recall not to use bfqq after calling
++ * Scheduler lock must be held here. Recall not to use bfqq after calling
+ * this function on it.
+ */
+ static void bfq_put_queue(struct bfq_queue *bfqq)
+@@ -4066,17 +4116,23 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
+
+ if (bfqq && bfqd) {
+- spin_lock_irq(&bfqd->lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&bfqd->lock, flags);
+ /*
+- * If the bic is using a shared queue, put the reference
+- * taken on the io_context when the bic started using a
+- * shared bfq_queue.
++ * If the bic is using a shared queue, put the
++ * reference taken on the io_context when the bic
++ * started using a shared bfq_queue. This put cannot
++		 * make ioc->ref_count reach 0, so no ioc->lock
++		 * risks being taken (leading to possible deadlock
++ * scenarios).
+ */
+ if (is_sync && bfq_bfqq_coop(bfqq))
+ put_io_context(bic->icq.ioc);
++
+ bfq_exit_bfqq(bfqd, bfqq);
+ bic_set_bfqq(bic, NULL, is_sync);
+- spin_unlock_irq(&bfqd->lock);
++ bfq_unlock_put_ioc_restore(bfqd, flags);
+ }
+ }
+
+@@ -4183,8 +4239,6 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ INIT_HLIST_NODE(&bfqq->burst_list_node);
+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
+
+- spin_lock_init(&bfqq->lock);
+-
+ bfqq->ref = 0;
+ bfqq->bfqd = bfqd;
+
+@@ -4476,6 +4530,14 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+
+ new_bfqq->ref++;
+ bfq_clear_bfqq_just_created(bfqq);
++ /*
++ * If the bic associated with the process
++ * issuing this request still points to bfqq
++		 * (and thus has not already been redirected
++ * to new_bfqq or even some other bfq_queue),
++ * then complete the merge and redirect it to
++ * new_bfqq.
++ */
+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
+ bfqq, new_bfqq);
+@@ -4498,14 +4560,17 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ }
+
+ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+- bool at_head)
++ bool at_head)
+ {
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+
+ spin_lock_irq(&bfqd->lock);
+- if (blk_mq_sched_try_insert_merge(q, rq))
+- goto done;
++ if (blk_mq_sched_try_insert_merge(q, rq)) {
++ spin_unlock_irq(&bfqd->lock);
++ return;
++ }
++
+ spin_unlock_irq(&bfqd->lock);
+
+ blk_mq_sched_request_inserted(rq);
+@@ -4530,8 +4595,8 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ q->last_merge = rq;
+ }
+ }
+-done:
+- spin_unlock_irq(&bfqd->lock);
++
++ bfq_unlock_put_ioc(bfqd);
+ }
+
+ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
+@@ -4724,7 +4789,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ bfq_completed_request(bfqq, bfqd);
+ bfq_put_rq_priv_body(bfqq);
+
+- spin_unlock_irqrestore(&bfqd->lock, flags);
++ bfq_unlock_put_ioc_restore(bfqd, flags);
+ } else {
+ /*
+ * Request rq may be still/already in the scheduler,
+@@ -4732,10 +4797,10 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ * defer such a check and removal, to avoid
+ * inconsistencies in the time interval from the end
+ * of this function to the start of the deferred work.
+- * Fortunately, this situation occurs only in process
+- * context, so taking the scheduler lock does not
+- * cause any deadlock, even if other locks are already
+- * (correctly) held by this process.
++ * This situation seems to occur only in process
++ * context, as a consequence of a merge. In the
++ * current version of the code, this implies that the
++		 * current version of the code, this implies that the
++		 * scheduler lock is held.
+ BUG_ON(in_interrupt());
+
+@@ -4758,8 +4823,6 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
+
+- put_io_context(bic->icq.ioc);
+-
+ if (bfqq_process_refs(bfqq) == 1) {
+ bfqq->pid = current->pid;
+ bfq_clear_bfqq_coop(bfqq);
+@@ -4775,6 +4838,41 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ return NULL;
+ }
+
++static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic,
++ struct bio *bio,
++ bool split, bool is_sync,
++ bool *new_queue)
++{
++ struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
++
++ if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
++ return bfqq;
++
++ if (new_queue)
++ *new_queue = true;
++
++ if (bfqq)
++ bfq_put_queue(bfqq);
++ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
++
++ bic_set_bfqq(bic, bfqq, is_sync);
++ if (split && is_sync) {
++ if ((bic->was_in_burst_list && bfqd->large_burst) ||
++ bic->saved_in_large_burst)
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ else {
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ if (bic->was_in_burst_list)
++ hlist_add_head(&bfqq->burst_list_node,
++ &bfqd->burst_list);
++ }
++ bfqq->split_time = jiffies;
++ }
++
++ return bfqq;
++}
++
+ /*
+ * Allocate bfq data structures associated with this request.
+ */
+@@ -4786,6 +4884,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ const int is_sync = rq_is_sync(rq);
+ struct bfq_queue *bfqq;
+ bool bfqq_already_existing = false, split = false;
++ bool new_queue = false;
+
+ spin_lock_irq(&bfqd->lock);
+
+@@ -4796,42 +4895,10 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ bfq_bic_update_cgroup(bic, bio);
+
+-new_queue:
+- bfqq = bic_to_bfqq(bic, is_sync);
+- if (!bfqq || bfqq == &bfqd->oom_bfqq) {
+- if (bfqq)
+- bfq_put_queue(bfqq);
+- bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
+- BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
++ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
++ &new_queue);
+
+- bic_set_bfqq(bic, bfqq, is_sync);
+- if (split && is_sync) {
+- bfq_log_bfqq(bfqd, bfqq,
+- "get_request: was_in_list %d "
+- "was_in_large_burst %d "
+- "large burst in progress %d",
+- bic->was_in_burst_list,
+- bic->saved_in_large_burst,
+- bfqd->large_burst);
+-
+- if ((bic->was_in_burst_list && bfqd->large_burst) ||
+- bic->saved_in_large_burst) {
+- bfq_log_bfqq(bfqd, bfqq,
+- "get_request: marking in "
+- "large burst");
+- bfq_mark_bfqq_in_large_burst(bfqq);
+- } else {
+- bfq_log_bfqq(bfqd, bfqq,
+- "get_request: clearing in "
+- "large burst");
+- bfq_clear_bfqq_in_large_burst(bfqq);
+- if (bic->was_in_burst_list)
+- hlist_add_head(&bfqq->burst_list_node,
+- &bfqd->burst_list);
+- }
+- bfqq->split_time = jiffies;
+- }
+- } else {
++ if (unlikely(!new_queue)) {
+ /* If the queue was seeky for too long, break it apart. */
+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
+ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
+@@ -4841,9 +4908,19 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ bic->saved_in_large_burst = true;
+
+ bfqq = bfq_split_bfqq(bic, bfqq);
+- split = true;
++ /*
++ * A reference to bic->icq.ioc needs to be
++ * released after a queue split. Do not do it
++			 * immediately, so as not to risk taking
++ * an ioc->lock while holding the scheduler
++ * lock.
++ */
++ bfqd->ioc_to_put = bic->icq.ioc;
++
+ if (!bfqq)
+- goto new_queue;
++ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
++ true, is_sync,
++ NULL);
+ else
+ bfqq_already_existing = true;
+ }
+@@ -4861,18 +4938,17 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ /*
+ * If a bfq_queue has only one process reference, it is owned
+- * by only one bfq_io_cq: we can set the bic field of the
+- * bfq_queue to the address of that structure. Also, if the
+- * queue has just been split, mark a flag so that the
+- * information is available to the other scheduler hooks.
++	 * by only this bic: we can then set bfqq->bic = bic. In
++ * addition, if the queue has also just been split, we have to
++ * resume its state.
+ */
+ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
+ bfqq->bic = bic;
+- if (split) {
++ if (bfqd->ioc_to_put) { /* if true, then there has been a split */
+ /*
+- * If the queue has just been split from a shared
+- * queue, restore the idle window and the possible
+- * weight raising period.
++ * The queue has just been split from a shared
++ * queue: restore the idle window and the
++ * possible weight raising period.
+ */
+ bfq_bfqq_resume_state(bfqq, bfqd, bic,
+ bfqq_already_existing);
+@@ -4882,7 +4958,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ if (unlikely(bfq_bfqq_just_created(bfqq)))
+ bfq_handle_burst(bfqd, bfqq);
+
+- spin_unlock_irq(&bfqd->lock);
++ bfq_unlock_put_ioc(bfqd);
+
+ return 0;
+
+@@ -4929,7 +5005,7 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+ bfq_bfqq_expire(bfqd, bfqq, true, reason);
+
+ schedule_dispatch:
+- spin_unlock_irqrestore(&bfqd->lock, flags);
++ bfq_unlock_put_ioc_restore(bfqd, flags);
+ bfq_schedule_dispatch(bfqd);
+ }
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 23744b246db6..bd83f1c02573 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -338,8 +338,6 @@ struct bfq_queue {
+ unsigned long wr_start_at_switch_to_srt;
+
+ unsigned long split_time; /* time of last split */
+-
+- spinlock_t lock;
+ };
+
+ /**
+@@ -609,6 +607,29 @@ struct bfq_data {
+ struct bfq_queue oom_bfqq;
+
+ spinlock_t lock;
++
++ /*
++ * bic associated with the task issuing current bio for
++ * merging. This and the next field are used as a support to
++ * be able to perform the bic lookup, needed by bio-merge
++ * functions, before the scheduler lock is taken, and thus
++ * avoid taking the request-queue lock while the scheduler
++ * lock is being held.
++ */
++ struct bfq_io_cq *bio_bic;
++ /* bfqq associated with the task issuing current bio for merging */
++ struct bfq_queue *bio_bfqq;
++ /* Extra flag used only for TESTING */
++ bool bio_bfqq_set;
++
++ /*
++ * io context to put right after bfqd->lock is released. This
++	 * field is used to postpone put_io_context, when needed, to
++ * after the scheduler lock has been released, and thus
++ * prevent an ioc->lock from being possibly taken while the
++ * scheduler lock is being held.
++ */
++ struct io_context *ioc_to_put;
+ };
+
+ enum bfqq_state_flags {
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index b54a638186e3..a5c8b4acd33c 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1905,7 +1905,18 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+ struct bfq_entity *entity = in_serv_entity;
+
+ if (bfqd->in_service_bic) {
++#ifdef BFQ_MQ
++ /*
++ * Schedule the release of a reference to
++ * bfqd->in_service_bic->icq.ioc to right after the
++ * scheduler lock is released. This ioc is not
++ * released immediately, to not risk to possibly take
++ * an ioc->lock while holding the scheduler lock.
++ */
++ bfqd->ioc_to_put = bfqd->in_service_bic->icq.ioc;
++#else
+ put_io_context(bfqd->in_service_bic->icq.ioc);
++#endif
+ bfqd->in_service_bic = NULL;
+ }
+
+
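For context, the deferred-put scheme this patch builds on (store the io_context in bfqd->ioc_to_put while the scheduler lock is held, and drop the reference only after that lock has been released, via bfq_unlock_put_ioc) reduces to the following minimal user-space sketch. The pthread-based types and names below are illustrative stand-ins, not kernel code:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-ins for struct io_context and struct bfq_data. */
struct ioc {
        pthread_mutex_t lock;       /* the "inner" lock we must never nest inside sd->lock */
        int refcount;
};

struct sched_data {
        pthread_mutex_t lock;       /* plays the role of bfqd->lock */
        struct ioc *ioc_to_put;     /* recorded while the lock is held */
};

static void ioc_put(struct ioc *ioc)
{
        int free_it;

        pthread_mutex_lock(&ioc->lock);
        free_it = (--ioc->refcount == 0);
        pthread_mutex_unlock(&ioc->lock);
        if (free_it)
                free(ioc);
}

/* Drop the scheduler lock first, then release the deferred reference,
 * so ioc->lock is never acquired while sched_data->lock is held. */
static void sched_unlock_put_ioc(struct sched_data *sd)
{
        struct ioc *to_put = sd->ioc_to_put;

        sd->ioc_to_put = NULL;
        pthread_mutex_unlock(&sd->lock);
        if (to_put)
                ioc_put(to_put);
}

int main(void)
{
        struct sched_data sd;
        struct ioc *ioc = malloc(sizeof(*ioc));

        pthread_mutex_init(&ioc->lock, NULL);
        ioc->refcount = 1;
        pthread_mutex_init(&sd.lock, NULL);
        sd.ioc_to_put = NULL;

        pthread_mutex_lock(&sd.lock);   /* scheduler-lock critical section */
        sd.ioc_to_put = ioc;            /* schedule the put, do not perform it here */
        sched_unlock_put_ioc(&sd);      /* unlock first, then drop the reference */
        return 0;
}

Because the reference-count lock is only ever taken after the outer lock has been dropped, the lock nesting the patch comments warn about cannot occur.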
+From 84cc7140cb4f0574710625f51abbb076a1dd2920 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 3 Mar 2017 09:31:14 +0100
+Subject: [PATCH 17/51] Add checks and extra log messages - Part II
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 42 ++++++++++++++++++++++++++++++++++++++++--
+ block/bfq-sched.c | 1 +
+ 2 files changed, 41 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 69ef3761c95d..5707d42b160d 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1567,6 +1567,7 @@ static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
+ {
+ struct bfq_queue *bfqq = bfqd->bio_bfqq;
+
++ BUG_ON(!bfqd->bio_bfqq_set);
+
+ if (bfqq)
+ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
+@@ -1719,6 +1720,7 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+ if (free)
+ blk_mq_free_request(free);
+ bfqd->bio_bfqq_set = false;
++ BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ return ret;
+@@ -1781,6 +1783,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ bfq_updated_next_req(bfqd, bfqq);
+ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
++ BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+ }
+ }
+@@ -1824,6 +1827,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+
+ bfq_remove_request(q, next);
+
++ BUG_ON(bfqq->bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqq->bfqd->lock);
+ end:
+ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
+@@ -2195,9 +2199,11 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+ {
+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
+ (unsigned long) new_bfqq->pid);
++ BUG_ON(bfqq->bic && bfqq->bic == new_bfqq->bic);
+ /* Save weight raising and idle window of the merged queues */
+ bfq_bfqq_save_state(bfqq);
+ bfq_bfqq_save_state(new_bfqq);
++
+ if (bfq_bfqq_IO_bound(bfqq))
+ bfq_mark_bfqq_IO_bound(new_bfqq);
+ bfq_clear_bfqq_IO_bound(bfqq);
+@@ -2276,6 +2282,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ bool is_sync = op_is_sync(bio->bi_opf);
+ struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
+
++ assert_spin_locked(&bfqd->lock);
+ /*
+ * Disallow merge of a sync bio into an async request.
+ */
+@@ -2286,6 +2293,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ * Lookup the bfqq that this bio will be queued with. Allow
+ * merge only if rq is queued there.
+ */
++ BUG_ON(!bfqd->bio_bfqq_set);
+ if (!bfqq)
+ return false;
+
+@@ -2294,6 +2302,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ * of the queues of possible cooperating processes.
+ */
+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++ BUG_ON(new_bfqq == bfqq);
+ if (new_bfqq) {
+ /*
+ * bic still points to bfqq, then it has not yet been
+@@ -4040,6 +4049,8 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ struct bfq_group *bfqg = bfqq_group(bfqq);
+ #endif
+
++ assert_spin_locked(&bfqq->bfqd->lock);
++
+ BUG_ON(bfqq->ref <= 0);
+
+ if (bfqq->bfqd)
+@@ -4119,6 +4130,7 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
++ BUG_ON(bfqd->ioc_to_put);
+ /*
+ * If the bic is using a shared queue, put the
+ * reference taken on the io_context when the bic
+@@ -4567,10 +4579,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ spin_lock_irq(&bfqd->lock);
+ if (blk_mq_sched_try_insert_merge(q, rq)) {
++ BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+ return;
+ }
+
++ BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ blk_mq_sched_request_inserted(rq);
+@@ -4785,6 +4799,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
++ BUG_ON(bfqd->ioc_to_put);
+
+ bfq_completed_request(bfqq, bfqd);
+ bfq_put_rq_priv_body(bfqq);
+@@ -4855,13 +4870,28 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
+ if (bfqq)
+ bfq_put_queue(bfqq);
+ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
++ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
+
+ bic_set_bfqq(bic, bfqq, is_sync);
+ if (split && is_sync) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "get_request: was_in_list %d "
++ "was_in_large_burst %d "
++ "large burst in progress %d",
++ bic->was_in_burst_list,
++ bic->saved_in_large_burst,
++ bfqd->large_burst);
++
+ if ((bic->was_in_burst_list && bfqd->large_burst) ||
+- bic->saved_in_large_burst)
++ bic->saved_in_large_burst) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "get_request: marking in "
++ "large burst");
+ bfq_mark_bfqq_in_large_burst(bfqq);
+- else {
++ } else {
++ bfq_log_bfqq(bfqd, bfqq,
++ "get_request: clearing in "
++ "large burst");
+ bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list)
+ hlist_add_head(&bfqq->burst_list_node,
+@@ -4897,10 +4927,12 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
+ &new_queue);
++ BUG_ON(bfqd->ioc_to_put);
+
+ if (unlikely(!new_queue)) {
+ /* If the queue was seeky for too long, break it apart. */
+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++ BUG_ON(!is_sync);
+ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
+
+ /* Update bic before losing reference to bfqq */
+@@ -4923,6 +4955,9 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ NULL);
+ else
+ bfqq_already_existing = true;
++
++ BUG_ON(!bfqq);
++ BUG_ON(bfqq == &bfqd->oom_bfqq);
+ }
+ }
+
+@@ -4976,6 +5011,8 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+
+ BUG_ON(!bfqd);
+ spin_lock_irqsave(&bfqd->lock, flags);
++ BUG_ON(bfqd->ioc_to_put);
++
+ bfq_log_bfqq(bfqd, bfqq, "handling slice_timer expiration");
+ bfq_clear_bfqq_wait_request(bfqq);
+
+@@ -5083,6 +5120,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ spin_lock_irq(&bfqd->lock);
+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
+ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
++ BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index a5c8b4acd33c..85e59eeb3569 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1906,6 +1906,7 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+
+ if (bfqd->in_service_bic) {
+ #ifdef BFQ_MQ
++ BUG_ON(bfqd->ioc_to_put);
+ /*
+ * Schedule the release of a reference to
+ * bfqd->in_service_bic->icq.ioc to right after the
+
+From 3d54cb804f1db2e08ce4a6cc335868538542f587 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 22 Feb 2017 11:30:01 +0100
+Subject: [PATCH 18/51] Fix unbalanced increment of rq_in_driver
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 52 +++++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 43 insertions(+), 9 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 5707d42b160d..9cbcb8d43d81 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -3936,9 +3936,45 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ rq = list_first_entry(&bfqd->dispatch, struct request,
+ queuelist);
+ list_del_init(&rq->queuelist);
++
+ bfq_log(bfqd,
+ "dispatch requests: picked %p from dispatch list", rq);
+- goto exit;
++ bfqq = RQ_BFQQ(rq);
++
++ if (bfqq) {
++ /*
++ * Increment counters here, because this
++ * dispatch does not follow the standard
++ * dispatch flow (where counters are
++ * incremented)
++ */
++ bfqq->dispatched++;
++
++ goto inc_in_driver_start_rq;
++ }
++
++ /*
++ * We exploit the put_rq_private hook to decrement
++ * rq_in_driver, but put_rq_private will not be
++ * invoked on this request. So, to avoid unbalance,
++ * just start this request, without incrementing
++ * rq_in_driver. As a negative consequence,
++ * rq_in_driver is deceptively lower than it should be
++ * while this request is in service. This may cause
++ * bfq_schedule_dispatch to be invoked uselessly.
++ *
++ * As for implementing an exact solution, the
++ * put_request hook, if defined, is probably invoked
++ * also on this request. So, by exploiting this hook,
++ * we could 1) increment rq_in_driver here, and 2)
++ * decrement it in put_request. Such a solution would
++ * let the value of the counter be always accurate,
++ * but it would entail using an extra interface
++ * function. This cost seems higher than the benefit,
++ * being the frequency of non-elevator-private
++ * requests very low.
++ */
++ goto start_rq;
+ }
+
+ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
+@@ -3973,10 +4009,12 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+
+ BUG_ON(bfqq->next_rq == NULL &&
+ bfqq->entity.budget < bfqq->entity.service);
+-exit:
++
+ if (rq) {
+- rq->rq_flags |= RQF_STARTED;
++ inc_in_driver_start_rq:
+ bfqd->rq_in_driver++;
++ start_rq:
++ rq->rq_flags |= RQF_STARTED;
+ if (bfqq)
+ bfq_log_bfqq(bfqd, bfqq,
+ "dispatched %s request %p, rq_in_driver %d",
+@@ -3992,6 +4030,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ "returned NULL request, rq_in_driver %d",
+ bfqd->rq_in_driver);
+
++exit:
+ return rq;
+ }
+
+@@ -4591,15 +4630,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ spin_lock_irq(&bfqd->lock);
+ if (at_head || blk_rq_is_passthrough(rq)) {
+- struct bfq_queue *bfqq = RQ_BFQQ(rq);
+-
+ if (at_head)
+ list_add(&rq->queuelist, &bfqd->dispatch);
+ else
+ list_add_tail(&rq->queuelist, &bfqd->dispatch);
+-
+- if (bfqq)
+- bfqq->dispatched++;
+ } else {
+ __bfq_insert_request(bfqd, rq);
+
+@@ -4966,7 +5000,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ "get_request: new allocated %d", bfqq->allocated);
+
+ bfqq->ref++;
+- bfq_log_bfqq(bfqd, bfqq, "get_request: bfqq %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", rq, bfqq, bfqq->ref);
+
+ rq->elv.priv[0] = bic;
+ rq->elv.priv[1] = bfqq;
+
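The balancing rule described in the long comment above amounts to a simple invariant: rq_in_driver is incremented only for requests that will later reach the matching decrement in the completion path. A toy sketch of that invariant, with hypothetical names (has_priv stands in for "carries elevator-private data"):

#include <assert.h>
#include <stdbool.h>

struct sched { int rq_in_driver; };

/* A request owned by the scheduler is counted at dispatch time and
 * uncounted at completion; a pass-through/dispatch-list request is
 * counted in neither place, so the counter stays balanced. */
static void dispatch(struct sched *s, bool has_priv)
{
        if (has_priv)
                s->rq_in_driver++;
}

static void complete(struct sched *s, bool has_priv)
{
        if (has_priv)
                s->rq_in_driver--;
        assert(s->rq_in_driver >= 0);
}

int main(void)
{
        struct sched s = { 0 };

        dispatch(&s, true);  complete(&s, true);   /* normal elevator request */
        dispatch(&s, false); complete(&s, false);  /* pass-through request */
        assert(s.rq_in_driver == 0);
        return 0;
}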
+From 7ba977d696b239569b4cd233aebc99e136ecf487 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 3 Mar 2017 09:39:35 +0100
+Subject: [PATCH 19/51] Add checks and extra log messages - Part III
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 9cbcb8d43d81..24b529a2edc7 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4630,10 +4630,21 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ spin_lock_irq(&bfqd->lock);
+ if (at_head || blk_rq_is_passthrough(rq)) {
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
+ if (at_head)
+ list_add(&rq->queuelist, &bfqd->dispatch);
+ else
+ list_add_tail(&rq->queuelist, &bfqd->dispatch);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqd, bfqq,
++ "insert_request %p in disp: at_head %d",
++ rq, at_head);
++ else
++ bfq_log(bfqd,
++ "insert_request %p in disp: at_head %d",
++ rq, at_head);
+ } else {
+ __bfq_insert_request(bfqd, rq);
+
+
+From c94e47b2908600b8ba89f84b0ac7febddd313141 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 17 Feb 2017 14:28:02 +0100
+Subject: [PATCH 20/51] TESTING: Check wrong invocation of merge and
+ put_rq_priv functions
+
+Check that merge functions are not invoked on requests queued in the
+dispatch queue, and that put_rq_private is likewise not invoked on these
+requests if, in addition, they have not passed through get_rq_private.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 22 ++++++++++++++++++++++
+ include/linux/blkdev.h | 2 ++
+ 2 files changed, 24 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 24b529a2edc7..b4d40bb712d2 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1746,6 +1746,8 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
+ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ enum elv_merge type)
+ {
++ BUG_ON(req->rq_flags & RQF_DISP_LIST);
++
+ if (type == ELEVATOR_FRONT_MERGE &&
+ rb_prev(&req->rb_node) &&
+ blk_rq_pos(req) <
+@@ -1795,6 +1797,8 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+
+ BUG_ON(!RQ_BFQQ(rq));
+ BUG_ON(!RQ_BFQQ(next));
++ BUG_ON(rq->rq_flags & RQF_DISP_LIST);
++ BUG_ON(next->rq_flags & RQF_DISP_LIST);
+
+ if (!RB_EMPTY_NODE(&rq->rb_node))
+ goto end;
+@@ -3936,6 +3940,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ rq = list_first_entry(&bfqd->dispatch, struct request,
+ queuelist);
+ list_del_init(&rq->queuelist);
++ rq->rq_flags &= ~RQF_DISP_LIST;
+
+ bfq_log(bfqd,
+ "dispatch requests: picked %p from dispatch list", rq);
+@@ -3950,6 +3955,17 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ */
+ bfqq->dispatched++;
+
++ /*
++ * TESTING: reset DISP_LIST flag, because: 1)
++	 * this request has passed through
++ * get_rq_private, 2) then it will have
++ * put_rq_private invoked on it, and 3) in
++ * put_rq_private we use this flag to check
++ * that put_rq_private is not invoked on
++ * requests for which get_rq_private has been
++ * invoked.
++ */
++ rq->rq_flags &= ~RQF_DISP_LIST;
+ goto inc_in_driver_start_rq;
+ }
+
+@@ -4637,6 +4653,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ else
+ list_add_tail(&rq->queuelist, &bfqd->dispatch);
+
++ rq->rq_flags |= RQF_DISP_LIST;
+ if (bfqq)
+ bfq_log_bfqq(bfqd, bfqq,
+ "insert_request %p in disp: at_head %d",
+@@ -4824,6 +4841,10 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ bfqd = bfqq->bfqd;
+ BUG_ON(!bfqd);
+
++ if (rq->rq_flags & RQF_DISP_LIST) {
++ pr_crit("putting disp rq %p for %d", rq, bfqq->pid);
++ BUG();
++ }
+ BUG_ON(rq->rq_flags & RQF_QUEUED);
+ BUG_ON(!(rq->rq_flags & RQF_ELVPRIV));
+
+@@ -5015,6 +5036,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ rq->elv.priv[0] = bic;
+ rq->elv.priv[1] = bfqq;
++ rq->rq_flags &= ~RQF_DISP_LIST;
+
+ /*
+ * If a bfq_queue has only one process reference, it is owned
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 10f892ca585d..0048e59e6d07 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -121,6 +121,8 @@ typedef __u32 __bitwise req_flags_t;
+ /* Look at ->special_vec for the actual data payload instead of the
+ bio chain. */
+ #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
++/* DEBUG: rq in bfq-mq dispatch list */
++#define RQF_DISP_LIST ((__force req_flags_t)(1 << 19))
+
+ /* flags that prevent us from merging requests: */
+ #define RQF_NOMERGE_FLAGS \
+
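The debug flag added above is ordinary bit-flag bookkeeping on rq->rq_flags. A self-contained sketch of the intended set/clear/test lifecycle follows; the minimal struct request and the lifecycle comments are assumptions for illustration, not the kernel definitions:

#include <assert.h>
#include <stdint.h>

typedef uint32_t req_flags_t;
#define RQF_DISP_LIST ((req_flags_t)(1u << 19))    /* mirrors the flag added above */

struct request {                                    /* illustrative, not the kernel struct */
        req_flags_t rq_flags;
};

int main(void)
{
        struct request rq = { .rq_flags = 0 };

        rq.rq_flags |= RQF_DISP_LIST;               /* queued on the dispatch list */
        assert(rq.rq_flags & RQF_DISP_LIST);

        rq.rq_flags &= ~RQF_DISP_LIST;              /* picked up by the dispatch path */
        assert(!(rq.rq_flags & RQF_DISP_LIST));     /* completion no longer trips the check */
        return 0;
}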
+From 49206f9052d13c96d49dbc36c612bed41b2d6552 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Sat, 25 Feb 2017 17:38:05 +0100
+Subject: [PATCH 21/51] Complete support for cgroups
+
+This commit completes cgroups support for bfq-mq. In particular, it deals with
+a sort of circular dependency introduced in blk-mq: the function
+blkcg_activate_policy, invoked during scheduler initialization, triggers the
+invocation of the has_work scheduler hook (before the init function is
+finished). To address this issue, this commit moves the invocation of
+blkcg_activate_policy to after the initialization of all the fields that can be
+initialized before invoking blkcg_activate_policy itself. This enables has_work
+to correctly return false, and thus to prevent the blk-mq stack from invoking
+further scheduler hooks before the init function is finished.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/Kconfig.iosched | 9 +++++
+ block/bfq-mq-iosched.c | 108 ++++++++++++++++++++++++++++---------------------
+ block/bfq-mq.h | 2 +-
+ 3 files changed, 72 insertions(+), 47 deletions(-)
+
+diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
+index 2d94af3d8b0a..299a6861fb90 100644
+--- a/block/Kconfig.iosched
++++ b/block/Kconfig.iosched
+@@ -106,6 +106,15 @@ config MQ_IOSCHED_BFQ
+ guarantees a low latency to interactive and soft real-time
+ applications. Details in Documentation/block/bfq-iosched.txt
+
++config MQ_BFQ_GROUP_IOSCHED
++ bool "BFQ-MQ hierarchical scheduling support"
++ depends on MQ_IOSCHED_BFQ && BLK_CGROUP
++ default n
++ ---help---
++
++ Enable hierarchical scheduling in BFQ-MQ, using the blkio
++ (cgroups-v1) or io (cgroups-v2) controller.
++
+ config MQ_IOSCHED_DEADLINE
+ tristate "MQ deadline I/O scheduler"
+ default y
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index b4d40bb712d2..02a1e7fd0ea4 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -88,7 +88,6 @@
+ #include "blk-mq.h"
+ #include "blk-mq-tag.h"
+ #include "blk-mq-sched.h"
+-#undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */
+ #include "bfq-mq.h"
+
+ /* Expiration time of sync (0) and async (1) requests, in ns. */
+@@ -233,15 +232,6 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
+ return NULL;
+ }
+
+-#define BFQ_MQ
+-#include "bfq-sched.c"
+-#include "bfq-cgroup-included.c"
+-
+-#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
+-#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
+-
+-#define bfq_sample_valid(samples) ((samples) > 80)
+-
+ /*
+ * Scheduler run of queue, if there are requests pending and no one in the
+ * driver that will restart queueing.
+@@ -255,6 +245,43 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ }
+
+ /*
++ * Next two functions release bfqd->lock and put the io context
++ * pointed by bfqd->ioc_to_put. This delayed put is used to not risk
++ * to take an ioc->lock while the scheduler lock is being held.
++ */
++static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
++{
++ struct io_context *ioc_to_put = bfqd->ioc_to_put;
++
++ bfqd->ioc_to_put = NULL;
++ spin_unlock_irq(&bfqd->lock);
++
++ if (ioc_to_put)
++ put_io_context(ioc_to_put);
++}
++
++static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd,
++ unsigned long flags)
++{
++ struct io_context *ioc_to_put = bfqd->ioc_to_put;
++
++ bfqd->ioc_to_put = NULL;
++ spin_unlock_irqrestore(&bfqd->lock, flags);
++
++ if (ioc_to_put)
++ put_io_context(ioc_to_put);
++}
++
++#define BFQ_MQ
++#include "bfq-sched.c"
++#include "bfq-cgroup-included.c"
++
++#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples) ((samples) > 80)
++
++/*
+ * Lifted from AS - choose which of rq1 and rq2 that is best served now.
+ * We choose the request that is closesr to the head right now. Distance
+ * behind the head is penalized and only allowed to a certain extent.
+@@ -4050,34 +4077,6 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ return rq;
+ }
+
+-/*
+- * Next two functions release bfqd->lock and put the io context
+- * pointed by bfqd->ioc_to_put. This delayed put is used to not risk
+- * to take an ioc->lock while the scheduler lock is being held.
+- */
+-static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
+-{
+- struct io_context *ioc_to_put = bfqd->ioc_to_put;
+-
+- bfqd->ioc_to_put = NULL;
+- spin_unlock_irq(&bfqd->lock);
+-
+- if (ioc_to_put)
+- put_io_context(ioc_to_put);
+-}
+-
+-static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd,
+- unsigned long flags)
+-{
+- struct io_context *ioc_to_put = bfqd->ioc_to_put;
+-
+- bfqd->ioc_to_put = NULL;
+- spin_unlock_irqrestore(&bfqd->lock, flags);
+-
+- if (ioc_to_put)
+- put_io_context(ioc_to_put);
+-}
+-
+ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+@@ -5239,6 +5238,10 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ }
+ eq->elevator_data = bfqd;
+
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
++
+ /*
+ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
+ * Grab a permanent reference to it, so that the normal code flow
+@@ -5261,12 +5264,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ bfqd->oom_bfqq.entity.prio_changed = 1;
+
+ bfqd->queue = q;
+-
+- bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
+- if (!bfqd->root_group)
+- goto out_free;
+- bfq_init_root_group(bfqd->root_group, bfqd);
+- bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
++ INIT_LIST_HEAD(&bfqd->dispatch);
+
+ hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+@@ -5324,9 +5322,27 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ bfqd->device_speed = BFQ_BFQD_FAST;
+
+ spin_lock_init(&bfqd->lock);
+- INIT_LIST_HEAD(&bfqd->dispatch);
+
+- q->elevator = eq;
++ /*
++ * The invocation of the next bfq_create_group_hierarchy
++ * function is the head of a chain of function calls
++ * (bfq_create_group_hierarchy->blkcg_activate_policy->
++ * blk_mq_freeze_queue) that may lead to the invocation of the
++ * has_work hook function. For this reason,
++ * bfq_create_group_hierarchy is invoked only after all
++ * scheduler data has been initialized, apart from the fields
++ * that can be initialized only after invoking
++ * bfq_create_group_hierarchy. This, in particular, enables
++ * has_work to correctly return false. Of course, to avoid
++ * other inconsistencies, the blk-mq stack must then refrain
++ * from invoking further scheduler hooks before this init
++ * function is finished.
++ */
++ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
++ if (!bfqd->root_group)
++ goto out_free;
++ bfq_init_root_group(bfqd->root_group, bfqd);
++ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
+
+ return 0;
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index bd83f1c02573..2c81c02bccc4 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -20,7 +20,7 @@
+ #include <linux/blk-cgroup.h>
+
+ /* see comments on CONFIG_BFQ_GROUP_IOSCHED in bfq.h */
+-#ifdef CONFIG_BFQ_MQ_GROUP_IOSCHED
++#ifdef CONFIG_MQ_BFQ_GROUP_IOSCHED
+ #define BFQ_GROUP_IOSCHED_ENABLED
+ #endif
+
+
+From 62d12db23ce14d2716b5cff7d2635fbc817b96d0 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 17 Mar 2017 06:15:18 +0100
+Subject: [PATCH 22/51] Remove all get and put of I/O contexts
+
+When a bfq queue is set in service and when it is merged, a reference
+to the I/O context associated with the queue is taken. This reference
+is then released when the queue is deselected from service or
+split. More precisely, the release of the reference is postponed to
+when the scheduler lock is released, to avoid nesting between the
+scheduler and the I/O-context lock. In fact, such nesting would lead
+to deadlocks, because of other code paths that take the same locks in
+the opposite order. This postponing of I/O-context releases
+complicates the code.
+
+This commit addresses this issue by modifying the involved operations
+so that they no longer need to take the above I/O-context references.
+It then also removes every remaining get and release of these references.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 2 +-
+ block/bfq-mq-iosched.c | 127 ++++++++------------------------------------
+ block/bfq-mq.h | 11 ----
+ block/bfq-sched.c | 17 ------
+ 4 files changed, 22 insertions(+), 135 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index cf59eeb7f08e..dfacca799b5e 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -773,7 +773,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ bfq_put_async_queues(bfqd, bfqg);
+
+ #ifdef BFQ_MQ
+- bfq_unlock_put_ioc_restore(bfqd, flags);
++ spin_unlock_irqrestore(&bfqd->lock, flags);
+ #endif
+ /*
+ * @blkg is going offline and will be ignored by
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 02a1e7fd0ea4..8e7589d3280f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -244,34 +244,6 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ }
+ }
+
+-/*
+- * Next two functions release bfqd->lock and put the io context
+- * pointed by bfqd->ioc_to_put. This delayed put is used to not risk
+- * to take an ioc->lock while the scheduler lock is being held.
+- */
+-static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
+-{
+- struct io_context *ioc_to_put = bfqd->ioc_to_put;
+-
+- bfqd->ioc_to_put = NULL;
+- spin_unlock_irq(&bfqd->lock);
+-
+- if (ioc_to_put)
+- put_io_context(ioc_to_put);
+-}
+-
+-static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd,
+- unsigned long flags)
+-{
+- struct io_context *ioc_to_put = bfqd->ioc_to_put;
+-
+- bfqd->ioc_to_put = NULL;
+- spin_unlock_irqrestore(&bfqd->lock, flags);
+-
+- if (ioc_to_put)
+- put_io_context(ioc_to_put);
+-}
+-
+ #define BFQ_MQ
+ #include "bfq-sched.c"
+ #include "bfq-cgroup-included.c"
+@@ -1747,7 +1719,6 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+ if (free)
+ blk_mq_free_request(free);
+ bfqd->bio_bfqq_set = false;
+- BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ return ret;
+@@ -1812,7 +1783,6 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ bfq_updated_next_req(bfqd, bfqq);
+ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
+- BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+ }
+ }
+@@ -1858,7 +1828,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+
+ bfq_remove_request(q, next);
+
+- BUG_ON(bfqq->bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqq->bfqd->lock);
+ end:
+ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
+@@ -2035,20 +2004,18 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ * first time that the requests of some process are redirected to
+ * it.
+ *
+- * We redirect bfqq to new_bfqq and not the opposite, because we
+- * are in the context of the process owning bfqq, hence we have
+- * the io_cq of this process. So we can immediately configure this
+- * io_cq to redirect the requests of the process to new_bfqq.
++ * We redirect bfqq to new_bfqq and not the opposite, because
++ * we are in the context of the process owning bfqq, thus we
++ * have the io_cq of this process. So we can immediately
++ * configure this io_cq to redirect the requests of the
++ * process to new_bfqq. In contrast, the io_cq of new_bfqq is
++ * not available any more (new_bfqq->bic == NULL).
+ *
+- * NOTE, even if new_bfqq coincides with the in-service queue, the
+- * io_cq of new_bfqq is not available, because, if the in-service
+- * queue is shared, bfqd->in_service_bic may not point to the
+- * io_cq of the in-service queue.
+- * Redirecting the requests of the process owning bfqq to the
+- * currently in-service queue is in any case the best option, as
+- * we feed the in-service queue with new requests close to the
+- * last request served and, by doing so, hopefully increase the
+- * throughput.
++ * Anyway, even in case new_bfqq coincides with the in-service
++	 * queue, redirecting requests to the in-service queue is the
++ * best option, as we feed the in-service queue with new
++ * requests close to the last request served and, by doing so,
++ * are likely to increase the throughput.
+ */
+ bfqq->new_bfqq = new_bfqq;
+ new_bfqq->ref += process_refs;
+@@ -2147,13 +2114,13 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ in_service_bfqq = bfqd->in_service_queue;
+
+ if (in_service_bfqq && in_service_bfqq != bfqq &&
+- bfqd->in_service_bic && wr_from_too_long(in_service_bfqq)
++ wr_from_too_long(in_service_bfqq)
+ && likely(in_service_bfqq == &bfqd->oom_bfqq))
+ bfq_log_bfqq(bfqd, bfqq,
+ "would have tried merge with in-service-queue, but wr");
+
+- if (!in_service_bfqq || in_service_bfqq == bfqq ||
+- !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) ||
++ if (!in_service_bfqq || in_service_bfqq == bfqq
++ || wr_from_too_long(in_service_bfqq) ||
+ unlikely(in_service_bfqq == &bfqd->oom_bfqq))
+ goto check_scheduled;
+
+@@ -2214,16 +2181,6 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+ }
+
+-static void bfq_get_bic_reference(struct bfq_queue *bfqq)
+-{
+- /*
+- * If bfqq->bic has a non-NULL value, the bic to which it belongs
+- * is about to begin using a shared bfq_queue.
+- */
+- if (bfqq->bic)
+- atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
+-}
+-
+ static void
+ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+@@ -2280,12 +2237,6 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+ bfqd->wr_busy_queues);
+
+ /*
+- * Grab a reference to the bic, to prevent it from being destroyed
+- * before being possibly touched by a bfq_split_bfqq().
+- */
+- bfq_get_bic_reference(bfqq);
+- bfq_get_bic_reference(new_bfqq);
+- /*
+ * Merge queues (that is, let bic redirect its requests to new_bfqq)
+ */
+ bic_set_bfqq(bic, new_bfqq, 1);
+@@ -2472,16 +2423,10 @@ static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
+ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
+ {
+ struct bfq_queue *bfqq = bfqd->in_service_queue;
+- struct bfq_io_cq *bic;
+ u32 sl;
+
+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
+
+- /* Processes have exited, don't wait. */
+- bic = bfqd->in_service_bic;
+- if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0)
+- return;
+-
+ bfq_mark_bfqq_wait_request(bfqq);
+
+ /*
+@@ -3922,11 +3867,6 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
+ bfq_bfqq_budget_left(bfqq),
+ bfqq->dispatched);
+
+- if (!bfqd->in_service_bic) {
+- atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
+- bfqd->in_service_bic = RQ_BIC(rq);
+- }
+-
+ /*
+ * Expire bfqq, pretending that its budget expired, if bfqq
+ * belongs to CLASS_IDLE and other queues are waiting for
+@@ -4085,7 +4025,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ spin_lock_irq(&bfqd->lock);
+
+ rq = __bfq_dispatch_request(hctx);
+- bfq_unlock_put_ioc(bfqd);
++ spin_unlock_irq(&bfqd->lock);
+
+ return rq;
+ }
+@@ -4184,21 +4124,10 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
+- BUG_ON(bfqd->ioc_to_put);
+- /*
+- * If the bic is using a shared queue, put the
+- * reference taken on the io_context when the bic
+- * started using a shared bfq_queue. This put cannot
+- * make ioc->ref_count reach 0, then no ioc->lock
+- * risks to be taken (leading to possible deadlock
+- * scenarios).
+- */
+- if (is_sync && bfq_bfqq_coop(bfqq))
+- put_io_context(bic->icq.ioc);
+
+ bfq_exit_bfqq(bfqd, bfqq);
+ bic_set_bfqq(bic, NULL, is_sync);
+- bfq_unlock_put_ioc_restore(bfqd, flags);
++ spin_unlock_irqrestore(&bfqd->lock, flags);
+ }
+ }
+
+@@ -4633,12 +4562,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ spin_lock_irq(&bfqd->lock);
+ if (blk_mq_sched_try_insert_merge(q, rq)) {
+- BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+ return;
+ }
+
+- BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ blk_mq_sched_request_inserted(rq);
+@@ -4671,7 +4598,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ }
+ }
+
+- bfq_unlock_put_ioc(bfqd);
++ spin_unlock_irq(&bfqd->lock);
+ }
+
+ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
+@@ -4864,12 +4791,11 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
+- BUG_ON(bfqd->ioc_to_put);
+
+ bfq_completed_request(bfqq, bfqd);
+ bfq_put_rq_priv_body(bfqq);
+
+- bfq_unlock_put_ioc_restore(bfqd, flags);
++ spin_unlock_irqrestore(&bfqd->lock, flags);
+ } else {
+ /*
+ * Request rq may be still/already in the scheduler,
+@@ -4992,7 +4918,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
+ &new_queue);
+- BUG_ON(bfqd->ioc_to_put);
+
+ if (unlikely(!new_queue)) {
+ /* If the queue was seeky for too long, break it apart. */
+@@ -5005,14 +4930,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ bic->saved_in_large_burst = true;
+
+ bfqq = bfq_split_bfqq(bic, bfqq);
+- /*
+- * A reference to bic->icq.ioc needs to be
+- * released after a queue split. Do not do it
+- * immediately, to not risk to possibly take
+- * an ioc->lock while holding the scheduler
+- * lock.
+- */
+- bfqd->ioc_to_put = bic->icq.ioc;
+
+ if (!bfqq)
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
+@@ -5045,7 +4962,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ */
+ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
+ bfqq->bic = bic;
+- if (bfqd->ioc_to_put) { /* if true, then there has been a split */
++ if (split) {
+ /*
+ * The queue has just been split from a shared
+ * queue: restore the idle window and the
+@@ -5059,7 +4976,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ if (unlikely(bfq_bfqq_just_created(bfqq)))
+ bfq_handle_burst(bfqd, bfqq);
+
+- bfq_unlock_put_ioc(bfqd);
++ spin_unlock_irq(&bfqd->lock);
+
+ return 0;
+
+@@ -5077,7 +4994,6 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+
+ BUG_ON(!bfqd);
+ spin_lock_irqsave(&bfqd->lock, flags);
+- BUG_ON(bfqd->ioc_to_put);
+
+ bfq_log_bfqq(bfqd, bfqq, "handling slice_timer expiration");
+ bfq_clear_bfqq_wait_request(bfqq);
+@@ -5108,7 +5024,7 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+ bfq_bfqq_expire(bfqd, bfqq, true, reason);
+
+ schedule_dispatch:
+- bfq_unlock_put_ioc_restore(bfqd, flags);
++ spin_unlock_irqrestore(&bfqd->lock, flags);
+ bfq_schedule_dispatch(bfqd);
+ }
+
+@@ -5186,7 +5102,6 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ spin_lock_irq(&bfqd->lock);
+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
+ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
+- BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 2c81c02bccc4..36ee24a87dda 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -458,8 +458,6 @@ struct bfq_data {
+
+ /* bfq_queue in service */
+ struct bfq_queue *in_service_queue;
+- /* bfq_io_cq (bic) associated with the @in_service_queue */
+- struct bfq_io_cq *in_service_bic;
+
+ /* on-disk position of the last served request */
+ sector_t last_position;
+@@ -621,15 +619,6 @@ struct bfq_data {
+ struct bfq_queue *bio_bfqq;
+ /* Extra flag used only for TESTING */
+ bool bio_bfqq_set;
+-
+- /*
+- * io context to put right after bfqd->lock is released. This
+-	 * field is used to postpone put_io_context, when needed, to
+- * after the scheduler lock has been released, and thus
+- * prevent an ioc->lock from being possibly taken while the
+- * scheduler lock is being held.
+- */
+- struct io_context *ioc_to_put;
+ };
+
+ enum bfqq_state_flags {
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 85e59eeb3569..9c4e6797d8c9 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1904,23 +1904,6 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+ struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
+ struct bfq_entity *entity = in_serv_entity;
+
+- if (bfqd->in_service_bic) {
+-#ifdef BFQ_MQ
+- BUG_ON(bfqd->ioc_to_put);
+- /*
+- * Schedule the release of a reference to
+- * bfqd->in_service_bic->icq.ioc to right after the
+- * scheduler lock is released. This ioc is not
+- * released immediately, to not risk to possibly take
+- * an ioc->lock while holding the scheduler lock.
+- */
+- bfqd->ioc_to_put = bfqd->in_service_bic->icq.ioc;
+-#else
+- put_io_context(bfqd->in_service_bic->icq.ioc);
+-#endif
+- bfqd->in_service_bic = NULL;
+- }
+-
+ bfq_clear_bfqq_wait_request(in_serv_bfqq);
+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
+ bfqd->in_service_queue = NULL;
+
+From 1521ad11f8684cf0a1b7249249cd406fee50da6d Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 29 Mar 2017 18:41:46 +0200
+Subject: [PATCH 23/51] BUGFIX: Remove unneeded and deadlock-causing lock in
+ request_merged
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 8e7589d3280f..bb046335ff4f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1761,7 +1761,6 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ BUG_ON(RQ_BFQQ(req) != bfqq);
+ elv_rb_add(&bfqq->sort_list, req);
+
+- spin_lock_irq(&bfqd->lock);
+ /* Choose next request to be served for bfqq */
+ prev = bfqq->next_rq;
+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
+@@ -1783,7 +1782,6 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ bfq_updated_next_req(bfqd, bfqq);
+ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
+- spin_unlock_irq(&bfqd->lock);
+ }
+ }
+
+
+From 9136b4c953918ea937254c57cfb787b55b5bc2c6 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 29 Mar 2017 18:55:30 +0200
+Subject: [PATCH 24/51] Fix wrong unlikely
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index bb046335ff4f..3ae9bd424b3f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4917,7 +4917,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
+ &new_queue);
+
+- if (unlikely(!new_queue)) {
++ if (likely(!new_queue)) {
+ /* If the queue was seeky for too long, break it apart. */
+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
+ BUG_ON(!is_sync);
+
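The one-word change above only flips a branch-prediction hint: likely()/unlikely() are thin wrappers around __builtin_expect() and never change program semantics, only which path the compiler treats as the expected fall-through. A minimal sketch, assuming the GCC/Clang builtin:

/* Minimal re-creation of the kernel hint macros (GCC/Clang builtins assumed). */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* After the fix, the already-existing-queue case is marked as the common
 * path; the function returns the same result either way, only the code
 * layout chosen by the compiler differs. */
int handle_queue(int new_queue)
{
        if (likely(!new_queue))
                return 1;       /* common path: the queue already existed */
        return 0;               /* rare path: a brand-new queue */
}

int main(void)
{
        return handle_queue(0) ? 0 : 1;
}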
+From 8e05f722f19645f2278f6962368ca3b7c2a81c9c Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 12 May 2017 09:51:18 +0200
+Subject: [PATCH 25/51] Change cgroup params prefix to bfq-mq for bfq-mq
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 54 ++++++++++++++++++++++++++-------------------
+ 1 file changed, 31 insertions(+), 23 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index dfacca799b5e..9e9b0a09e26f 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -995,9 +995,15 @@ bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
+ return blkg_to_bfqg(bfqd->queue->root_blkg);
+ }
+
++#ifdef BFQ_MQ
++#define BFQ_CGROUP_FNAME(param) "bfq-mq."#param
++#else
++#define BFQ_CGROUP_FNAME(param) "bfq."#param
++#endif
++
+ static struct cftype bfq_blkcg_legacy_files[] = {
+ {
+- .name = "bfq.weight",
++ .name = BFQ_CGROUP_FNAME(weight),
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = bfq_io_show_weight,
+ .write_u64 = bfq_io_set_weight_legacy,
+@@ -1005,106 +1011,106 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+
+ /* statistics, covers only the tasks in the bfqg */
+ {
+- .name = "bfq.time",
++ .name = BFQ_CGROUP_FNAME(time),
+ .private = offsetof(struct bfq_group, stats.time),
+ .seq_show = bfqg_print_stat,
+ },
+ {
+- .name = "bfq.sectors",
++ .name = BFQ_CGROUP_FNAME(sectors),
+ .seq_show = bfqg_print_stat_sectors,
+ },
+ {
+- .name = "bfq.io_service_bytes",
++ .name = BFQ_CGROUP_FNAME(io_service_bytes),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_bytes,
+ },
+ {
+- .name = "bfq.io_serviced",
++ .name = BFQ_CGROUP_FNAME(io_serviced),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_ios,
+ },
+ {
+- .name = "bfq.io_service_time",
++ .name = BFQ_CGROUP_FNAME(io_service_time),
+ .private = offsetof(struct bfq_group, stats.service_time),
+ .seq_show = bfqg_print_rwstat,
+ },
+ {
+- .name = "bfq.io_wait_time",
++ .name = BFQ_CGROUP_FNAME(io_wait_time),
+ .private = offsetof(struct bfq_group, stats.wait_time),
+ .seq_show = bfqg_print_rwstat,
+ },
+ {
+- .name = "bfq.io_merged",
++ .name = BFQ_CGROUP_FNAME(io_merged),
+ .private = offsetof(struct bfq_group, stats.merged),
+ .seq_show = bfqg_print_rwstat,
+ },
+ {
+- .name = "bfq.io_queued",
++ .name = BFQ_CGROUP_FNAME(io_queued),
+ .private = offsetof(struct bfq_group, stats.queued),
+ .seq_show = bfqg_print_rwstat,
+ },
+
+ /* the same statictics which cover the bfqg and its descendants */
+ {
+- .name = "bfq.time_recursive",
++ .name = BFQ_CGROUP_FNAME(time_recursive),
+ .private = offsetof(struct bfq_group, stats.time),
+ .seq_show = bfqg_print_stat_recursive,
+ },
+ {
+- .name = "bfq.sectors_recursive",
++ .name = BFQ_CGROUP_FNAME(sectors_recursive),
+ .seq_show = bfqg_print_stat_sectors_recursive,
+ },
+ {
+- .name = "bfq.io_service_bytes_recursive",
++ .name = BFQ_CGROUP_FNAME(io_service_bytes_recursive),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_bytes_recursive,
+ },
+ {
+- .name = "bfq.io_serviced_recursive",
++ .name = BFQ_CGROUP_FNAME(io_serviced_recursive),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_ios_recursive,
+ },
+ {
+- .name = "bfq.io_service_time_recursive",
++ .name = BFQ_CGROUP_FNAME(io_service_time_recursive),
+ .private = offsetof(struct bfq_group, stats.service_time),
+ .seq_show = bfqg_print_rwstat_recursive,
+ },
+ {
+- .name = "bfq.io_wait_time_recursive",
++ .name = BFQ_CGROUP_FNAME(io_wait_time_recursive),
+ .private = offsetof(struct bfq_group, stats.wait_time),
+ .seq_show = bfqg_print_rwstat_recursive,
+ },
+ {
+- .name = "bfq.io_merged_recursive",
++ .name = BFQ_CGROUP_FNAME(io_merged_recursive),
+ .private = offsetof(struct bfq_group, stats.merged),
+ .seq_show = bfqg_print_rwstat_recursive,
+ },
+ {
+- .name = "bfq.io_queued_recursive",
++ .name = BFQ_CGROUP_FNAME(io_queued_recursive),
+ .private = offsetof(struct bfq_group, stats.queued),
+ .seq_show = bfqg_print_rwstat_recursive,
+ },
+ {
+- .name = "bfq.avg_queue_size",
++ .name = BFQ_CGROUP_FNAME(avg_queue_size),
+ .seq_show = bfqg_print_avg_queue_size,
+ },
+ {
+- .name = "bfq.group_wait_time",
++ .name = BFQ_CGROUP_FNAME(group_wait_time),
+ .private = offsetof(struct bfq_group, stats.group_wait_time),
+ .seq_show = bfqg_print_stat,
+ },
+ {
+- .name = "bfq.idle_time",
++ .name = BFQ_CGROUP_FNAME(idle_time),
+ .private = offsetof(struct bfq_group, stats.idle_time),
+ .seq_show = bfqg_print_stat,
+ },
+ {
+- .name = "bfq.empty_time",
++ .name = BFQ_CGROUP_FNAME(empty_time),
+ .private = offsetof(struct bfq_group, stats.empty_time),
+ .seq_show = bfqg_print_stat,
+ },
+ {
+- .name = "bfq.dequeue",
++ .name = BFQ_CGROUP_FNAME(dequeue),
+ .private = offsetof(struct bfq_group, stats.dequeue),
+ .seq_show = bfqg_print_stat,
+ },
+@@ -1113,7 +1119,7 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+
+ static struct cftype bfq_blkg_files[] = {
+ {
+- .name = "bfq.weight",
++ .name = BFQ_CGROUP_FNAME(weight),
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = bfq_io_show_weight,
+ .write = bfq_io_set_weight,
+@@ -1121,6 +1127,8 @@ static struct cftype bfq_blkg_files[] = {
+ {} /* terminate */
+ };
+
++#undef BFQ_CGROUP_FNAME
++
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
+
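The BFQ_CGROUP_FNAME macro added above relies only on stringization (#param) plus adjacent string-literal concatenation, so every attribute name gets the "bfq-mq." or "bfq." prefix at compile time. A small stand-alone sketch of the expansion, user-space and illustrative only:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define BFQ_MQ                                    /* pretend we are building bfq-mq */
#ifdef BFQ_MQ
#define BFQ_CGROUP_FNAME(param) "bfq-mq."#param
#else
#define BFQ_CGROUP_FNAME(param) "bfq."#param
#endif

int main(void)
{
        /* "bfq-mq." and the stringized parameter are concatenated at compile time. */
        assert(strcmp(BFQ_CGROUP_FNAME(weight), "bfq-mq.weight") == 0);
        printf("%s\n", BFQ_CGROUP_FNAME(io_service_bytes));  /* bfq-mq.io_service_bytes */
        return 0;
}

The #undef at the end of the hunk keeps the helper macro from leaking beyond the cftype table definitions.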
+From abdf7565dadbb00e78be5f4fb2cc9b157649840e Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 12 May 2017 11:56:13 +0200
+Subject: [PATCH 26/51] Add tentative extra tests on groups, reqs and queues
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 1 +
+ block/bfq-mq-iosched.c | 5 +++++
+ include/linux/blkdev.h | 2 ++
+ 3 files changed, 8 insertions(+)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 9e9b0a09e26f..72107ad12220 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -412,6 +412,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
+ BUG_ON(!blkg);
+ bfqg = blkg_to_bfqg(blkg);
+ bfqd = blkg->q->elevator->elevator_data;
++ BUG_ON(bfqg == bfqd->root_group);
+ entity = &bfqg->entity;
+ d = blkcg_to_bfqgd(blkg->blkcg);
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 3ae9bd424b3f..a9e3406fef06 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4494,6 +4494,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
++ BUG_ON(!bfqq);
+
+ assert_spin_locked(&bfqd->lock);
+
+@@ -4587,6 +4588,9 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ "insert_request %p in disp: at_head %d",
+ rq, at_head);
+ } else {
++ BUG_ON(!(rq->rq_flags & RQF_GOT));
++ rq->rq_flags &= ~RQF_GOT;
++
+ __bfq_insert_request(bfqd, rq);
+
+ if (rq_mergeable(rq)) {
+@@ -4974,6 +4978,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ if (unlikely(bfq_bfqq_just_created(bfqq)))
+ bfq_handle_burst(bfqd, bfqq);
+
++ rq->rq_flags |= RQF_GOT;
+ spin_unlock_irq(&bfqd->lock);
+
+ return 0;
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 0048e59e6d07..9ae814743095 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -123,6 +123,8 @@ typedef __u32 __bitwise req_flags_t;
+ #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
+ /* DEBUG: rq in bfq-mq dispatch list */
+ #define RQF_DISP_LIST ((__force req_flags_t)(1 << 19))
++/* DEBUG: rq had get_rq_private executed on it */
++#define RQF_GOT ((__force req_flags_t)(1 << 20))
+
+ /* flags that prevent us from merging requests: */
+ #define RQF_NOMERGE_FLAGS \
+
+From 9e1c1514bc947c4e04502331372b1cc58459d8d1 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 15 May 2017 22:25:03 +0200
+Subject: [PATCH 27/51] block, bfq-mq: access and cache blkg data only when
+ safe
+
+In blk-cgroup, operations on blkg objects are protected with the
+request_queue lock. This is no longer the lock that protects
+I/O-scheduler operations in blk-mq. In fact, the latter are now
+protected with a finer-grained per-scheduler-instance lock. As a
+consequence, although blkg lookups are also rcu-protected, blk-mq I/O
+schedulers may see inconsistent data when they access blkg and
+blkg-related objects. BFQ does access these objects, and does incur
+this problem, in the following case.
+
+The blkg_lookup performed in bfq_get_queue, being protected (only)
+through rcu, may happen to return the address of a copy of the
+original blkg. If this is the case, then the blkg_get performed in
+bfq_get_queue, to pin down the blkg, is useless: it does not prevent
+blk-cgroup code from destroying both the original blkg and all objects
+directly or indirectly referred by the copy of the blkg. BFQ accesses
+these objects, which typically causes a crash for NULL-pointer
+dereference of memory-protection violation.
+
+Some additional protection mechanism should be added to blk-cgroup to
+address this issue. In the meantime, this commit provides a quick
+temporary fix for BFQ: cache (when safe) blkg data that might
+disappear right after a blkg_lookup.
+
+In particular, this commit exploits the following facts to achieve its
+goal without introducing further locks. Destroy operations on a blkg
+invoke, as a first step, hooks of the scheduler associated with the
+blkg. And these hooks are executed with bfqd->lock held for BFQ. As a
+consequence, for any blkg associated with the request queue an
+instance of BFQ is attached to, we are guaranteed that such a blkg is
+not destroyed, and that all the pointers it contains are consistent,
+while that instance is holding its bfqd->lock. A blkg_lookup performed
+with bfqd->lock held then returns a fully consistent blkg, which
+remains consistent until this lock is held. In more detail, this holds
+even if the returned blkg is a copy of the original one.
+
+Finally, also the object describing a group inside BFQ needs to be
+protected from destruction on the blkg_free of the original blkg
+(which invokes bfq_pd_free). This commit adds private refcounting for
+this object, to let it disappear only after no bfq_queue refers to it
+any longer.
+
+This commit also removes or updates some stale comments on locking
+issues related to blk-cgroup operations.
+
+Reported-by: Tomas Konir <tomas.konir@gmail.com>
+Reported-by: Lee Tibbert <lee.tibbert@gmail.com>
+Reported-by: Marco Piazza <mpiazza@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Tomas Konir <tomas.konir@gmail.com>
+Tested-by: Lee Tibbert <lee.tibbert@gmail.com>
+Tested-by: Marco Piazza <mpiazza@gmail.com>
+---
+ block/bfq-cgroup-included.c | 149 ++++++++++++++++++++++++++++++++++++++++----
+ block/bfq-mq-iosched.c | 2 +-
+ block/bfq-mq.h | 26 +++-----
+ 3 files changed, 148 insertions(+), 29 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 72107ad12220..d903393ee78a 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -43,7 +43,11 @@ BFQG_FLAG_FNS(idling)
+ BFQG_FLAG_FNS(empty)
+ #undef BFQG_FLAG_FNS
+
++#ifdef BFQ_MQ
++/* This should be called with the scheduler lock held. */
++#else
+ /* This should be called with the queue_lock held. */
++#endif
+ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
+ {
+ unsigned long long now;
+@@ -58,7 +62,11 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
+ bfqg_stats_clear_waiting(stats);
+ }
+
++#ifdef BFQ_MQ
++/* This should be called with the scheduler lock held. */
++#else
+ /* This should be called with the queue_lock held. */
++#endif
+ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
+ struct bfq_group *curr_bfqg)
+ {
+@@ -72,7 +80,11 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
+ bfqg_stats_mark_waiting(stats);
+ }
+
++#ifdef BFQ_MQ
++/* This should be called with the scheduler lock held. */
++#else
+ /* This should be called with the queue_lock held. */
++#endif
+ static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
+ {
+ unsigned long long now;
+@@ -198,14 +210,43 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
+
+ static void bfqg_get(struct bfq_group *bfqg)
+ {
+- return blkg_get(bfqg_to_blkg(bfqg));
++#ifdef BFQ_MQ
++ bfqg->ref++;
++#else
++ blkg_get(bfqg_to_blkg(bfqg));
++#endif
+ }
+
+ static void bfqg_put(struct bfq_group *bfqg)
+ {
+- return blkg_put(bfqg_to_blkg(bfqg));
++#ifdef BFQ_MQ
++ bfqg->ref--;
++
++ BUG_ON(bfqg->ref < 0);
++ if (bfqg->ref == 0)
++ kfree(bfqg);
++#else
++ blkg_put(bfqg_to_blkg(bfqg));
++#endif
++}
++
++#ifdef BFQ_MQ
++static void bfqg_and_blkg_get(struct bfq_group *bfqg)
++{
++ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
++ bfqg_get(bfqg);
++
++ blkg_get(bfqg_to_blkg(bfqg));
+ }
+
++static void bfqg_and_blkg_put(struct bfq_group *bfqg)
++{
++ bfqg_put(bfqg);
++
++ blkg_put(bfqg_to_blkg(bfqg));
++}
++#endif
++
+ static void bfqg_stats_update_io_add(struct bfq_group *bfqg,
+ struct bfq_queue *bfqq,
+ unsigned int op)
+@@ -310,7 +351,15 @@ static void bfq_init_entity(struct bfq_entity *entity,
+ if (bfqq) {
+ bfqq->ioprio = bfqq->new_ioprio;
+ bfqq->ioprio_class = bfqq->new_ioprio_class;
++#ifdef BFQ_MQ
++ /*
++ * Make sure that bfqg and its associated blkg do not
++ * disappear before entity.
++ */
++ bfqg_and_blkg_get(bfqg);
++#else
+ bfqg_get(bfqg);
++#endif
+ }
+ entity->parent = bfqg->my_entity; /* NULL for root group */
+ entity->sched_data = &bfqg->sched_data;
+@@ -397,6 +446,10 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
+ return NULL;
+ }
+
++#ifdef BFQ_MQ
++ /* see comments in bfq_bic_update_cgroup for why refcounting */
++ bfqg_get(bfqg);
++#endif
+ return &bfqg->pd;
+ }
+
+@@ -432,7 +485,11 @@ static void bfq_pd_free(struct blkg_policy_data *pd)
+ struct bfq_group *bfqg = pd_to_bfqg(pd);
+
+ bfqg_stats_exit(&bfqg->stats);
+- return kfree(bfqg);
++#ifdef BFQ_MQ
++ bfqg_put(bfqg);
++#else
++ kfree(bfqg);
++#endif
+ }
+
+ static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
+@@ -516,9 +573,16 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
+ * it on the new one. Avoid putting the entity on the old group idle tree.
+ *
++#ifdef BFQ_MQ
++ * Must be called under the scheduler lock, to make sure that the blkg
++ * owning @bfqg does not disappear (see comments in
++ * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
++ * objects).
++#else
+ * Must be called under the queue lock; the cgroup owning @bfqg must
+ * not disappear (by now this just means that we are called under
+ * rcu_read_lock()).
++#endif
+ */
+ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ struct bfq_group *bfqg)
+@@ -555,16 +619,20 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ entity->tree);
+ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
+ }
++#ifdef BFQ_MQ
++ bfqg_and_blkg_put(bfqq_group(bfqq));
++#else
+ bfqg_put(bfqq_group(bfqq));
++#endif
+
+- /*
+- * Here we use a reference to bfqg. We don't need a refcounter
+- * as the cgroup reference will not be dropped, so that its
+- * destroy() callback will not be invoked.
+- */
+ entity->parent = bfqg->my_entity;
+ entity->sched_data = &bfqg->sched_data;
++#ifdef BFQ_MQ
++ /* pin down bfqg and its associated blkg */
++ bfqg_and_blkg_get(bfqg);
++#else
+ bfqg_get(bfqg);
++#endif
+
+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq));
+ if (bfq_bfqq_busy(bfqq)) {
+@@ -585,8 +653,14 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * @bic: the bic to move.
+ * @blkcg: the blk-cgroup to move to.
+ *
++#ifdef BFQ_MQ
++ * Move bic to blkcg, assuming that bfqd->lock is held; which makes
++ * sure that the reference to cgroup is valid across the call (see
++ * comments in bfq_bic_update_cgroup on this issue)
++#else
+ * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
+ * has to make sure that the reference to cgroup is valid across the call.
++#endif
+ *
+ * NOTE: an alternative approach might have been to store the current
+ * cgroup in bfqq and getting a reference to it, reducing the lookup
+@@ -645,6 +719,59 @@ static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
+ goto out;
+
+ bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
++#ifdef BFQ_MQ
++ /*
++ * Update blkg_path for bfq_log_* functions. We cache this
++ * path, and update it here, for the following
++ * reasons. Operations on blkg objects in blk-cgroup are
++ * protected with the request_queue lock, and not with the
++ * lock that protects the instances of this scheduler
++ * (bfqd->lock). This exposes BFQ to the following sort of
++ * race.
++ *
++ * The blkg_lookup performed in bfq_get_queue, protected
++ * through rcu, may happen to return the address of a copy of
++ * the original blkg. If this is the case, then the
++ * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
++ * the blkg, is useless: it does not prevent blk-cgroup code
++ * from destroying both the original blkg and all objects
++ * directly or indirectly referred by the copy of the
++ * blkg.
++ *
++ * On the bright side, destroy operations on a blkg invoke, as
++ * a first step, hooks of the scheduler associated with the
++ * blkg. And these hooks are executed with bfqd->lock held for
++ * BFQ. As a consequence, for any blkg associated with the
++ * request queue this instance of the scheduler is attached
++ * to, we are guaranteed that such a blkg is not destroyed, and
++ * that all the pointers it contains are consistent, while we
++ * are holding bfqd->lock. A blkg_lookup performed with
++ * bfqd->lock held then returns a fully consistent blkg, which
++ * remains consistent until this lock is held.
++ *
++ * Thanks to the last fact, and to the fact that: (1) bfqg has
++ * been obtained through a blkg_lookup in the above
++ * assignment, and (2) bfqd->lock is being held, here we can
++ * safely use the policy data for the involved blkg (i.e., the
++ * field bfqg->pd) to get to the blkg associated with bfqg,
++ * and then we can safely use any field of blkg. After we
++ * release bfqd->lock, even just getting blkg through this
++ * bfqg may cause dangling references to be traversed, as
++ * bfqg->pd may not exist any more.
++ *
++ * In view of the above facts, here we cache, in the bfqg, any
++ * blkg data we may need for this bic, and for its associated
++ * bfq_queue. As of now, we need to cache only the path of the
++ * blkg, which is used in the bfq_log_* functions.
++ *
++ * Finally, note that bfqg itself needs to be protected from
++ * destruction on the blkg_free of the original blkg (which
++ * invokes bfq_pd_free). We use an additional private
++ * refcounter for bfqg, to let it disappear only after no
++ * bfq_queue refers to it any longer.
++ */
++ blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
++#endif
+ bic->blkcg_serial_nr = serial_nr;
+ out:
+ rcu_read_unlock();
+@@ -682,8 +809,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
+ * @bfqd: the device data structure with the root group.
+ * @bfqg: the group to move from.
+ * @st: the service tree with the entities.
+- *
+- * Needs queue_lock to be taken and reference to be valid over the call.
+ */
+ static void bfq_reparent_active_entities(struct bfq_data *bfqd,
+ struct bfq_group *bfqg,
+@@ -736,6 +861,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ #ifdef BFQ_MQ
+ spin_lock_irqsave(&bfqd->lock, flags);
+ #endif
++
+ /*
+ * Empty all service_trees belonging to this group before
+ * deactivating the group itself.
+@@ -746,8 +872,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ /*
+ * The idle tree may still contain bfq_queues belonging
+ * to exited task because they never migrated to a different
+- * cgroup from the one being destroyed now. No one else
+- * can access them so it's safe to act without any lock.
++ * cgroup from the one being destroyed now.
+ */
+ bfq_flush_idle_tree(st);
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index a9e3406fef06..4eb668eeacdc 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4073,7 +4073,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+
+ kmem_cache_free(bfq_pool, bfqq);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+- bfqg_put(bfqg);
++ bfqg_and_blkg_put(bfqg);
+ #endif
+ }
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 36ee24a87dda..77ab0f22ed22 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -695,23 +695,17 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+- char __pbuf[128]; \
+- \
+- blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+ pr_crit("%s bfq%d%c %s " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- __pbuf, ##args); \
++ bfqq_group(bfqq)->blkg_path, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+- char __pbuf[128]; \
+- \
+- blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
+ pr_crit("%s %s " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+- __pbuf, ##args); \
++ bfqg->blkg_path, ##args); \
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+@@ -736,20 +730,14 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+- char __pbuf[128]; \
+- \
+- blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- __pbuf, ##args); \
++ bfqq_group(bfqq)->blkg_path, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+- char __pbuf[128]; \
+- \
+- blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
+- blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
++ blk_add_trace_msg((bfqd)->queue, "%s " fmt, bfqg->blkg_path, ##args);\
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+@@ -860,6 +848,12 @@ struct bfq_group {
+ /* must be the first member */
+ struct blkg_policy_data pd;
+
++ /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
++ char blkg_path[128];
++
++ /* reference counter (see comments in bfq_bic_update_cgroup) */
++ int ref;
++
+ struct bfq_entity entity;
+ struct bfq_sched_data sched_data;
+
+
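The hunk above introduces two tightly coupled ideas for bfq-mq: a private reference counter on bfq_group, so the group can outlive the blkg whose policy data embeds it, and a cached copy of the blkg path, taken while bfqd->lock is held, so the logging macros never have to dereference a possibly stale blkg. A minimal userspace sketch of the same pattern (all names here, such as fake_blkg and fake_bfqg, are illustrative stand-ins, not the kernel structures):

#include <stdio.h>
#include <stdlib.h>

/* illustrative stand-ins for struct blkcg_gq and struct bfq_group */
struct fake_blkg { char path[128]; };

struct fake_bfqg {
        char blkg_path[128];    /* cached copy, safe to read without the blkg */
        int ref;                /* private refcount, like bfqg->ref */
};

static void bfqg_get(struct fake_bfqg *g) { g->ref++; }

static void bfqg_put(struct fake_bfqg *g)
{
        if (--g->ref == 0)
                free(g);
}

int main(void)
{
        struct fake_blkg blkg = { .path = "/sys/fs/cgroup/blkio/grp0" };
        struct fake_bfqg *g = calloc(1, sizeof(*g));

        if (!g)
                return 1;

        g->ref = 1;             /* reference held on behalf of the blkg side */
        bfqg_get(g);            /* reference held by a bfq_queue */

        /* cache the path once, while the "scheduler lock" would be held */
        snprintf(g->blkg_path, sizeof(g->blkg_path), "%s", blkg.path);

        /* the blkg may now vanish; logging still has a consistent path */
        printf("bfq group %s, ref=%d\n", g->blkg_path, g->ref);

        bfqg_put(g);            /* queue drops its reference */
        bfqg_put(g);            /* blkg side drops; group is finally freed */
        return 0;
}

The last bfqg_put() frees the group only after both the queue side and the blkg side have dropped their references, which is the lifetime rule the long comment in bfq_bic_update_cgroup describes.
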
+From c9137b749aceef6c2dde88e99b2fc978d5952e76 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Sat, 17 Jun 2017 11:18:11 +0200
+Subject: [PATCH 28/51] bfq-mq: fix macro name in conditional invocation of
+ policy_unregister
+
+This commit fixes the name of the macro in the conditional group that
+invokes blkcg_policy_unregister in bfq_exit for bfq-mq. Because of
+this error, blkcg_policy_unregister was never invoked.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 4eb668eeacdc..bc1de3f70ea8 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -5669,7 +5669,7 @@ static int __init bfq_init(void)
+ static void __exit bfq_exit(void)
+ {
+ elv_unregister(&iosched_bfq_mq);
+-#ifdef CONFIG_BFQ_GROUP_ENABLED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ bfq_slab_kill();
+
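The bug fixed above is easy to reproduce in isolation: a block guarded by #ifdef on a macro name that is never defined compiles without any diagnostic and is simply dropped, so the unregister call silently disappears. A small self-contained illustration (hypothetical functions, same macro names as the patch):

#include <stdio.h>

#define BFQ_GROUP_IOSCHED_ENABLED 1    /* the macro that is actually defined */

static void policy_unregister(void)
{
        puts("policy unregistered");
}

static void exit_wrong(void)
{
#ifdef CONFIG_BFQ_GROUP_ENABLED        /* misspelled: never defined, block dropped */
        policy_unregister();
#endif
}

static void exit_fixed(void)
{
#ifdef BFQ_GROUP_IOSCHED_ENABLED       /* correct name: block is compiled in */
        policy_unregister();
#endif
}

int main(void)
{
        exit_wrong();   /* prints nothing: the call was silently discarded */
        exit_fixed();   /* prints "policy unregistered" */
        return 0;
}
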
+From c7ceb37496f63b2dba4d06946ab85ec97b87bfb5 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 11:48:17 +0200
+Subject: [PATCH 29/51] Port of "blk-mq-sched: unify request finished methods"
+
+No need to have two different callouts of bfq vs kyber.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+ block/bfq-mq-iosched.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index bc1de3f70ea8..2598602a0b10 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4753,7 +4753,7 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
+ bfq_put_queue(bfqq);
+ }
+
+-static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
++static void bfq_finish_request(struct request *rq)
+ {
+ struct bfq_queue *bfqq;
+ struct bfq_data *bfqd;
+@@ -4814,7 +4814,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+
+ assert_spin_locked(&bfqd->lock);
+ if (!RB_EMPTY_NODE(&rq->rb_node))
+- bfq_remove_request(q, rq);
++ bfq_remove_request(rq->q, rq);
+ bfq_put_rq_priv_body(bfqq);
+ }
+
+@@ -5558,7 +5558,7 @@ static struct elv_fs_entry bfq_attrs[] = {
+ static struct elevator_type iosched_bfq_mq = {
+ .ops.mq = {
+ .get_rq_priv = bfq_get_rq_private,
+- .put_rq_priv = bfq_put_rq_private,
++ .finish_request = bfq_finish_request,
+ .exit_icq = bfq_exit_icq,
+ .insert_requests = bfq_insert_requests,
+ .dispatch_request = bfq_dispatch_request,
+
+From 12bef026fe114ab5e2e284772ddc52a8be83fdbc Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 11:54:57 +0200
+Subject: [PATCH 30/51] Port of "bfq-iosched: fix NULL ioc check in
+ bfq_get_rq_private"
+
+icq_to_bic is a container_of operation, so we need to check for NULL
+before it. Also move the check outside the spinlock while we're at
+it.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+ block/bfq-mq-iosched.c | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 2598602a0b10..c57774a60911 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4903,16 +4903,17 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+- struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++ struct bfq_io_cq *bic;
+ const int is_sync = rq_is_sync(rq);
+ struct bfq_queue *bfqq;
+ bool bfqq_already_existing = false, split = false;
+ bool new_queue = false;
+
+- spin_lock_irq(&bfqd->lock);
++ if (!rq->elv.icq)
++ return 1;
++ bic = icq_to_bic(rq->elv.icq);
+
+- if (!bic)
+- goto queue_fail;
++ spin_lock_irq(&bfqd->lock);
+
+ bfq_check_ioprio_change(bic, bio);
+
+@@ -4980,13 +4981,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ rq->rq_flags |= RQF_GOT;
+ spin_unlock_irq(&bfqd->lock);
+-
+ return 0;
+-
+-queue_fail:
+- spin_unlock_irq(&bfqd->lock);
+-
+- return 1;
+ }
+
+ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+
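The reasoning in the commit message above hinges on what container_of actually does: it is plain pointer arithmetic, subtracting offsetof(type, member) from the member pointer, so applying it to a NULL icq yields a non-NULL bogus address and a later NULL test on the result can never fire. A hedged userspace sketch of the corrected ordering (the struct layout below is illustrative, not the real io_cq/bfq_io_cq layout):

#include <stdio.h>
#include <stddef.h>

/*
 * Illustrative layout only: an "icq" embedded inside a larger "bic",
 * loosely mirroring how struct bfq_io_cq embeds its struct io_cq.
 */
struct icq { int id; };

struct bic {
        int some_state;
        struct icq icq;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void prepare_request(struct icq *icq)
{
        struct bic *b;

        /* corrected ordering: test the embedded pointer first */
        if (!icq) {
                puts("no icq attached, bail out before container_of");
                return;
        }

        b = container_of(icq, struct bic, icq);
        printf("found bic with state %d\n", b->some_state);
}

int main(void)
{
        struct bic real = { .some_state = 42, .icq = { .id = 7 } };

        /*
         * container_of subtracts offsetof(struct bic, icq) from the icq
         * pointer; applied to NULL it would yield a non-NULL bogus
         * address, so "b = icq_to_bic(icq); if (!b)" can never detect a
         * missing icq; the NULL check has to come first.
         */
        printf("icq lives %zu bytes into bic\n", offsetof(struct bic, icq));

        prepare_request(NULL);        /* handled by the early return */
        prepare_request(&real.icq);   /* normal path */
        return 0;
}
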
+From 633e5711347df1bf4ca935fd0aa9118a0054f75d Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 12:02:16 +0200
+Subject: [PATCH 31/51] Port of "blk-mq-sched: unify request prepare methods"
+
+This patch makes sure we always allocate requests in the core blk-mq
+code and use a common prepare_request method to initialize them for
+both mq I/O schedulers. For Kyber an additional limit_depth method
+is added that is called before allocating the request.
+
+Also, because none of the initializations can really fail, the new method
+does not return an error - instead the bfq finish method is hardened
+to deal with the no-IOC case.
+
+Last but not least this removes the abuse of RQF_QUEUE by the blk-mq
+scheduling code as RQF_ELFPRIV is all that is needed now.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+ block/bfq-mq-iosched.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index c57774a60911..49ffca1ad6e7 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4760,6 +4760,10 @@ static void bfq_finish_request(struct request *rq)
+ struct bfq_io_cq *bic;
+
+ BUG_ON(!rq);
++
++ if (!rq->elv.icq)
++ return;
++
+ bfqq = RQ_BFQQ(rq);
+ BUG_ON(!bfqq);
+
+@@ -4899,9 +4903,9 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
+ /*
+ * Allocate bfq data structures associated with this request.
+ */
+-static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+- struct bio *bio)
++static void bfq_prepare_request(struct request *rq, struct bio *bio)
+ {
++ struct request_queue *q = rq->q;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_io_cq *bic;
+ const int is_sync = rq_is_sync(rq);
+@@ -4910,7 +4914,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ bool new_queue = false;
+
+ if (!rq->elv.icq)
+- return 1;
++ return;
+ bic = icq_to_bic(rq->elv.icq);
+
+ spin_lock_irq(&bfqd->lock);
+@@ -4981,7 +4985,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ rq->rq_flags |= RQF_GOT;
+ spin_unlock_irq(&bfqd->lock);
+- return 0;
+ }
+
+ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+@@ -5552,7 +5555,7 @@ static struct elv_fs_entry bfq_attrs[] = {
+
+ static struct elevator_type iosched_bfq_mq = {
+ .ops.mq = {
+- .get_rq_priv = bfq_get_rq_private,
++ .prepare_request = bfq_prepare_request,
+ .finish_request = bfq_finish_request,
+ .exit_icq = bfq_exit_icq,
+ .insert_requests = bfq_insert_requests,
+
+From 5a321acfce282c3e58ac63582faf6f928ad17f27 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 12:43:22 +0200
+Subject: [PATCH 32/51] Add list of bfq instances to documentation
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ Documentation/block/bfq-iosched.txt | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index 3d6951d63489..8ce6b9a9bacd 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -11,6 +11,15 @@ controllers), BFQ's main features are:
+ groups (switching back to time distribution when needed to keep
+ throughput high).
+
++If bfq-mq patches have been applied, then the following three
++instances of BFQ are available (otherwise only the first instance):
++- bfq: mainline version of BFQ, for blk-mq
++- bfq-mq: development version of BFQ for blk-mq; this version contains
++ also all latest features not yet landed in mainline, plus many
++ safety checks
++- bfq: BFQ for legacy blk; also this version contains both latest
++ features and safety checks
++
+ In its default configuration, BFQ privileges latency over
+ throughput. So, when needed for achieving a lower latency, BFQ builds
+ schedules that may lead to a lower throughput. If your main or only
+@@ -27,7 +36,7 @@ sequential I/O (e.g., 8-12 GB/s if I/O requests are 256 KB large), and
+ to 120-200 MB/s with 4KB random I/O. BFQ is currently being tested on
+ multi-queue devices too.
+
+-The table of contents follow. Impatients can just jump to Section 3.
++The table of contents follows. Impatients can just jump to Section 3.
+
+ CONTENTS
+
+
+From 9f2e5b27227fd9254cc258572dc2d4531838c30b Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 16:28:00 +0200
+Subject: [PATCH 33/51] bfq-sq: fix prefix of names of cgroups parameters
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ Documentation/block/bfq-iosched.txt | 12 +++++++-----
+ block/bfq-cgroup-included.c | 2 +-
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index 8ce6b9a9bacd..965d82f94db9 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -503,10 +503,12 @@ To get proportional sharing of bandwidth with BFQ for a given device,
+ BFQ must of course be the active scheduler for that device.
+
+ Within each group directory, the names of the files associated with
+-BFQ-specific cgroup parameters and stats begin with the "bfq."
+-prefix. So, with cgroups-v1 or cgroups-v2, the full prefix for
+-BFQ-specific files is "blkio.bfq." or "io.bfq." For example, the group
+-parameter to set the weight of a group with BFQ is blkio.bfq.weight
++BFQ-specific cgroup parameters and stats begin with the "bfq.",
++"bfq-sq." or "bfq-mq." prefix, depending on which instance of bfq you
++want to use. So, with cgroups-v1 or cgroups-v2, the full prefix for
++BFQ-specific files is "blkio.bfqX." or "io.bfqX.", where X can be ""
++(i.e., null string), "-sq" or "-mq". For example, the group parameter
++to set the weight of a group with the mainline BFQ is blkio.bfq.weight
+ or io.bfq.weight.
+
+ Parameters to set
+@@ -514,7 +516,7 @@ Parameters to set
+
+ For each group, there is only the following parameter to set.
+
+-weight (namely blkio.bfq.weight or io.bfq-weight): the weight of the
++weight (namely blkio.bfqX.weight or io.bfqX.weight): the weight of the
+ group inside its parent. Available values: 1..10000 (default 100). The
+ linear mapping between ioprio and weights, described at the beginning
+ of the tunable section, is still valid, but all weights higher than
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index d903393ee78a..631e53d9150d 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -1124,7 +1124,7 @@ bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
+ #ifdef BFQ_MQ
+ #define BFQ_CGROUP_FNAME(param) "bfq-mq."#param
+ #else
+-#define BFQ_CGROUP_FNAME(param) "bfq."#param
++#define BFQ_CGROUP_FNAME(param) "bfq-sq."#param
+ #endif
+
+ static struct cftype bfq_blkcg_legacy_files[] = {
+
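For readers unfamiliar with the BFQ_CGROUP_FNAME macro being switched above: it builds the per-instance cgroup file names at compile time by stringizing the parameter name and letting the compiler concatenate it with the chosen prefix. A tiny standalone sketch (the main() harness is hypothetical; the macro shape matches the patch):

#include <stdio.h>

/*
 * Same shape as the macro changed above: the instance prefix is chosen
 * at build time, then the parameter name is stringized with #param and
 * concatenated with the prefix as adjacent string literals.
 */
#ifdef BFQ_MQ
#define BFQ_CGROUP_FNAME(param) "bfq-mq."#param
#else
#define BFQ_CGROUP_FNAME(param) "bfq-sq."#param
#endif

int main(void)
{
        /*
         * With BFQ_MQ undefined this prints "bfq-sq.weight", i.e. the
         * blkio.bfq-sq.weight / io.bfq-sq.weight files described in the
         * documentation hunk above.
         */
        printf("%s\n", BFQ_CGROUP_FNAME(weight));
        return 0;
}
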
+From 92b42df8166939ccf26aa450125b5b575cf6d505 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 21:08:32 +0200
+Subject: [PATCH 34/51] Add to documentation that bfq-mq and bfq-sq contain
+ last fixes too
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ Documentation/block/bfq-iosched.txt | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index 965d82f94db9..0e59f1c9d30e 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -15,10 +15,10 @@ If bfq-mq patches have been applied, then the following three
+ instances of BFQ are available (otherwise only the first instance):
+ - bfq: mainline version of BFQ, for blk-mq
+ - bfq-mq: development version of BFQ for blk-mq; this version contains
+- also all latest features not yet landed in mainline, plus many
++ also all latest features and fixes not yet landed in mainline, plus many
+ safety checks
+-- bfq: BFQ for legacy blk; also this version contains both latest
+- features and safety checks
++- bfq: BFQ for legacy blk; also this version contains latest features
++ and fixes, as well as safety checks
+
+ In its default configuration, BFQ privileges latency over
+ throughput. So, when needed for achieving a lower latency, BFQ builds
+
+From 7f9bdd433b848d4f53c167258bf4d0b3f1ae1923 Mon Sep 17 00:00:00 2001
+From: Lee Tibbert <lee.tibbert@gmail.com>
+Date: Wed, 19 Jul 2017 10:28:32 -0400
+Subject: [PATCH 35/51] Improve most frequently used no-logging path
+
+This patch originated as a fix for compiler unused-variable warnings
+issued when compiling bfq-mq with logging disabled (both
+CONFIG_BLK_DEV_IO_TRACE and CONFIG_BFQ_REDIRECT_TO_CONSOLE
+undefined).
+
+It turns out to also have benefits for the bfq-sq path as well.
+
+In most performance sensitive production builds blktrace_api logging
+will probably be turned off, so it is worth making the no-logging path
+compile without warnings. Any performance benefit is a bonus.
+
+Thank you to T. B. on the bfq-iosched@googlegroups.com list
+for the ((void) (bfqq)) simplification/suggestion/improvement. All bugs
+and unclear descriptions are my own doing.
+
+The discussion below is based on the gcc compiler with an optimization
+level of at least O2. Lower optimization levels are unlikely to
+remove no-op instruction equivalents.
+
+Provide three improvements in this likely case.
+
+ 1) Fix multiple occurrences of an unused-variable warning
+ issued when compiling bfq-mq with no logging. The warning
+ occurred each time the bfq_log_bfqg macro was expanded inside
+ a code block such as the following snippet from
+ block/bfq-sched.c, line 139 and a few following lines, lightly edited for
+ indentation in order to pass checkpatch.pl maximum line lengths.
+
+else {
+ struct bfq_group *bfqg =
+ container_of(next_in_service,
+ struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+ "update_next_in_service: chosen this entity");
+ }
+
+ Previously bfq-mq.h expanded bfq_log_bfqg to blk_add_trace_msg.
+ When both bfq console logging and blktrace_api logging are
+ disabled, include/linux/blktrace_api expands to
+ do { } while (0), leaving the code block local variable unused.
+
+ bfq_log_bfqq() had similar behavior but is never called with
+ a potentially unused variable. This patch fixes that macro for
+ consistency.
+
+ bfq-sq.h (single queue) with blktrace_api enabled, and the bfq
+ console logging macros, have code paths which do not trigger this
+ warning.
+
+ kernel.org (4.12 & 4.13) bfq (bfq-iosched.h) could trigger
+ the warning but no code does so now. This patch fixes
+ bfq-iosched.h for consistency.
+
+ The style above enables a software engineering approach where
+ complex expressions are moved to a local variable before the
+ bfq_log* call. This makes it easier to read the expression and
+ use breakpoints to verify it. bfq-mq uses this approach in
+ several places.
+
+ New bfq_log* macros are provided for the no-logging case.
+ I touch only the second argument, because current code never
+ uses the local variable approach with the first or other
+ arguments. I tried to balance consistency with simplicity.
+
+ 2) For bfq-sq, reduce to zero, the number of instructions executed
+ when no logging is configured. No sense marshaling arguments
+ which are never going to be used.
+
+ On a trial V8R11 build, this reduced the size of bfq-iosched.o
+ by 14.3 KiB. The size went from 70304 to 55664 bytes.
+
+ bfq-mq and kernel.org bfq code size does not change because
+ existing macros already optimize to zero bytes when not logging.
+ The current change maintains consistency with the bfq-sq path
+ and makes the bfq-mq & bfq no-logging paths resistant to future
+ logging-path macro changes which might otherwise generate code.
+
+ 3) Slightly reduce compile time of all bfq variants by including
+ blktrace_api.h only when it will be used.
+
+Signed-off-by: Lee Tibbert <lee.tibbert@gmail.com>
+---
+ block/bfq-mq.h | 18 +++++++++++++++++-
+ block/bfq.h | 18 +++++++++++++++++-
+ 2 files changed, 34 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 77ab0f22ed22..7ed2cc29be57 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -15,7 +15,6 @@
+ #ifndef _BFQ_H
+ #define _BFQ_H
+
+-#include <linux/blktrace_api.h>
+ #include <linux/hrtimer.h>
+ #include <linux/blk-cgroup.h>
+
+@@ -725,6 +724,21 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ ##args)
+
+ #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++
++#if !defined(CONFIG_BLK_DEV_IO_TRACE)
++
++/* Avoid possible "unused-variable" warning. See commit message. */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) ((void) (bfqq))
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) ((void) (bfqg))
++
++#define bfq_log(bfqd, fmt, args...) do {} while (0)
++
++#else /* CONFIG_BLK_DEV_IO_TRACE */
++
++#include <linux/blktrace_api.h>
++
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+@@ -752,6 +766,8 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log(bfqd, fmt, args...) \
+ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++
++#endif /* CONFIG_BLK_DEV_IO_TRACE */
+ #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+
+ /* Expiration reasons. */
+diff --git a/block/bfq.h b/block/bfq.h
+index 53954d1b87f8..15d326f466b7 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -15,7 +15,6 @@
+ #ifndef _BFQ_H
+ #define _BFQ_H
+
+-#include <linux/blktrace_api.h>
+ #include <linux/hrtimer.h>
+ #include <linux/blk-cgroup.h>
+
+@@ -725,6 +724,21 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ ##args)
+
+ #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++
++#if !defined(CONFIG_BLK_DEV_IO_TRACE)
++
++/* Avoid possible "unused-variable" warning. See commit message. */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) ((void) (bfqq))
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) ((void) (bfqg))
++
++#define bfq_log(bfqd, fmt, args...) do {} while (0)
++
++#else /* CONFIG_BLK_DEV_IO_TRACE */
++
++#include <linux/blktrace_api.h>
++
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+@@ -759,6 +773,8 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log(bfqd, fmt, args...) \
+ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++
++#endif /* CONFIG_BLK_DEV_IO_TRACE */
+ #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+
+ /* Expiration reasons. */
+
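The key trick in the patch above is the shape of the no-logging macros: expanding to ((void) (bfqg)) instead of do { } while (0) still counts as a use of the variable, so -Wunused-variable stays quiet, while the cast to void generates no code at any reasonable optimization level. A compact illustration (toy types and macro names, not the ones from the patch):

#include <stdio.h>

/* toy no-logging variants, mirroring the shape introduced above */
#define log_group_old(g, fmt, args...)  do { } while (0)  /* leaves g unused */
#define log_group_new(g, fmt, args...)  ((void) (g))      /* "uses" g, emits nothing */

struct toy_group { int id; };

int main(void)
{
        struct toy_group g0 = { .id = 0 };

        {
                struct toy_group *grp = &g0;

                /*
                 * With the old expansion, grp would trigger
                 * -Wunused-variable whenever logging is compiled out;
                 * the ((void) (grp)) form keeps the warning away while
                 * still generating no code.
                 */
                log_group_new(grp, "chosen this entity");
        }

        puts("built cleanly with logging compiled out");
        return 0;
}
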
+From f11a0e751e741bf94c6a48234824d50b3c0100ad Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 9 Aug 2017 16:40:39 +0200
+Subject: [PATCH 36/51] bfq-sq: fix commit "Remove all get and put of I/O
+ contexts" in branch bfq-mq
+
+The commit "Remove all get and put of I/O contexts" erroneously removed
+the reset of the field in_service_bic for bfq-sq. This commit re-adds
+that missing reset.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-sched.c | 7 +++++++
+ block/bfq-sq-iosched.c | 1 +
+ 2 files changed, 8 insertions(+)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 9c4e6797d8c9..7425824c26b8 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1904,6 +1904,13 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+ struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
+ struct bfq_entity *entity = in_serv_entity;
+
++#ifndef BFQ_MQ
++ if (bfqd->in_service_bic) {
++ put_io_context(bfqd->in_service_bic->icq.ioc);
++ bfqd->in_service_bic = NULL;
++ }
++#endif
++
+ bfq_clear_bfqq_wait_request(in_serv_bfqq);
+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
+ bfqd->in_service_queue = NULL;
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 25da0d1c0622..e1960bf149d8 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -3765,6 +3765,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd,
+ if (!bfqd->in_service_bic) {
+ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
+ bfqd->in_service_bic = RQ_BIC(rq);
++ BUG_ON(!bfqd->in_service_bic);
+ }
+
+ if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
+
+From eceae5457530df8598557767d7be258ca9384de4 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 9 Aug 2017 22:29:01 +0200
+Subject: [PATCH 37/51] bfq-sq-mq: make lookup_next_entity push up vtime on
+ expirations
+
+To provide a very smooth service, bfq starts to serve a bfq_queue
+only if the queue is 'eligible', i.e., if the same queue would
+have started to be served in the ideal, perfectly fair system that
+bfq simulates internally. This is obtained by associating each
+queue with a virtual start time, and by computing a special system
+virtual time quantity: a queue is eligible only if the system
+virtual time has reached the virtual start time of the
+queue. Finally, bfq guarantees that, when a new queue must be set
+in service, there is always at least one eligible entity for each
+active parent entity in the scheduler. To provide this guarantee,
+the function __bfq_lookup_next_entity pushes up, for each parent
+entity on which it is invoked, the system virtual time to the
+minimum among the virtual start times of the entities in the
+active tree for the parent entity (more precisely, the push up
+occurs if the system virtual time happens to be lower than all
+such virtual start times).
+
+There is however a circumstance in which __bfq_lookup_next_entity
+cannot push up the system virtual time for a parent entity, even
+if the system virtual time is lower than the virtual start times
+of all the child entities in the active tree. It happens if one of
+the child entities is in service. In fact, in such a case, there
+is already an eligible entity, the in-service one, even if it may
+not be present in the active tree (because in-service entities
+may be removed from the active tree).
+
+Unfortunately, in the last re-design of the
+hierarchical-scheduling engine, the reset of the pointer to the
+in-service entity for a given parent entity--reset to be done as a
+consequence of the expiration of the in-service entity--always
+happens after the function __bfq_lookup_next_entity has been
+invoked. This causes the function to think that there is still an
+entity in service for the parent entity, and then that the system
+virtual time cannot be pushed up, even if actually such a
+no-more-in-service entity has already been properly reinserted
+into the active tree (or in some other tree if no more
+active). Yet, the system virtual time *had* to be pushed up, to be
+ready to correctly choose the next queue to serve. Because of the
+lack of this push up, bfq may wrongly set in service a queue that
+had been speculatively pre-computed as the possible
+next-in-service queue, but that would no more be the one to serve
+after the expiration and the reinsertion into the active trees of
+the previously in-service entities.
+
+This commit addresses this issue by making
+__bfq_lookup_next_entity properly push up the system virtual time
+if an expiration is occurring.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 4 +--
+ block/bfq-sched.c | 77 ++++++++++++++++++++++++++++++++------------------
+ block/bfq-sq-iosched.c | 4 +--
+ 3 files changed, 53 insertions(+), 32 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 49ffca1ad6e7..b5c848650375 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -682,7 +682,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
+ entity->budget = new_budget;
+ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
+ new_budget);
+- bfq_requeue_bfqq(bfqd, bfqq);
++ bfq_requeue_bfqq(bfqd, bfqq, false);
+ }
+ }
+
+@@ -2822,7 +2822,7 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+
+ bfq_del_bfqq_busy(bfqd, bfqq, true);
+ } else {
+- bfq_requeue_bfqq(bfqd, bfqq);
++ bfq_requeue_bfqq(bfqd, bfqq, true);
+ /*
+ * Resort priority tree of potential close cooperators.
+ */
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 7425824c26b8..f3001af37256 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -33,7 +33,8 @@ static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
+ return rb_entry(node, struct bfq_entity, rb_node);
+ }
+
+-static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd);
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ bool expiration);
+
+ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
+
+@@ -43,6 +44,8 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
+ * @new_entity: if not NULL, pointer to the entity whose activation,
+ * requeueing or repositionig triggered the invocation of
+ * this function.
++ * @expiration: if true, this function is being invoked after the
++ * expiration of the in-service entity
+ *
+ * This function is called to update sd->next_in_service, which, in
+ * its turn, may change as a consequence of the insertion or
+@@ -61,7 +64,8 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
+ * entity.
+ */
+ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+- struct bfq_entity *new_entity)
++ struct bfq_entity *new_entity,
++ bool expiration)
+ {
+ struct bfq_entity *next_in_service = sd->next_in_service;
+ struct bfq_queue *bfqq;
+@@ -120,7 +124,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ if (replace_next)
+ next_in_service = new_entity;
+ } else /* invoked because of a deactivation: lookup needed */
+- next_in_service = bfq_lookup_next_entity(sd);
++ next_in_service = bfq_lookup_next_entity(sd, expiration);
+
+ if (next_in_service) {
+ parent_sched_may_change = !sd->next_in_service ||
+@@ -1291,10 +1295,12 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
+ * @requeue: true if this is a requeue, which implies that bfqq is
+ * being expired; thus ALL its ancestors stop being served and must
+ * therefore be requeued
++ * @expiration: true if this function is being invoked in the expiration path
++ * of the in-service queue
+ */
+ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
+ bool non_blocking_wait_rq,
+- bool requeue)
++ bool requeue, bool expiration)
+ {
+ struct bfq_sched_data *sd;
+
+@@ -1307,7 +1313,8 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
+ RB_EMPTY_ROOT(&(sd->service_tree+1)->active) &&
+ RB_EMPTY_ROOT(&(sd->service_tree+2)->active));
+
+- if (!bfq_update_next_in_service(sd, entity) && !requeue) {
++ if (!bfq_update_next_in_service(sd, entity, expiration) &&
++ !requeue) {
+ BUG_ON(!sd->next_in_service);
+ break;
+ }
+@@ -1373,6 +1380,8 @@ static bool __bfq_deactivate_entity(struct bfq_entity *entity,
+ * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
+ * @entity: the entity to deactivate.
+ * @ins_into_idle_tree: true if the entity can be put into the idle tree
++ * @expiration: true if this function is being invoked in the expiration path
++ * of the in-service queue
+ */
+ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ bool ins_into_idle_tree,
+@@ -1417,7 +1426,7 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ * then, since entity has just been
+ * deactivated, a new one must be found.
+ */
+- bfq_update_next_in_service(sd, NULL);
++ bfq_update_next_in_service(sd, NULL, expiration);
+
+ if (sd->next_in_service || sd->in_service_entity) {
+ /*
+@@ -1495,7 +1504,7 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ "invoking udpdate_next for this entity");
+ }
+ #endif
+- if (!bfq_update_next_in_service(sd, entity) &&
++ if (!bfq_update_next_in_service(sd, entity, expiration) &&
+ !expiration)
+ /*
+ * next_in_service unchanged or not causing
+@@ -1524,7 +1533,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "calc_vtime_jump: new value %llu",
+- root_entity->min_start);
++ ((root_entity->min_start>>10)*1000)>>12);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+@@ -1533,7 +1542,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+ "calc_vtime_jump: new value %llu",
+- root_entity->min_start);
++ ((root_entity->min_start>>10)*1000)>>12);
+ }
+ #endif
+ return root_entity->min_start;
+@@ -1615,17 +1624,9 @@ static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
+ * 3) is idle.
+ */
+ static struct bfq_entity *
+-__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service
+-#if 0
+- , bool force
+-#endif
+- )
++__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
+ {
+- struct bfq_entity *entity
+-#if 0
+- , *new_next_in_service = NULL
+-#endif
+- ;
++ struct bfq_entity *entity;
+ u64 new_vtime;
+ struct bfq_queue *bfqq;
+
+@@ -1667,8 +1668,9 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "__lookup_next: start %llu vtime %llu st %p",
++ "__lookup_next: start %llu vtime %llu (%llu) st %p",
+ ((entity->start>>10)*1000)>>12,
++ ((st->vtime>>10)*1000)>>12,
+ ((new_vtime>>10)*1000)>>12, st);
+ }
+ #endif
+@@ -1681,12 +1683,14 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service
+ /**
+ * bfq_lookup_next_entity - return the first eligible entity in @sd.
+ * @sd: the sched_data.
++ * @expiration: true if we are on the expiration path of the in-service queue
+ *
+ * This function is invoked when there has been a change in the trees
+- * for sd, and we need know what is the new next entity after this
+- * change.
++ * for sd, and we need to know what is the new next entity to serve
++ * after this change.
+ */
+-static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ bool expiration)
+ {
+ struct bfq_service_tree *st = sd->service_tree;
+ struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
+@@ -1716,8 +1720,24 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
+ * class, unless the idle class needs to be served.
+ */
+ for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
++ /*
++ * If expiration is true, then bfq_lookup_next_entity
++ * is being invoked as a part of the expiration path
++ * of the in-service queue. In this case, even if
++ * sd->in_service_entity is not NULL,
++ * sd->in_service_entiy at this point is actually not
++ * in service any more, and, if needed, has already
++ * been properly queued or requeued into the right
++ * tree. The reason why sd->in_service_entity is still
++ * not NULL here, even if expiration is true, is that
++ * sd->in_service_entiy is reset as a last step in the
++ * expiration path. So, if expiration is true, tell
++ * __bfq_lookup_next_entity that there is no
++ * sd->in_service_entity.
++ */
+ entity = __bfq_lookup_next_entity(st + class_idx,
+- sd->in_service_entity);
++ sd->in_service_entity &&
++ !expiration);
+
+ if (entity)
+ break;
+@@ -1891,7 +1911,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ for_each_entity(entity) {
+ struct bfq_sched_data *sd = entity->sched_data;
+
+- if(!bfq_update_next_in_service(sd, NULL))
++ if (!bfq_update_next_in_service(sd, NULL, false))
+ break;
+ }
+
+@@ -1951,16 +1971,17 @@ static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ entity->on_st);
+
+ bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
+- false);
++ false, false);
+ bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
+ }
+
+-static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ bool expiration)
+ {
+ struct bfq_entity *entity = &bfqq->entity;
+
+ bfq_activate_requeue_entity(entity, false,
+- bfqq == bfqd->in_service_queue);
++ bfqq == bfqd->in_service_queue, expiration);
+ }
+
+ static void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index e1960bf149d8..42393ab889a9 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -644,7 +644,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
+ entity->budget = new_budget;
+ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
+ new_budget);
+- bfq_requeue_bfqq(bfqd, bfqq);
++ bfq_requeue_bfqq(bfqd, bfqq, false);
+ }
+ }
+
+@@ -2715,7 +2715,7 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+
+ bfq_del_bfqq_busy(bfqd, bfqq, true);
+ } else {
+- bfq_requeue_bfqq(bfqd, bfqq);
++ bfq_requeue_bfqq(bfqd, bfqq, true);
+ /*
+ * Resort priority tree of potential close cooperators.
+ */
+
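The commit message above is dense, but the core invariant is small. Each entity has a virtual start time, each service tree a system virtual time, and an entity may be served only once the latter has caught up with the former; when nothing in a tree is eligible and no entity genuinely remains in service, the tree's virtual time is pushed up to the smallest start time in the active tree. A deliberately reduced model, with plain integers instead of BFQ's fixed-point timestamps and hypothetical helper names:

#include <stdio.h>

/* toy entity: only the virtual start time matters for eligibility */
struct toy_entity { unsigned long long start; };

/* toy service tree: vtime is the system virtual time of that tree */
struct toy_service_tree { unsigned long long vtime; };

/* an entity is eligible once the system virtual time reaches its start */
static int eligible(const struct toy_entity *e, const struct toy_service_tree *st)
{
        return e->start <= st->vtime;
}

/*
 * Push-up done by __bfq_lookup_next_entity when no entity counts as
 * in service (after this patch, that includes the expiration path):
 * jump vtime forward to the smallest start time in the active tree,
 * so that at least one entity becomes eligible.
 */
static void push_up_vtime(struct toy_service_tree *st, unsigned long long min_start)
{
        if (st->vtime < min_start)
                st->vtime = min_start;
}

int main(void)
{
        struct toy_service_tree st = { .vtime = 100 };
        struct toy_entity e = { .start = 140 };

        printf("before push-up: eligible=%d\n", eligible(&e, &st));  /* 0 */
        push_up_vtime(&st, e.start);
        printf("after  push-up: eligible=%d\n", eligible(&e, &st));  /* 1 */
        return 0;
}
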
+From ee9f95b24e1d88ffba4845981c2a4684aefd0245 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 9 Aug 2017 22:53:00 +0200
+Subject: [PATCH 38/51] bfq-sq-mq: remove direct switch to an entity in higher
+ class
+
+If the function bfq_update_next_in_service is invoked as a consequence
+of the activation or requeueing of an entity, say E, and finds out
+that E belongs to a higher-priority class than that of the current
+next-in-service entity, then it sets next_in_service directly to
+E. But this may lead to anomalous schedules, because E may happen not
+be eligible for service, because its virtual start time is higher than
+the system virtual time for its service tree.
+
+This commit addresses this issue by simply removing this direct
+switch.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-sched.c | 19 +++++--------------
+ 1 file changed, 5 insertions(+), 14 deletions(-)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index f3001af37256..b1a59088db88 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -76,9 +76,8 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ * or repositiong of an entity that does not coincide with
+ * sd->next_in_service, then a full lookup in the active tree
+ * can be avoided. In fact, it is enough to check whether the
+- * just-modified entity has a higher priority than
+- * sd->next_in_service, or, even if it has the same priority
+- * as sd->next_in_service, is eligible and has a lower virtual
++ * just-modified entity has the same priority as
++ * sd->next_in_service, is eligible and has a lower virtual
+ * finish time than sd->next_in_service. If this compound
+ * condition holds, then the new entity becomes the new
+ * next_in_service. Otherwise no change is needed.
+@@ -94,9 +93,8 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+
+ /*
+ * If there is already a next_in_service candidate
+- * entity, then compare class priorities or timestamps
+- * to decide whether to replace sd->service_tree with
+- * new_entity.
++ * entity, then compare timestamps to decide whether
++ * to replace sd->service_tree with new_entity.
+ */
+ if (next_in_service) {
+ unsigned int new_entity_class_idx =
+@@ -104,10 +102,6 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ struct bfq_service_tree *st =
+ sd->service_tree + new_entity_class_idx;
+
+- /*
+- * For efficiency, evaluate the most likely
+- * sub-condition first.
+- */
+ replace_next =
+ (new_entity_class_idx ==
+ bfq_class_idx(next_in_service)
+@@ -115,10 +109,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ !bfq_gt(new_entity->start, st->vtime)
+ &&
+ bfq_gt(next_in_service->finish,
+- new_entity->finish))
+- ||
+- new_entity_class_idx <
+- bfq_class_idx(next_in_service);
++ new_entity->finish));
+ }
+
+ if (replace_next)
+
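After the change above, the short path in bfq_update_next_in_service can promote a newly activated entity only on a like-for-like comparison: same priority class, eligible, and an earlier virtual finish time than the current candidate; a higher class alone is no longer enough. A toy sketch of that predicate (simplified types, hypothetical names; the real code also handles timestamp wraparound via bfq_gt):

#include <stdbool.h>
#include <stdio.h>

struct toy_entity {
        int class_idx;                  /* 0 = RT, 1 = BE, 2 = IDLE */
        unsigned long long start;       /* virtual start time */
        unsigned long long finish;      /* virtual finish time */
};

struct toy_service_tree { unsigned long long vtime; };

/* shape of the short-path test after this change */
static bool replaces_next(const struct toy_entity *new_e,
                          const struct toy_entity *next,
                          const struct toy_service_tree *st)
{
        return new_e->class_idx == next->class_idx &&   /* same class only */
               new_e->start <= st->vtime &&             /* must be eligible */
               new_e->finish < next->finish;            /* earlier virtual finish */
}

int main(void)
{
        struct toy_service_tree st = { .vtime = 100 };
        struct toy_entity next  = { .class_idx = 1, .start = 90,  .finish = 200 };
        struct toy_entity rt_ne = { .class_idx = 0, .start = 150, .finish = 160 };

        /* higher class but not eligible: no direct switch any more */
        printf("direct switch allowed: %d\n", replaces_next(&rt_ne, &next, &st));
        return 0;
}
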
+From a3fdc5af40537355b68c1f0d3997c5a5fb54b9ce Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 10 Aug 2017 08:15:50 +0200
+Subject: [PATCH 39/51] bfq-sq-mq: guarantee update_next_in_service always
+ returns an eligible entity
+
+If the function bfq_update_next_in_service is invoked as a consequence
+of the activation or requeueing of an entity, say E, then it doesn't
+invoke bfq_lookup_next_entity to get the next-in-service entity. In
+contrast, it follows a shorter path: if E happens to be eligible (see
+commit "bfq-sq-mq: make lookup_next_entity push up vtime on
+expirations" for details on eligibility) and to have a lower virtual
+finish time than the current candidate as next-in-service entity, then
+E directly becomes the next-in-service entity. Unfortunately, there is
+a corner case for which this shorter path makes
+bfq_update_next_in_service choose a non eligible entity: it occurs if
+both E and the current next-in-service entity happen to be non
+eligible when bfq_update_next_in_service is invoked. In this case, E
+is not set as next-in-service, and, since bfq_lookup_next_entity is
+not invoked, the state of the parent entity is not updated so as to
+end up with an eligible entity as the proper next-in-service entity.
+
+In this respect, next-in-service is actually allowed to be non
+eligible while some queue is in service: since no system-virtual-time
+push-up can be performed in that case (see again commit "bfq-sq-mq:
+make lookup_next_entity push up vtime on expirations" for details),
+next-in-service is chosen, speculatively, as a function of the
+possible value that the system virtual time may get after a push
+up. But the correctness of the schedule breaks if next-in-service is
+still a non eligible entity when it is time to set in service the next
+entity. Unfortunately, this may happen in the above corner case.
+
+This commit fixes this problem by making bfq_update_next_in_service
+invoke bfq_lookup_next_entity not only if the above shorter path
+cannot be taken, but also if the shorter path is taken but fails to
+yield an eligible next-in-service entity.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-sched.c | 38 ++++++++++++++++++++++++++++----------
+ 1 file changed, 28 insertions(+), 10 deletions(-)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index b1a59088db88..e4a2553a2d2c 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -70,6 +70,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ struct bfq_entity *next_in_service = sd->next_in_service;
+ struct bfq_queue *bfqq;
+ bool parent_sched_may_change = false;
++ bool change_without_lookup = false;
+
+ /*
+ * If this update is triggered by the activation, requeueing
+@@ -89,7 +90,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ * set to true, and left as true if
+ * sd->next_in_service is NULL.
+ */
+- bool replace_next = true;
++ change_without_lookup = true;
+
+ /*
+ * If there is already a next_in_service candidate
+@@ -102,7 +103,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ struct bfq_service_tree *st =
+ sd->service_tree + new_entity_class_idx;
+
+- replace_next =
++ change_without_lookup =
+ (new_entity_class_idx ==
+ bfq_class_idx(next_in_service)
+ &&
+@@ -112,15 +113,32 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ new_entity->finish));
+ }
+
+- if (replace_next)
++ if (change_without_lookup) {
+ next_in_service = new_entity;
+- } else /* invoked because of a deactivation: lookup needed */
++ bfqq = bfq_entity_to_bfqq(next_in_service);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "update_next_in_service: chose without lookup");
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ else {
++ struct bfq_group *bfqg =
++ container_of(next_in_service,
++ struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data*)bfqg->bfqd, bfqg,
++ "update_next_in_service: chose without lookup");
++ }
++#endif
++ }
++ }
++
++ if (!change_without_lookup) /* lookup needed */
+ next_in_service = bfq_lookup_next_entity(sd, expiration);
+
+- if (next_in_service) {
++ if (next_in_service)
+ parent_sched_may_change = !sd->next_in_service ||
+ bfq_update_parent_budget(next_in_service);
+- }
+
+ sd->next_in_service = next_in_service;
+
+@@ -1053,7 +1071,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "__activate_entity: new queue finish %llu",
++ "update_fin_time_enqueue: new queue finish %llu",
+ ((entity->finish>>10)*1000)>>12);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -1061,7 +1079,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "__activate_entity: new group finish %llu",
++ "update_fin_time_enqueue: new group finish %llu",
+ ((entity->finish>>10)*1000)>>12);
+ #endif
+ }
+@@ -1071,7 +1089,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "__activate_entity: queue %seligible in st %p",
++ "update_fin_time_enqueue: queue %seligible in st %p",
+ entity->start <= st->vtime ? "" : "non ", st);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -1079,7 +1097,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "__activate_entity: group %seligible in st %p",
++ "update_fin_time_enqueue: group %seligible in st %p",
+ entity->start <= st->vtime ? "" : "non ", st);
+ #endif
+ }
+
+From 6565e4d1aac029b6f0a5d86a4c6ef38608838eac Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 31 Aug 2017 19:24:26 +0200
+Subject: [PATCH 40/51] doc, block, bfq: fix some typos and stale sentences
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Reviewed-by: Jeremy Hickman <jeremywh7@gmail.com>
+Reviewed-by: Laurentiu Nicola <lnicola@dend.ro>
+---
+ Documentation/block/bfq-iosched.txt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index 0e59f1c9d30e..dcfe15523da3 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -17,7 +17,7 @@ instances of BFQ are available (otherwise only the first instance):
+ - bfq-mq: development version of BFQ for blk-mq; this version contains
+ also all latest features and fixes not yet landed in mainline, plus many
+ safety checks
+-- bfq: BFQ for legacy blk; also this version contains latest features
++- bfq-sq: BFQ for legacy blk; also this version contains latest features
+ and fixes, as well as safety checks
+
+ In its default configuration, BFQ privileges latency over
+
+From 261ee8cc9f43e03d790a07184f0bcaa504ee6737 Mon Sep 17 00:00:00 2001
+From: Luca Miccio <lucmiccio@gmail.com>
+Date: Wed, 13 Sep 2017 12:03:56 +0200
+Subject: [PATCH 41/51] bfq-mq, bfq-sq: Disable writeback throttling
+
+Similarly to CFQ, BFQ has its write-throttling heuristics, and it
+is better not to combine them with further write-throttling
+heuristics of a different nature.
+So this commit disables write-back throttling for a device if BFQ
+is used as I/O scheduler for that device.
+
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
+---
+ block/bfq-mq-iosched.c | 2 ++
+ block/bfq-sq-iosched.c | 7 +++++++
+ 2 files changed, 9 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index b5c848650375..7d27d5b3befb 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -89,6 +89,7 @@
+ #include "blk-mq-tag.h"
+ #include "blk-mq-sched.h"
+ #include "bfq-mq.h"
++#include "blk-wbt.h"
+
+ /* Expiration time of sync (0) and async (1) requests, in ns. */
+ static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
+@@ -5260,6 +5261,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ bfq_init_root_group(bfqd->root_group, bfqd);
+ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
+
++ wbt_disable_default(q);
+ return 0;
+
+ out_free:
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 42393ab889a9..6fdc3b1d5bb8 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -83,6 +83,7 @@
+ #include <linux/ioprio.h>
+ #include "blk.h"
+ #include "bfq.h"
++#include "blk-wbt.h"
+
+ /* Expiration time of sync (0) and async (1) requests, in ns. */
+ static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
+@@ -4976,6 +4977,11 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ return -ENOMEM;
+ }
+
++static void bfq_registered_queue(struct request_queue *q)
++{
++ wbt_disable_default(q);
++}
++
+ static void bfq_slab_kill(void)
+ {
+ kmem_cache_destroy(bfq_pool);
+@@ -5285,6 +5291,7 @@ static struct elevator_type iosched_bfq = {
+ .elevator_may_queue_fn = bfq_may_queue,
+ .elevator_init_fn = bfq_init_queue,
+ .elevator_exit_fn = bfq_exit_queue,
++ .elevator_registered_fn = bfq_registered_queue,
+ },
+ .icq_size = sizeof(struct bfq_io_cq),
+ .icq_align = __alignof__(struct bfq_io_cq),
+
+From 40ea0aed088791da27fcfa51f3b64d1f96b0d06e Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 12 Sep 2017 16:45:53 +0200
+Subject: [PATCH 42/51] bfq-mq, bfq-sq: fix wrong init of saved start time for
+ weight raising
+
+This commit fixes a bug that causes bfq to fail to guarantee a high
+responsiveness on some drives, if there is heavy random read+write I/O
+in the background. More precisely, such a failure allowed this bug to
+be found [1], but the bug may well cause other yet unreported
+anomalies.
+
+BFQ raises the weight of the bfq_queues associated with soft real-time
+applications, to privilege the I/O, and thus reduce latency, for these
+applications. This mechanism is named soft-real-time weight raising in
+BFQ. A soft real-time period may happen to be nested into an
+interactive weight raising period, i.e., it may happen that, when a
+bfq_queue switches to a soft real-time weight-raised state, the
+bfq_queue is already being weight-raised because deemed interactive
+too. In this case, BFQ saves, in a special variable
+wr_start_at_switch_to_srt, the time instant when the interactive
+weight-raising period started for the bfq_queue, i.e., the time
+instant when BFQ started to deem the bfq_queue interactive. This value
+is then used to check whether the interactive weight-raising period
+would still be in progress when the soft real-time weight-raising
+period ends. If so, interactive weight raising is restored for the
+bfq_queue. This restore is useful, in particular, because it prevents
+bfq_queues from losing their interactive weight raising prematurely,
+as a consequence of spurious, short-lived soft real-time
+weight-raising periods caused by wrong detections as soft real-time.
+
+If, instead, a bfq_queue switches to soft-real-time weight raising
+while it *is not* already in an interactive weight-raising period,
+then the variable wr_start_at_switch_to_srt has no meaning during the
+following soft real-time weight-raising period. Unfortunately the
+handling of this case is wrong in BFQ: not only is the variable not
+flagged somehow as meaningless, but it is also set to the time when
+the switch to soft real-time weight-raising occurs. This may cause an
+interactive weight-raising period to be considered mistakenly as still
+in progress, and thus a spurious interactive weight-raising period to
+start for the bfq_queue, at the end of the soft-real-time
+weight-raising period. In particular the spurious interactive
+weight-raising period will be considered as still in progress, if the
+soft-real-time weight-raising period does not last very long. The
+bfq_queue will then be wrongly privileged and, if I/O bound, will
+unjustly steal bandwidth from truly interactive or soft real-time
+bfq_queues, harming responsiveness and low latency.
+
+This commit fixes this issue by just setting wr_start_at_switch_to_srt
+to minus infinity (farthest past time instant according to jiffies
+macros): when the soft-real-time weight-raising period ends, certainly
+no interactive weight-raising period will be considered as still in
+progress.
+
+[1] Background I/O Type: Random - Background I/O mix: Reads and writes
+- Application to start: LibreOffice Writer in
+http://www.phoronix.com/scan.php?page=news_item&px=Linux-4.13-IO-Laptop
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
+Tested-by: Lee Tibbert <lee.tibbert@gmail.com>
+Tested-by: Mirko Montanari <mirkomontanari91@gmail.com>
+---
+ block/bfq-mq-iosched.c | 50 +++++++++++++++++++++++++++++++-------------------
+ block/bfq-sq-iosched.c | 50 +++++++++++++++++++++++++++++++-------------------
+ 2 files changed, 62 insertions(+), 38 deletions(-)
+
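To make the fix concrete, here is a minimal, user-space C sketch of the idea in this commit message: initializing wr_start_at_switch_to_srt to the farthest past instant makes any later "interactive weight raising still in progress?" test fail, whereas initializing it to the current time can make the test pass spuriously. The MAX_JIFFY_OFFSET value, the jiffies variable and the helper names below are simplified stand-ins, not the kernel definitions.

#include <stdbool.h>
#include <stdio.h>

#define MAX_JIFFY_OFFSET ((~0UL >> 2) - 1)	/* stand-in for the kernel macro */

static unsigned long jiffies;			/* stand-in for the kernel counter */

/* farthest past time instant, as in bfq_smallest_from_now() */
static unsigned long smallest_from_now(void)
{
	return jiffies - MAX_JIFFY_OFFSET;
}

/* would an interactive wr period started at @start still be running now? */
static bool interactive_wr_in_progress(unsigned long start, unsigned long dur)
{
	/* mirrors time_is_after_eq_jiffies(start + dur) */
	return (long)(start + dur - jiffies) >= 0;
}

int main(void)
{
	unsigned long wr_dur = 8000;		/* illustrative duration, in ticks */

	jiffies = 100000;
	unsigned long buggy_init = jiffies;		/* old, wrong init */
	unsigned long fixed_init = smallest_from_now();	/* "minus infinity" */

	jiffies += 100;		/* a short soft real-time wr period elapses */

	/* prints 1: a spurious interactive period would be detected */
	printf("buggy init -> in progress: %d\n",
	       interactive_wr_in_progress(buggy_init, wr_dur));
	/* prints 0: no interactive period is wrongly considered in progress */
	printf("fixed init -> in progress: %d\n",
	       interactive_wr_in_progress(fixed_init, wr_dur));
	return 0;
}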
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 7d27d5b3befb..f378519b6d33 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1204,6 +1204,24 @@ static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
+ return wr_or_deserves_wr;
+ }
+
++/*
++ * Return the farthest future time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_greatest_from_now(void)
++{
++ return jiffies + MAX_JIFFY_OFFSET;
++}
++
++/*
++ * Return the farthest past time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_smallest_from_now(void)
++{
++ return jiffies - MAX_JIFFY_OFFSET;
++}
++
+ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq,
+ unsigned int old_wr_coeff,
+@@ -1218,7 +1236,19 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+ } else {
+- bfqq->wr_start_at_switch_to_srt = jiffies;
++ /*
++ * No interactive weight raising in progress
++ * here: assign minus infinity to
++ * wr_start_at_switch_to_srt, to make sure
++ * that, at the end of the soft-real-time
++			 * weight-raising period that is starting
++ * now, no interactive weight-raising period
++ * may be wrongly considered as still in
++ * progress (and thus actually started by
++ * mistake).
++ */
++ bfqq->wr_start_at_switch_to_srt =
++ bfq_smallest_from_now();
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
+ BFQ_SOFTRT_WEIGHT_FACTOR;
+ bfqq->wr_cur_max_time =
+@@ -3174,24 +3204,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
+ }
+
+-/*
+- * Return the farthest future time instant according to jiffies
+- * macros.
+- */
+-static unsigned long bfq_greatest_from_now(void)
+-{
+- return jiffies + MAX_JIFFY_OFFSET;
+-}
+-
+-/*
+- * Return the farthest past time instant according to jiffies
+- * macros.
+- */
+-static unsigned long bfq_smallest_from_now(void)
+-{
+- return jiffies - MAX_JIFFY_OFFSET;
+-}
+-
+ /**
+ * bfq_bfqq_expire - expire a queue.
+ * @bfqd: device owning the queue.
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 6fdc3b1d5bb8..f4654436cd55 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -1165,6 +1165,24 @@ static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
+ return wr_or_deserves_wr;
+ }
+
++/*
++ * Return the farthest future time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_greatest_from_now(void)
++{
++ return jiffies + MAX_JIFFY_OFFSET;
++}
++
++/*
++ * Return the farthest past time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_smallest_from_now(void)
++{
++ return jiffies - MAX_JIFFY_OFFSET;
++}
++
+ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq,
+ unsigned int old_wr_coeff,
+@@ -1179,7 +1197,19 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+ } else {
+- bfqq->wr_start_at_switch_to_srt = jiffies;
++ /*
++ * No interactive weight raising in progress
++ * here: assign minus infinity to
++ * wr_start_at_switch_to_srt, to make sure
++ * that, at the end of the soft-real-time
++			 * weight-raising period that is starting
++ * now, no interactive weight-raising period
++ * may be wrongly considered as still in
++ * progress (and thus actually started by
++ * mistake).
++ */
++ bfqq->wr_start_at_switch_to_srt =
++ bfq_smallest_from_now();
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
+ BFQ_SOFTRT_WEIGHT_FACTOR;
+ bfqq->wr_cur_max_time =
+@@ -3067,24 +3097,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
+ }
+
+-/*
+- * Return the farthest future time instant according to jiffies
+- * macros.
+- */
+-static unsigned long bfq_greatest_from_now(void)
+-{
+- return jiffies + MAX_JIFFY_OFFSET;
+-}
+-
+-/*
+- * Return the farthest past time instant according to jiffies
+- * macros.
+- */
+-static unsigned long bfq_smallest_from_now(void)
+-{
+- return jiffies - MAX_JIFFY_OFFSET;
+-}
+-
+ /**
+ * bfq_bfqq_expire - expire a queue.
+ * @bfqd: device owning the queue.
+
+From 9dbea44b6f721baeff35b9fdf628ec55fe00e09d Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 14 Sep 2017 05:12:58 -0400
+Subject: [PATCH 43/51] Fix commit "Unnest request-queue and ioc locks from
+ scheduler locks"
+
+The commit "Unnest request-queue and ioc locks from scheduler locks"
+mistakenly removed the setting of the split flag in function
+bfq_prepare_request. This commit puts this missing instruction back in
+its place.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index f378519b6d33..288078e68a2a 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -744,6 +744,12 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
++ __func__,
++ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
++ bfqq->wr_cur_max_time);
++
+ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
+ time_is_before_jiffies(bfqq->last_wr_start_finish +
+ bfqq->wr_cur_max_time))) {
+@@ -2208,6 +2214,11 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
+ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
++ __func__,
++ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
++ bfqq->wr_cur_max_time);
+ }
+
+ static void
+@@ -4950,6 +4961,7 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
+ bic->saved_in_large_burst = true;
+
+ bfqq = bfq_split_bfqq(bic, bfqq);
++ split = true;
+
+ if (!bfqq)
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
+
+From d4ebb2a66a23dc183792088c521f2be2193b56db Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 15 Sep 2017 01:53:51 -0400
+Subject: [PATCH 44/51] bfq-sq, bfq-mq: check and switch back to interactive wr
+ also on queue split
+
+As already explained in the message of commit "bfq-mq, bfq-sq: fix
+wrong init of saved start time for weight raising", if a soft
+real-time weight-raising period happens to be nested in a larger
+interactive weight-raising period, then BFQ restores the interactive
+weight raising at the end of the soft real-time weight raising. In
+particular, BFQ checks whether the latter has ended only on request
+dispatches.
+
+Unfortunately, the above scheme fails to restore interactive weight
+raising in the following corner case: if a bfq_queue, say Q,
+1) Is merged with another bfq_queue while it is in a nested soft
+real-time weight-raising period. The weight-raising state of Q is
+then saved, and not considered any longer until a split occurs.
+2) Is split from the other bfq_queue(s) at a time instant when its
+soft real-time weight raising is already finished.
+On the split, while resuming the previous, soft real-time
+weight-raised state of the bfq_queue Q, BFQ checks whether the
+current soft real-time weight-raising period is actually over. If so,
+BFQ switches weight raising off for Q, *without* checking whether the
+soft real-time period was actually nested in a non-yet-finished
+interactive weight-raising period.
+
+This commit addresses this issue by adding the above missing check in
+bfq_queue splits, and restoring interactive weight raising if needed.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Tested-by: Mirko Montanari <mirkomontanari91@gmail.com>
+---
+ block/bfq-mq-iosched.c | 29 +++++++++++++++++++++--------
+ block/bfq-sq-iosched.c | 35 +++++++++++++++++++++++++++--------
+ 2 files changed, 48 insertions(+), 16 deletions(-)
+
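As a rough illustration of the check added here, the stand-alone C sketch below condenses the resume-state decision: if the expired, saved period was a soft real-time one and the enclosing interactive period would still be running, weight raising falls back to the interactive parameters instead of being switched off. All type, field and parameter names are simplified stand-ins for the bfqq/bfqd fields, not the kernel code itself.

struct queue_wr_state {
	unsigned int  wr_coeff;			/* 1 means "not weight-raised" */
	unsigned long wr_cur_max_time;
	unsigned long last_wr_start_finish;
	unsigned long wr_start_at_switch_to_srt;
};

static void resume_wr_state(struct queue_wr_state *q, unsigned long now,
			    unsigned int interactive_coeff,
			    unsigned long interactive_dur,
			    unsigned long srt_dur, int in_large_burst)
{
	/* act only if the queue was weight-raised and that period is over */
	if (q->wr_coeff <= 1 ||
	    (!in_large_burst &&
	     now < q->last_wr_start_finish + q->wr_cur_max_time))
		return;

	if (q->wr_cur_max_time == srt_dur && !in_large_burst &&
	    now <= q->wr_start_at_switch_to_srt + interactive_dur) {
		/* nested case: switch back to interactive weight raising */
		q->wr_coeff = interactive_coeff;
		q->wr_cur_max_time = interactive_dur;
		q->last_wr_start_finish = q->wr_start_at_switch_to_srt;
	} else {
		q->wr_coeff = 1;		/* weight raising really ends */
	}
}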
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 288078e68a2a..6130a95c6497 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -716,6 +716,15 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
+ return dur;
+ }
+
++/* switch back from soft real-time to interactive weight raising */
++static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
++ struct bfq_data *bfqd)
++{
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
++}
++
+ static void
+ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ struct bfq_io_cq *bic, bool bfq_already_existing)
+@@ -753,12 +762,20 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
+ time_is_before_jiffies(bfqq->last_wr_start_finish +
+ bfqq->wr_cur_max_time))) {
+- bfq_log_bfqq(bfqq->bfqd, bfqq,
++ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ !bfq_bfqq_in_large_burst(bfqq) &&
++ time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
++ bfq_wr_duration(bfqd))) {
++ switch_back_to_interactive_wr(bfqq, bfqd);
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "resume state: switching back to interactive");
++ } else {
++ bfqq->wr_coeff = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "resume state: switching off wr (%lu + %lu < %lu)",
+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
+ jiffies);
+-
+- bfqq->wr_coeff = 1;
++ }
+ }
+
+ /* make sure weight will be updated, however we got here */
+@@ -3820,11 +3837,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_wr_duration(bfqd)))
+ bfq_bfqq_end_wr(bfqq);
+ else {
+- /* switch back to interactive wr */
+- bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+- bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+- bfqq->last_wr_start_finish =
+- bfqq->wr_start_at_switch_to_srt;
++ switch_back_to_interactive_wr(bfqq, bfqd);
+ BUG_ON(time_is_after_jiffies(
+ bfqq->last_wr_start_finish));
+ bfqq->entity.prio_changed = 1;
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index f4654436cd55..e07d5d1c0d40 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -678,6 +678,15 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
+ return dur;
+ }
+
++/* switch back from soft real-time to interactive weight raising */
++static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
++ struct bfq_data *bfqd)
++{
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
++}
++
+ static void
+ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ struct bfq_io_cq *bic, bool bfq_already_existing)
+@@ -705,15 +714,29 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
++ __func__,
++ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
++ bfqq->wr_cur_max_time);
++
+ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
+ time_is_before_jiffies(bfqq->last_wr_start_finish +
+ bfqq->wr_cur_max_time))) {
+- bfq_log_bfqq(bfqq->bfqd, bfqq,
++ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ !bfq_bfqq_in_large_burst(bfqq) &&
++ time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
++ bfq_wr_duration(bfqd))) {
++ switch_back_to_interactive_wr(bfqq, bfqd);
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "resume state: switching back to interactive");
++ } else {
++ bfqq->wr_coeff = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "resume state: switching off wr (%lu + %lu < %lu)",
+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
+ jiffies);
+-
+- bfqq->wr_coeff = 1;
++ }
+ }
+
+ /* make sure weight will be updated, however we got here */
+@@ -3703,11 +3726,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_wr_duration(bfqd)))
+ bfq_bfqq_end_wr(bfqq);
+ else {
+- /* switch back to interactive wr */
+- bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+- bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+- bfqq->last_wr_start_finish =
+- bfqq->wr_start_at_switch_to_srt;
++ switch_back_to_interactive_wr(bfqq, bfqd);
+ BUG_ON(time_is_after_jiffies(
+ bfqq->last_wr_start_finish));
+ bfqq->entity.prio_changed = 1;
+
+From 9eaec0c3a2d675763b09da81c9117a9c43bce942 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 15 Sep 2017 04:58:33 -0400
+Subject: [PATCH 45/51] bfq-sq, bfq-mq: let early-merged queues be
+ weight-raised on split too
+
+A just-created bfq_queue, say Q, may happen to be merged with another
+bfq_queue on the very first invocation of the function
+__bfq_insert_request. In such a case, even if Q would clearly deserve
+interactive weight raising (as it has just been created), the function
+bfq_add_request does not make it to be invoked for Q, and thus to
+activate weight raising for Q. As a consequence, when the state of Q
+is saved for a possible future restore, after a split of Q from the
+other bfq_queue(s), such a state happens to be (unjustly)
+non-weight-raised. Then the bfq_queue will not enjoy any weight
+raising on the split, even if should still be in an interactive
+weight-raising period when the split occurs.
+
+This commit solves this problem as follows, for a just-created
+bfq_queue that is being early-merged: it stores directly, in the saved
+state of the bfq_queue, the weight-raising state that would have been
+assigned to the bfq_queue if not early-merged.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Tested-by: Mirko Montanari <mirkomontanari91@gmail.com>
+---
+ block/bfq-mq-iosched.c | 28 +++++++++++++++++++++++-----
+ block/bfq-sq-iosched.c | 28 +++++++++++++++++++++++-----
+ 2 files changed, 46 insertions(+), 10 deletions(-)
+
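The gist of the change can be sketched in plain C as follows: on an early merge of a just-created queue (outside a large burst), the state saved for a future split is the interactive weight-raising state the queue would have earned, not its current, non-raised one. The structure and parameter names are illustrative, not the actual bic/bfqq fields.

struct saved_wr_state {
	unsigned int  coeff;
	unsigned long cur_max_time;
	unsigned long last_start_finish;
	unsigned long start_at_switch_to_srt;
};

static void save_wr_state(struct saved_wr_state *saved,
			  const struct saved_wr_state *cur,
			  int just_created, int in_large_burst,
			  unsigned int interactive_coeff,
			  unsigned long interactive_dur, unsigned long now)
{
	if (just_created && !in_large_burst) {
		/* early merge: store the state the queue deserved */
		saved->coeff = interactive_coeff;
		saved->cur_max_time = interactive_dur;
		saved->last_start_finish = now;
	} else {
		/* ordinary case: store the state the queue actually has */
		*saved = *cur;
	}
}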
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 6130a95c6497..af84e506e897 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -2226,10 +2226,27 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
+- bic->saved_wr_coeff = bfqq->wr_coeff;
+- bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
+- bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
+- bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ if (unlikely(bfq_bfqq_just_created(bfqq) &&
++ !bfq_bfqq_in_large_burst(bfqq))) {
++ /*
++		 * bfqq being merged right after being created: bfqq
++ * would have deserved interactive weight raising, but
++ * did not make it to be set in a weight-raised state,
++ * because of this early merge. Store directly the
++ * weight-raising state that would have been assigned
++		 * to bfqq, so as to avoid that bfqq unjustly fails
++ * to enjoy weight raising if split soon.
++ */
++ bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
++ bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
++ bic->saved_last_wr_start_finish = jiffies;
++ } else {
++ bic->saved_wr_coeff = bfqq->wr_coeff;
++ bic->saved_wr_start_at_switch_to_srt =
++ bfqq->wr_start_at_switch_to_srt;
++ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
++ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ }
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
+@@ -4560,7 +4577,6 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ bfqq->allocated);
+
+ new_bfqq->ref++;
+- bfq_clear_bfqq_just_created(bfqq);
+ /*
+ * If the bic associated with the process
+ * issuing this request still points to bfqq
+@@ -4572,6 +4588,8 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
+ bfqq, new_bfqq);
++
++ bfq_clear_bfqq_just_created(bfqq);
+ /*
+ * rq is about to be enqueued into new_bfqq,
+ * release rq reference on bfqq
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index e07d5d1c0d40..0c48f527fe3f 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -2105,10 +2105,27 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
+- bic->saved_wr_coeff = bfqq->wr_coeff;
+- bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
+- bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
+- bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ if (unlikely(bfq_bfqq_just_created(bfqq) &&
++ !bfq_bfqq_in_large_burst(bfqq))) {
++ /*
++		 * bfqq being merged right after being created: bfqq
++ * would have deserved interactive weight raising, but
++ * did not make it to be set in a weight-raised state,
++ * because of this early merge. Store directly the
++ * weight-raising state that would have been assigned
++		 * to bfqq, so as to avoid that bfqq unjustly fails
++ * to enjoy weight raising if split soon.
++ */
++ bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
++ bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
++ bic->saved_last_wr_start_finish = jiffies;
++ } else {
++ bic->saved_wr_coeff = bfqq->wr_coeff;
++ bic->saved_wr_start_at_switch_to_srt =
++ bfqq->wr_start_at_switch_to_srt;
++ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
++ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ }
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+ }
+
+@@ -4383,10 +4400,11 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq)
+ new_bfqq->allocated[rq_data_dir(rq)]++;
+ bfqq->allocated[rq_data_dir(rq)]--;
+ new_bfqq->ref++;
+- bfq_clear_bfqq_just_created(bfqq);
+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
+ bfqq, new_bfqq);
++
++ bfq_clear_bfqq_just_created(bfqq);
+ /*
+ * rq is about to be enqueued into new_bfqq,
+ * release rq reference on bfqq
+
+From cb05150675095cb97ab22e4955eb82e4fe2e9dbe Mon Sep 17 00:00:00 2001
+From: omcira <omcira@gmail.com>
+Date: Mon, 18 Sep 2017 10:49:48 +0200
+Subject: [PATCH 46/51] bfq-sq, bfq-mq: decrease burst size when queues in
+ burst exit
+
+If many queues belonging to the same group happen to be created
+shortly after each other, then the concurrent processes associated
+with these queues typically have a common goal, and they get it done
+as soon as possible if not hampered by device idling. Examples are
+processes spawned by git grep, or by systemd during boot. As for
+device idling, this mechanism is currently necessary for weight
+raising to succeed in its goal: privileging I/O. In view of these
+facts, BFQ does not provide the above queues with either weight
+raising or device idling.
+
+On the other hand, a burst of queue creations may be caused also by
+the start-up of a complex application. In this case, these queues need
+usually to be served one after the other, and as quickly as possible,
+to maximise responsiveness. Therefore, in this case the best strategy
+is to weight-raise all the queues created during the burst, i.e., the
+exact opposite of the strategy for the above case.
+
+To distinguish between the two cases, BFQ uses an empirical burst-size
+threshold, found through extensive tests and monitoring of daily
+usage. Only large bursts, i.e., bursts with a size above this
+threshold, are considered as generated by a high number of parallel
+processes. In this respect, upstart-based boot proved to be rather
+hard to detect as generating a large burst of queue creations, because
+with upstart most of the queues created in a burst exit *before* the
+next queues in the same burst are created. To address this issue, I
+changed the burst-detection mechanism so as to not decrease the size
+of the current burst even if one of the queues in the burst is
+eliminated.
+
+Unfortunately, this missing decrease causes false positives on very
+fast systems: on the start-up of a complex application, such as
+libreoffice writer, so many queues are created, served and exited
+shortly after each other, that a large burst of queue creations is
+wrongly detected as occurring. These false positives just disappear if
+the size of a burst is decreased when one of the queues in the burst
+exits. This commit restores the missing burst-size decrease, relying
+on the fact that upstart is apparently unlikely to be used on systems
+running this and future versions of the kernel.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Mauro Andreolini <mauro.andreolini@unimore.it>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Tested-by: Mirko Montanari <mirkomontanari91@gmail.com>
+---
+ block/bfq-mq-iosched.c | 12 +++---------
+ block/bfq-sq-iosched.c | 12 +++---------
+ 2 files changed, 6 insertions(+), 18 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index af84e506e897..6e413d7236ce 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4111,16 +4111,10 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+- if (bfq_bfqq_sync(bfqq))
+- /*
+- * The fact that this queue is being destroyed does not
+- * invalidate the fact that this queue may have been
+- * activated during the current burst. As a consequence,
+- * although the queue does not exist anymore, and hence
+- * needs to be removed from the burst list if there,
+- * the burst size has not to be decremented.
+- */
++ if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) {
+ hlist_del_init(&bfqq->burst_list_node);
++ bfqq->bfqd->burst_size--;
++ }
+
+ if (bfqq->bfqd)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 0c48f527fe3f..93034dd7b801 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -3945,16 +3945,10 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+- if (bfq_bfqq_sync(bfqq))
+- /*
+- * The fact that this queue is being destroyed does not
+- * invalidate the fact that this queue may have been
+- * activated during the current burst. As a consequence,
+- * although the queue does not exist anymore, and hence
+- * needs to be removed from the burst list if there,
+- * the burst size has not to be decremented.
+- */
++ if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) {
+ hlist_del_init(&bfqq->burst_list_node);
++ bfqq->bfqd->burst_size--;
++ }
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+
+
+From 60de7307d5e3ed7f272f12c900f631bdfe114db2 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 6 Oct 2017 19:35:38 +0200
+Subject: [PATCH 47/51] bfq-sq, bfq-mq: fix unbalanced decrements of burst size
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The commit "bfq-sq, bfq-mq: decrease burst size when queues in burst
+exit" introduced the decrement of burst_size on the removal of a
+bfq_queue from the burst list. Unfortunately, this decrement can
+happen to be performed even when burst size is already equal to 0,
+because of unbalanced decrements. A description follows of the cause
+of these unbalanced decrements, namely a wrong assumption, and of the
+way this wrong assumption leads to unbalanced decrements.
+
+The wrong assumption is that a bfq_queue can exit only if the process
+associated with the bfq_queue has exited. This is false, because a
+bfq_queue, say Q, may exit also as a consequence of a merge with
+another bfq_queue. In this case, Q exits because the I/O of its
+associated process has been redirected to another bfq_queue.
+
+The decrement unbalance occurs because Q may then be re-created after
+a split, and added back to the current burst list, *without*
+incrementing burst_size. burst_size is not incremented because Q is
+not a new bfq_queue added to the burst list, but a bfq_queue only
+temporarily removed from the list, and, before the commit "bfq-sq,
+bfq-mq: decrease burst size when queues in burst exit", burst_size was
+not decremented when Q was removed.
+
+This commit addresses this issue by just checking whether the exiting
+bfq_queue is a merged bfq_queue, and, in that case, not decrementing
+burst_size. Unfortunately, this still leaves room for unbalanced
+decrements, in the following rarer case: on a split, the bfq_queue
+happens to be inserted into a different burst list than that it was
+removed from when merged. If this happens, the number of elements in
+the new burst list becomes higher than burst_size (by one). When the
+bfq_queue then exits, it is of course not in a merged state any
+longer, thus burst_size is decremented, which results in an unbalanced
+decrement. To handle this sporadic, unlucky case in a simple way,
+this commit also checks that burst_size is larger than 0 before
+decrementing it.
+
+Finally, this commit removes a useless, extra check: the check that
+the bfq_queue is sync, performed before checking whether the bfq_queue
+is in the burst list. This extra check is redundant, because only sync
+bfq_queues can be inserted into the burst list.
+
+Reported-by: Philip Müller <philm@manjaro.org>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Tested-by: Philip Müller <philm@manjaro.org>
+Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
+Tested-by: Lee Tibbert <lee.tibbert@gmail.com>
+---
+ block/bfq-mq-iosched.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++--
+ block/bfq-sq-iosched.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 114 insertions(+), 4 deletions(-)
+
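A compact sketch of the resulting exit-time bookkeeping follows, with simplified stand-in types: the merged-queue case is modelled by a cleared owner pointer (playing the role of bfqq->bic), and the counter is never pushed below zero. This is an illustration of the logic, not the kernel functions.

struct sched_data { int burst_size; };

struct queue {
	struct sched_data *sd;		/* bfqq->bfqd stand-in */
	void *owner;			/* bfqq->bic stand-in; 0 after a merge */
	int on_burst_list;		/* !hlist_unhashed(...) stand-in */
};

static void queue_exit_burst_accounting(struct queue *q)
{
	if (!q->on_burst_list)
		return;

	q->on_burst_list = 0;		/* hlist_del_init(...) in the real code */

	/*
	 * Decrement only for a genuine process exit (the queue was not
	 * merged away), and never drive the counter below zero on the rare
	 * split into a different burst list.
	 */
	if (q->owner && q->sd->burst_size > 0)
		q->sd->burst_size--;
}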
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 6e413d7236ce..816bac6cdd3d 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4111,9 +4111,36 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+- if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) {
++ if (!hlist_unhashed(&bfqq->burst_list_node)) {
+ hlist_del_init(&bfqq->burst_list_node);
+- bfqq->bfqd->burst_size--;
++ /*
++ * Decrement also burst size after the removal, if the
++ * process associated with bfqq is exiting, and thus
++ * does not contribute to the burst any longer. This
++ * decrement helps filter out false positives of large
++ * bursts, when some short-lived process (often due to
++ * the execution of commands by some service) happens
++ * to start and exit while a complex application is
++ * starting, and thus spawning several processes that
++ * do I/O (and that *must not* be treated as a large
++ * burst, see comments on bfq_handle_burst).
++ *
++ * In particular, the decrement is performed only if:
++ * 1) bfqq is not a merged queue, because, if it is,
++ * then this free of bfqq is not triggered by the exit
++ * of the process bfqq is associated with, but exactly
++ * by the fact that bfqq has just been merged.
++ * 2) burst_size is greater than 0, to handle
++ * unbalanced decrements. Unbalanced decrements may
++		 * happen in the following case: bfqq is inserted into
++ * the current burst list--without incrementing
++		 * burst_size--because of a split, but the current
++ * burst list is not the burst list bfqq belonged to
++ * (see comments on the case of a split in
++ * bfq_set_request).
++ */
++ if (bfqq->bic && bfqq->bfqd->burst_size > 0)
++ bfqq->bfqd->burst_size--;
+ }
+
+ if (bfqq->bfqd)
+@@ -4940,6 +4967,34 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
+ "large burst");
+ bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list)
++ /*
++ * If bfqq was in the current
++ * burst list before being
++ * merged, then we have to add
++ * it back. And we do not need
++ * to increase burst_size, as
++ * we did not decrement
++ * burst_size when we removed
++ * bfqq from the burst list as
++ * a consequence of a merge
++ * (see comments in
++ * bfq_put_queue). In this
++ * respect, it would be rather
++ * costly to know whether the
++ * current burst list is still
++ * the same burst list from
++ * which bfqq was removed on
++ * the merge. To avoid this
++ * cost, if bfqq was in a
++ * burst list, then we add
++ * bfqq to the current burst
++ * list without any further
++ * check. This can cause
++ * inappropriate insertions,
++ * but rarely enough to not
++ * harm the detection of large
++ * bursts significantly.
++ */
+ hlist_add_head(&bfqq->burst_list_node,
+ &bfqd->burst_list);
+ }
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 93034dd7b801..4bbd7f4c0154 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -3945,9 +3945,36 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+- if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) {
++ if (!hlist_unhashed(&bfqq->burst_list_node)) {
+ hlist_del_init(&bfqq->burst_list_node);
+- bfqq->bfqd->burst_size--;
++ /*
++ * Decrement also burst size after the removal, if the
++ * process associated with bfqq is exiting, and thus
++ * does not contribute to the burst any longer. This
++ * decrement helps filter out false positives of large
++ * bursts, when some short-lived process (often due to
++ * the execution of commands by some service) happens
++ * to start and exit while a complex application is
++ * starting, and thus spawning several processes that
++ * do I/O (and that *must not* be treated as a large
++ * burst, see comments on bfq_handle_burst).
++ *
++ * In particular, the decrement is performed only if:
++ * 1) bfqq is not a merged queue, because, if it is,
++ * then this free of bfqq is not triggered by the exit
++ * of the process bfqq is associated with, but exactly
++ * by the fact that bfqq has just been merged.
++ * 2) burst_size is greater than 0, to handle
++ * unbalanced decrements. Unbalanced decrements may
++		 * happen in the following case: bfqq is inserted into
++ * the current burst list--without incrementing
++		 * burst_size--because of a split, but the current
++ * burst list is not the burst list bfqq belonged to
++ * (see comments on the case of a split in
++ * bfq_set_request).
++ */
++ if (bfqq->bic && bfqq->bfqd->burst_size > 0)
++ bfqq->bfqd->burst_size--;
+ }
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+@@ -4691,6 +4718,34 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ "large burst");
+ bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list)
++ /*
++ * If bfqq was in the current
++ * burst list before being
++ * merged, then we have to add
++ * it back. And we do not need
++ * to increase burst_size, as
++ * we did not decrement
++ * burst_size when we removed
++ * bfqq from the burst list as
++ * a consequence of a merge
++ * (see comments in
++ * bfq_put_queue). In this
++ * respect, it would be rather
++ * costly to know whether the
++ * current burst list is still
++ * the same burst list from
++ * which bfqq was removed on
++ * the merge. To avoid this
++ * cost, if bfqq was in a
++ * burst list, then we add
++ * bfqq to the current burst
++ * list without any further
++ * check. This can cause
++ * inappropriate insertions,
++ * but rarely enough to not
++ * harm the detection of large
++ * bursts significantly.
++ */
+ hlist_add_head(&bfqq->burst_list_node,
+ &bfqd->burst_list);
+ }
+
+From 09adbd0f46f4ba395964b35bf611b7cc3dd84b4d Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 30 Oct 2017 16:50:50 +0100
+Subject: [PATCH 48/51] doc, block, bfq-mq: update max IOPS sustainable with
+ BFQ
+
+We have investigated more deeply the performance of BFQ, in terms of
+number of IOPS that can be processed by the CPU when BFQ is used as
+I/O scheduler. In more detail, using the script [1], we have measured
+the number of IOPS reached on top of a null block device configured
+with zero latency, as a function of the workload (sequential read,
+sequential write, random read, random write) and of the system (we
+considered desktops, laptops and embedded systems).
+
+Based on the resulting figures, with this commit we update the
+current, conservative IOPS range reported in BFQ documentation. In
+particular, the documentation now reports, for each of three different
+systems, the lowest number of IOPS obtained for that system with the
+above test (namely, the value obtained with the workload leading to
+the lowest IOPS).
+
+[1] https://github.com/Algodev-github/IOSpeed
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+---
+ Documentation/block/bfq-iosched.txt | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index dcfe15523da3..595ff7a5ff34 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -29,12 +29,19 @@ for that device, by setting low_latency to 0. See Section 3 for
+ details on how to configure BFQ for the desired tradeoff between
+ latency and throughput, or on how to maximize throughput.
+
+-On average CPUs, the current version of BFQ can handle devices
+-performing at most ~30K IOPS; at most ~50 KIOPS on faster CPUs. As a
+-reference, 30-50 KIOPS correspond to very high bandwidths with
+-sequential I/O (e.g., 8-12 GB/s if I/O requests are 256 KB large), and
+-to 120-200 MB/s with 4KB random I/O. BFQ is currently being tested on
+-multi-queue devices too.
++BFQ has a non-null overhead, which limits the maximum IOPS that the
++CPU can process for a device scheduled with BFQ. To give an idea of
++the limits on slow or average CPUs, here are BFQ limits for three
++different CPUs, on, respectively, an average laptop, an old desktop,
++and a cheap embedded system, in case full hierarchical support is
++enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set for bfq-sq, or
++CONFIG_MQ_BFQ_GROUP_IOSCHED is set for bfq-mq, or, finally,
++CONFIG_BFQ_GROUP_IOSCHED is set for bfq):
++- Intel i7-4850HQ: 250 KIOPS
++- AMD A8-3850: 170 KIOPS
++- ARM CortexTM-A53 Octa-core: 45 KIOPS
++
++BFQ works for multi-queue devices too (bfq and bfq-mq instances).
+
+ The table of contents follows. Impatients can just jump to Section 3.
+
+
+From be94f97b577dc587593185224a7718aa59ac43f7 Mon Sep 17 00:00:00 2001
+From: Luca Miccio <lucmiccio@gmail.com>
+Date: Tue, 31 Oct 2017 09:50:11 +0100
+Subject: [PATCH 49/51] block, bfq-mq: add missing invocations of
+ bfqg_stats_update_io_add/remove
+
+bfqg_stats_update_io_add and bfqg_stats_update_io_remove are to be
+invoked, respectively, when an I/O request enters and when an I/O
+request exits the scheduler. Unfortunately, bfq-mq does not fully comply
+with this scheme, because it does not invoke these functions for
+requests that are inserted into or extracted from its priority
+dispatch list. This commit fixes this mistake.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+---
+ block/bfq-mq-iosched.c | 24 +++++++++++++++++++-----
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
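The invariant this commit restores can be sketched as a pair of counters: every request that enters the scheduler triggers one "io add", and every path by which it leaves (regular dispatch, the priority dispatch list, a merge, or completion while still queued) triggers exactly one "io remove". The functions below are illustrative stand-ins for the blkg_rwstat-based helpers, not the kernel API.

struct io_stats { long added, removed; };

static void stats_io_add(struct io_stats *s)    { s->added++;   }
static void stats_io_remove(struct io_stats *s) { s->removed++; }

/* request enters the scheduler (any insertion path) */
static void on_insert(struct io_stats *s)          { stats_io_add(s); }

/* request leaves via dispatch, including the priority dispatch list */
static void on_dispatch(struct io_stats *s)        { stats_io_remove(s); }

/* request leaves because it was merged or completed while still queued */
static void on_merge_or_finish(struct io_stats *s) { stats_io_remove(s); }

/* with balanced add/remove calls this holds whenever the queue is empty */
static int stats_balanced(const struct io_stats *s)
{
	return s->added == s->removed;
}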
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 816bac6cdd3d..fbf28804c220 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1394,7 +1394,6 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
+ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
+
+ BUG_ON(bfqq == bfqd->in_service_queue);
+- bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
+
+ /*
+ * bfqq deserves to be weight-raised if:
+@@ -1734,7 +1733,6 @@ static void bfq_remove_request(struct request_queue *q,
+ BUG_ON(bfqq->meta_pending == 0);
+ bfqq->meta_pending--;
+ }
+- bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
+ }
+
+ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+@@ -1879,6 +1877,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+ bfqq->next_rq = rq;
+
+ bfq_remove_request(q, next);
++ bfqg_stats_update_io_remove(bfqq_group(bfqq), next->cmd_flags);
+
+ spin_unlock_irq(&bfqq->bfqd->lock);
+ end:
+@@ -4077,6 +4076,10 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ spin_lock_irq(&bfqd->lock);
+
+ rq = __bfq_dispatch_request(hctx);
++ if (rq && RQ_BFQQ(rq))
++ bfqg_stats_update_io_remove(bfqq_group(RQ_BFQQ(rq)),
++ rq->cmd_flags);
++
+ spin_unlock_irq(&bfqd->lock);
+
+ return rq;
+@@ -4634,6 +4637,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ {
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+
+ spin_lock_irq(&bfqd->lock);
+ if (blk_mq_sched_try_insert_merge(q, rq)) {
+@@ -4647,8 +4651,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ spin_lock_irq(&bfqd->lock);
+ if (at_head || blk_rq_is_passthrough(rq)) {
+- struct bfq_queue *bfqq = RQ_BFQQ(rq);
+-
+ if (at_head)
+ list_add(&rq->queuelist, &bfqd->dispatch);
+ else
+@@ -4668,6 +4670,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ rq->rq_flags &= ~RQF_GOT;
+
+ __bfq_insert_request(bfqd, rq);
++ /*
++ * Update bfqq, because, if a queue merge has occurred
++ * in __bfq_insert_request, then rq has been
++ * redirected into a new queue.
++ */
++ bfqq = RQ_BFQQ(rq);
+
+ if (rq_mergeable(rq)) {
+ elv_rqhash_add(q, rq);
+@@ -4676,6 +4684,9 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ }
+ }
+
++ if (bfqq)
++ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, rq->cmd_flags);
++
+ spin_unlock_irq(&bfqd->lock);
+ }
+
+@@ -4893,8 +4904,11 @@ static void bfq_finish_request(struct request *rq)
+ BUG_ON(in_interrupt());
+
+ assert_spin_locked(&bfqd->lock);
+- if (!RB_EMPTY_NODE(&rq->rb_node))
++ if (!RB_EMPTY_NODE(&rq->rb_node)) {
+ bfq_remove_request(rq->q, rq);
++ bfqg_stats_update_io_remove(bfqq_group(bfqq),
++ rq->cmd_flags);
++ }
+ bfq_put_rq_priv_body(bfqq);
+ }
+
+
+From 8659a1549d2bf241129a0f7c90429bddd9c2bc53 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 8 Nov 2017 19:07:40 +0100
+Subject: [PATCH 50/51] block, bfq-mq: update blkio stats outside the scheduler
+ lock
+
+bfq-mq invokes various blkg_*stats_* functions to update the statistics
+contained in the special files blkio.bfq-mq.* in the blkio controller
+groups, i.e., the I/O accounting related to the proportional-share
+policy provided by bfq-mq. The execution of these functions takes a
+considerable percentage, about 40%, of the total per-request execution
+time of bfq-mq (i.e., of the sum of the execution time of all the bfq-mq
+functions that have to be executed to process an I/O request from its
+creation to its destruction). This reduces the request-processing
+rate sustainable by bfq-mq noticeably, even on a multicore CPU. In fact,
+the bfq-mq functions that invoke blkg_*stats_* functions cannot be
+executed in parallel with the rest of the code of bfq-mq, because
+both are executed under the same per-device scheduler lock.
+
+To reduce this slowdown, this commit moves, wherever possible, the
+invocation of these functions (more precisely, of the bfq-mq functions
+that invoke blkg_*stats_* functions) outside the critical sections
+protected by the scheduler lock.
+
+With this change, and with all blkio.bfq-mq.* statistics enabled, the
+throughput grows, e.g., from 250 to 310 KIOPS (+25%) on an Intel
+i7-4850HQ, in case of 8 threads doing random I/O in parallel on
+null_blk, with the latter configured with 0 latency. We obtained the
+same or higher throughput boosts, up to +30%, with other processors
+(some figures are reported in the documentation). For our tests, we
+used the script [1], with which our results can be easily reproduced.
+
+NOTE. This commit still protects the invocation of blkg_*stats_*
+functions with the request_queue lock, because the group these
+functions are invoked on may otherwise disappear before or while these
+functions are executed. Fortunately, tests without even this lock
+show, by difference, that the serialization caused by this lock has
+little impact (at most ~5% of throughput reduction).
+
+[1] https://github.com/Algodev-github/IOSpeed
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+---
+ Documentation/block/bfq-iosched.txt | 18 ++++--
+ block/bfq-mq-iosched.c | 112 +++++++++++++++++++++++++++++++-----
+ block/bfq-sched.c | 2 +
+ 3 files changed, 112 insertions(+), 20 deletions(-)
+
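The locking change can be illustrated with a small user-space sketch: the scheduling decision and the data needed for statistics are gathered under one lock, that lock is dropped, and the expensive stats update runs under a second lock playing the role of the request_queue lock. pthread mutexes replace the kernel spinlocks, and all helpers are placeholder stubs rather than the real bfq-mq functions.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER; /* bfqd->lock */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER; /* q->queue_lock */

struct dispatch_info {
	bool idle_timer_disabled;
	unsigned int cmd_flags;
	void *group;			/* bfqq_group(bfqq) stand-in */
};

/* placeholder: pick the next request and record what the stats code needs */
static void *pick_request(struct dispatch_info *info)
{
	info->idle_timer_disabled = false;
	info->cmd_flags = 0;
	info->group = NULL;
	return NULL;
}

/* placeholder for the blkg_*stats_* updates */
static void update_group_stats(const struct dispatch_info *info)
{
	(void)info;
}

static void *dispatch_request(void)
{
	struct dispatch_info info;
	void *rq;

	pthread_mutex_lock(&sched_lock);
	rq = pick_request(&info);	/* scheduling proper: keep this short */
	pthread_mutex_unlock(&sched_lock);

	if (!rq)
		return NULL;

	/* expensive accounting, no longer serialized with the scheduler */
	pthread_mutex_lock(&queue_lock);
	update_group_stats(&info);
	pthread_mutex_unlock(&queue_lock);

	return rq;
}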
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index 595ff7a5ff34..c816c595082d 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -31,16 +31,22 @@ latency and throughput, or on how to maximize throughput.
+
+ BFQ has a non-null overhead, which limits the maximum IOPS that the
+ CPU can process for a device scheduled with BFQ. To give an idea of
+-the limits on slow or average CPUs, here are BFQ limits for three
+-different CPUs, on, respectively, an average laptop, an old desktop,
+-and a cheap embedded system, in case full hierarchical support is
+-enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set for bfq-sq, or
+-CONFIG_MQ_BFQ_GROUP_IOSCHED is set for bfq-mq, or, finally,
+-CONFIG_BFQ_GROUP_IOSCHED is set for bfq):
++the limits on slow or average CPUs, here are, first, the limits of
++bfq-sq for three different CPUs, on, respectively, an average laptop,
++an old desktop, and a cheap embedded system, in case full hierarchical
++support is enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set):
+ - Intel i7-4850HQ: 250 KIOPS
+ - AMD A8-3850: 170 KIOPS
+ - ARM CortexTM-A53 Octa-core: 45 KIOPS
+
++bfq-mq and bfq instances reach, instead, a higher sustainable
++throughput. Their limits, on the same systems as above, are, with full
++hierarchical support enabled (i.e., CONFIG_MQ_BFQ_GROUP_IOSCHED set
++for bfq-mq, or CONFIG_BFQ_GROUP_IOSCHED set for bfq):
++- Intel i7-4850HQ: 310 KIOPS
++- AMD A8-3850: 200 KIOPS
++- ARM CortexTM-A53 Octa-core: 56 KIOPS
++
+ BFQ works for multi-queue devices too (bfq and bfq-mq instances).
+
+ The table of contents follows. Impatients can just jump to Section 3.
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index fbf28804c220..ab3b83d612c2 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1822,7 +1822,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ bfqq->next_rq = next_rq;
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "requests_merged: req %p prev %p next_rq %p bfqq %p",
++ "request_merged: req %p prev %p next_rq %p bfqq %p",
+ req, prev, next_rq, bfqq);
+
+ /*
+@@ -2415,7 +2415,6 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+ {
+ if (bfqq) {
+- bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
+ bfq_clear_bfqq_fifo_expire(bfqq);
+
+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
+@@ -3784,7 +3783,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ */
+ bfq_clear_bfqq_wait_request(bfqq);
+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
+- bfqg_stats_update_idle_time(bfqq_group(bfqq));
+ }
+ goto keep_queue;
+ }
+@@ -4072,16 +4070,67 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+ struct request *rq;
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ struct bfq_queue *in_serv_queue, *bfqq;
++ bool waiting_rq, idle_timer_disabled;
++#endif
+
+ spin_lock_irq(&bfqd->lock);
+
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ in_serv_queue = bfqd->in_service_queue;
++ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
++
+ rq = __bfq_dispatch_request(hctx);
+- if (rq && RQ_BFQQ(rq))
+- bfqg_stats_update_io_remove(bfqq_group(RQ_BFQQ(rq)),
+- rq->cmd_flags);
+
++ idle_timer_disabled =
++ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
++
++#else
++ rq = __bfq_dispatch_request(hctx);
++#endif
+ spin_unlock_irq(&bfqd->lock);
+
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ bfqq = rq ? RQ_BFQQ(rq) : NULL;
++ if (!idle_timer_disabled && !bfqq)
++ return rq;
++
++ /*
++ * rq and bfqq are guaranteed to exist until this function
++ * ends, for the following reasons. First, rq can be
++ * dispatched to the device, and then can be completed and
++ * freed, only after this function ends. Second, rq cannot be
++ * merged (and thus freed because of a merge) any longer,
++ * because it has already started. Thus rq cannot be freed
++ * before this function ends, and, since rq has a reference to
++ * bfqq, the same guarantee holds for bfqq too.
++ *
++ * In addition, the following queue lock guarantees that
++ * bfqq_group(bfqq) exists as well.
++ */
++ spin_lock_irq(hctx->queue->queue_lock);
++ if (idle_timer_disabled)
++ /*
++ * Since the idle timer has been disabled,
++ * in_serv_queue contained some request when
++ * __bfq_dispatch_request was invoked above, which
++ * implies that rq was picked exactly from
++ * in_serv_queue. Thus in_serv_queue == bfqq, and is
++ * therefore guaranteed to exist because of the above
++ * arguments.
++ */
++ bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
++ if (bfqq) {
++ struct bfq_group *bfqg = bfqq_group(bfqq);
++
++ bfqg_stats_update_avg_queue_size(bfqg);
++ bfqg_stats_set_start_empty_time(bfqg);
++ bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
++ }
++ spin_unlock_irq(hctx->queue->queue_lock);
++#endif
++
+ return rq;
+ }
+
+@@ -4200,7 +4249,6 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
+-
+ bfq_exit_bfqq(bfqd, bfqq);
+ bic_set_bfqq(bic, NULL, is_sync);
+ spin_unlock_irqrestore(&bfqd->lock, flags);
+@@ -4554,7 +4602,6 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ */
+ bfq_clear_bfqq_wait_request(bfqq);
+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
+- bfqg_stats_update_idle_time(bfqq_group(bfqq));
+
+ /*
+ * The queue is not empty, because a new request just
+@@ -4569,9 +4616,11 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ }
+ }
+
+-static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
++/* returns true if it causes the idle timer to be disabled */
++static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
++ bool waiting, idle_timer_disabled = false;
+ BUG_ON(!bfqq);
+
+ assert_spin_locked(&bfqd->lock);
+@@ -4624,12 +4673,16 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ }
+ }
+
++ waiting = bfqq && bfq_bfqq_wait_request(bfqq);
+ bfq_add_request(rq);
++ idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
+
+ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
+ list_add_tail(&rq->queuelist, &bfqq->fifo);
+
+ bfq_rq_enqueued(bfqd, bfqq, rq);
++
++ return idle_timer_disabled;
+ }
+
+ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+@@ -4638,6 +4691,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ bool idle_timer_disabled = false;
++ unsigned int cmd_flags;
++#endif
+
+ spin_lock_irq(&bfqd->lock);
+ if (blk_mq_sched_try_insert_merge(q, rq)) {
+@@ -4669,13 +4726,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ BUG_ON(!(rq->rq_flags & RQF_GOT));
+ rq->rq_flags &= ~RQF_GOT;
+
+- __bfq_insert_request(bfqd, rq);
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ idle_timer_disabled = __bfq_insert_request(bfqd, rq);
+ /*
+ * Update bfqq, because, if a queue merge has occurred
+ * in __bfq_insert_request, then rq has been
+ * redirected into a new queue.
+ */
+ bfqq = RQ_BFQQ(rq);
++#else
++ __bfq_insert_request(bfqd, rq);
++#endif
+
+ if (rq_mergeable(rq)) {
+ elv_rqhash_add(q, rq);
+@@ -4683,11 +4744,34 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ q->last_merge = rq;
+ }
+ }
+-
+- if (bfqq)
+- bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, rq->cmd_flags);
+-
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ /*
++ * Cache cmd_flags before releasing scheduler lock, because rq
++ * may disappear afterwards (for example, because of a request
++ * merge).
++ */
++ cmd_flags = rq->cmd_flags;
++#endif
+ spin_unlock_irq(&bfqd->lock);
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ if (!bfqq)
++ return;
++ /*
++ * bfqq still exists, because it can disappear only after
++ * either it is merged with another queue, or the process it
++ * is associated with exits. But both actions must be taken by
++ * the same process currently executing this flow of
++ * instruction.
++ *
++ * In addition, the following queue lock guarantees that
++ * bfqq_group(bfqq) exists as well.
++ */
++ spin_lock_irq(q->queue_lock);
++ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
++ if (idle_timer_disabled)
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++ spin_unlock_irq(q->queue_lock);
++#endif
+ }
+
+ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index e4a2553a2d2c..616c0692335a 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -949,9 +949,11 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
+ st->vtime += bfq_delta(served, st->wsum);
+ bfq_forget_idle(st);
+ }
++#ifndef BFQ_MQ
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
+ #endif
++#endif
+ st = bfq_entity_service_tree(&bfqq->entity);
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs, vtime %llu on %p",
+ served, ((st->vtime>>10)*1000)>>12, st);
+
+From abdfb33a3325df55ec0261fd824ca61ddac13575 Mon Sep 17 00:00:00 2001
+From: Luca Miccio <lucmiccio@gmail.com>
+Date: Wed, 8 Nov 2017 19:07:41 +0100
+Subject: [PATCH 51/51] block, bfq-sq, bfq-mq: move debug blkio stats behind
+ CONFIG_DEBUG_BLK_CGROUP
+
+BFQ (both bfq-mq and bfq-sq) currently creates, and updates, its own
+instance of the whole set of blkio statistics that cfq creates. Yet,
+from the comments of Tejun Heo in [1], it turned out that most of
+these statistics are meant/useful only for debugging. This commit
+makes BFQ create the latter, debugging statistics only if the option
+CONFIG_DEBUG_BLK_CGROUP is set.
+
+By doing so, this commit also enables BFQ to enjoy a high performance
+boost. The reason is that, if CONFIG_DEBUG_BLK_CGROUP is not set, then
+BFQ has to update far fewer statistics, and, in particular, not the
+heaviest to update. To give an idea of the benefits, if
+CONFIG_DEBUG_BLK_CGROUP is not set, then, on an Intel i7-4850HQ, and
+with 8 threads doing random I/O in parallel on null_blk (configured
+with 0 latency), the throughput of bfq-mq grows from 310 to 400 KIOPS
+(+30%). We have measured similar or even much higher boosts with other
+CPUs: e.g., +45% with an ARM CortexTM-A53 Octa-core. Our results have
+been obtained and can be reproduced very easily with the script in [1].
+
+[1] https://www.spinics.net/lists/linux-block/msg18943.html
+
+Reported-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ Documentation/block/bfq-iosched.txt | 59 ++++++++++---
+ block/bfq-cgroup-included.c | 163 ++++++++++++++++++++----------------
+ block/bfq-mq-iosched.c | 14 ++--
+ block/bfq-mq.h | 4 +-
+ block/bfq.h | 4 +-
+ 5 files changed, 147 insertions(+), 97 deletions(-)
+
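The compile-time gating used by this commit follows the usual stub pattern, sketched below with simplified guard and helper names (the real guards are BFQ_GROUP_IOSCHED_ENABLED and CONFIG_DEBUG_BLK_CGROUP): when the debug option is off, the debug-only helpers collapse into empty inline functions, so call sites stay untouched while the update cost disappears.

struct sched_group;		/* opaque stand-in for struct bfq_group */

#if defined(GROUP_SCHED_ENABLED) && defined(DEBUG_BLK_CGROUP)

/* full implementation: real accounting on every queued request */
static void stats_update_io_add(struct sched_group *g, unsigned int op)
{
	/* e.g. blkg_rwstat_add(&g->stats.queued, op, 1) in the kernel */
	(void)g;
	(void)op;
}

#else

/* debug stats disabled: empty stub, zero overhead, identical call sites */
static inline void stats_update_io_add(struct sched_group *g, unsigned int op)
{
	(void)g;
	(void)op;
}

#endif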
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index c816c595082d..30ef2dba85ad 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -29,24 +29,41 @@ for that device, by setting low_latency to 0. See Section 3 for
+ details on how to configure BFQ for the desired tradeoff between
+ latency and throughput, or on how to maximize throughput.
+
+-BFQ has a non-null overhead, which limits the maximum IOPS that the
+-CPU can process for a device scheduled with BFQ. To give an idea of
+-the limits on slow or average CPUs, here are, first, the limits of
+-bfq-sq for three different CPUs, on, respectively, an average laptop,
++BFQ has a non-null overhead, which limits the maximum IOPS that a CPU
++can process for a device scheduled with BFQ. To give an idea of the
++limits on slow or average CPUs, here are, first, the limits of bfq-mq
++and bfq for three different CPUs, on, respectively, an average laptop,
+ an old desktop, and a cheap embedded system, in case full hierarchical
+-support is enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set):
+-- Intel i7-4850HQ: 250 KIOPS
+-- AMD A8-3850: 170 KIOPS
+-- ARM CortexTM-A53 Octa-core: 45 KIOPS
+-
+-bfq-mq and bfq instances reach, instead, a higher sustainable
+-throughput. Their limits, on the same systems as above, are, with full
+-hierarchical support enabled (i.e., CONFIG_MQ_BFQ_GROUP_IOSCHED set
+-for bfq-mq, or CONFIG_BFQ_GROUP_IOSCHED set for bfq):
++support is enabled (i.e., CONFIG_MQ_BFQ_GROUP_IOSCHED is set for
++bfq-mq, or CONFIG_BFQ_GROUP_IOSCHED is set for bfq), but
++CONFIG_DEBUG_BLK_CGROUP is not set (Section 4-2):
++- Intel i7-4850HQ: 400 KIOPS
++- AMD A8-3850: 250 KIOPS
++- ARM CortexTM-A53 Octa-core: 80 KIOPS
++
++As for bfq-sq, it cannot reach the above IOPS, because of the
++inherent, lower parallelism of legacy blk and of the components within
++it (including bfq-sq itself). In particular, results with
++CONFIG_DEBUG_BLK_CGROUP unset are rather fluctuating. The limits
++reported below for the case CONFIG_DEBUG_BLK_CGROUP set will however
++provide a lower bound to the limits of bfq-sq.
++
++Turning back to bfq-mq and bfq, if CONFIG_DEBUG_BLK_CGROUP is set (and
++of course full hierarchical support is enabled), then the sustainable
++throughput with bfq-mq and bfq decreases, because all blkio.bfq*
++statistics are created and updated (Section 4-2). For bfq-mq and bfq,
++this leads to the following maximum sustainable throughputs, on the
++same systems as above:
+ - Intel i7-4850HQ: 310 KIOPS
+ - AMD A8-3850: 200 KIOPS
+ - ARM CortexTM-A53 Octa-core: 56 KIOPS
+
++Finally, if CONFIG_DEBUG_BLK_CGROUP is set (and full hierarchical
++support is enabled), then bfq-sq exhibits the following limits:
++- Intel i7-4850HQ: 250 KIOPS
++- AMD A8-3850: 170 KIOPS
++- ARM CortexTM-A53 Octa-core: 45 KIOPS
++
+ BFQ works for multi-queue devices too (bfq and bfq-mq instances).
+
+ The table of contents follows. Impatients can just jump to Section 3.
+@@ -524,6 +541,22 @@ BFQ-specific files is "blkio.bfqX." or "io.bfqX.", where X can be ""
+ to set the weight of a group with the mainline BFQ is blkio.bfq.weight
+ or io.bfq.weight.
+
++As for cgroups-v1 (blkio controller), the exact set of stat files
++created, and kept up-to-date by bfq*, depends on whether
++CONFIG_DEBUG_BLK_CGROUP is set. If it is set, then bfq* creates all
++the stat files documented in
++Documentation/cgroup-v1/blkio-controller.txt. If, instead,
++CONFIG_DEBUG_BLK_CGROUP is not set, then bfq* creates only the files
++blkio.bfq*.io_service_bytes
++blkio.bfq*.io_service_bytes_recursive
++blkio.bfq*.io_serviced
++blkio.bfq*.io_serviced_recursive
++
++The value of CONFIG_DEBUG_BLK_CGROUP greatly influences the maximum
++throughput sustainable with bfq*, because updating the blkio.bfq*
++stats is rather costly, especially for some of the stats enabled by
++CONFIG_DEBUG_BLK_CGROUP.
++
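++For instance, with cgroups-v1 these files can be inspected directly
++from user space. The sketch below is illustrative only: it assumes the
++blkio controller is mounted at the conventional /sys/fs/cgroup/blkio
++path and that the plain "bfq" instance (X == "") is in use.
++
++#include <stdio.h>
++
++int main(void)
++{
++        /* Path and instance name are assumptions, see above. */
++        FILE *f = fopen("/sys/fs/cgroup/blkio/blkio.bfq.io_serviced", "r");
++        char line[256];
++
++        if (!f) {
++                perror("blkio.bfq.io_serviced");
++                return 1;
++        }
++        while (fgets(line, sizeof(line), f))
++                fputs(line, stdout);
++        fclose(f);
++        return 0;
++}
++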
+ Parameters to set
+ -----------------
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 631e53d9150d..562b0ce581a7 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -15,7 +15,7 @@
+ * file.
+ */
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+
+ /* bfqg stats flags */
+ enum bfqg_stats_flags {
+@@ -155,6 +155,63 @@ static void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
+ bfqg_stats_update_group_wait_time(stats);
+ }
+
++static void bfqg_stats_update_io_add(struct bfq_group *bfqg,
++ struct bfq_queue *bfqq, unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.queued, op, 1);
++ bfqg_stats_end_empty_time(&bfqg->stats);
++ if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
++ bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
++}
++
++static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.queued, op, -1);
++}
++
++static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.merged, op, 1);
++}
++
++static void bfqg_stats_update_completion(struct bfq_group *bfqg,
++ uint64_t start_time, uint64_t io_start_time, unsigned int op)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++ unsigned long long now = sched_clock();
++
++ if (time_after64(now, io_start_time))
++ blkg_rwstat_add(&stats->service_time, op,
++ now - io_start_time);
++ if (time_after64(io_start_time, start_time))
++ blkg_rwstat_add(&stats->wait_time, op,
++ io_start_time - start_time);
++}
++
++#else /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */
++
++static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
++ struct bfq_queue *bfqq, unsigned int op) { }
++static inline void
++bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
++static inline void
++bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
++static inline void bfqg_stats_update_completion(struct bfq_group *bfqg,
++ uint64_t start_time, uint64_t io_start_time,
++ unsigned int op) { }
++static inline void
++bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
++ struct bfq_group *curr_bfqg) { }
++static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { }
++static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
++
++#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */
++
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct blkcg_policy blkcg_policy_bfq;
+
+ /*
+@@ -247,44 +304,10 @@ static void bfqg_and_blkg_put(struct bfq_group *bfqg)
+ }
+ #endif
+
+-static void bfqg_stats_update_io_add(struct bfq_group *bfqg,
+- struct bfq_queue *bfqq,
+- unsigned int op)
+-{
+- blkg_rwstat_add(&bfqg->stats.queued, op, 1);
+- bfqg_stats_end_empty_time(&bfqg->stats);
+- if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
+- bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
+-}
+-
+-static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
+-{
+- blkg_rwstat_add(&bfqg->stats.queued, op, -1);
+-}
+-
+-static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
+-{
+- blkg_rwstat_add(&bfqg->stats.merged, op, 1);
+-}
+-
+-static void bfqg_stats_update_completion(struct bfq_group *bfqg,
+- uint64_t start_time, uint64_t io_start_time,
+- unsigned int op)
+-{
+- struct bfqg_stats *stats = &bfqg->stats;
+- unsigned long long now = sched_clock();
+-
+- if (time_after64(now, io_start_time))
+- blkg_rwstat_add(&stats->service_time, op,
+- now - io_start_time);
+- if (time_after64(io_start_time, start_time))
+- blkg_rwstat_add(&stats->wait_time, op,
+- io_start_time - start_time);
+-}
+-
+ /* @stats = 0 */
+ static void bfqg_stats_reset(struct bfqg_stats *stats)
+ {
++#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* queued stats shouldn't be cleared */
+ blkg_rwstat_reset(&stats->merged);
+ blkg_rwstat_reset(&stats->service_time);
+@@ -296,6 +319,7 @@ static void bfqg_stats_reset(struct bfqg_stats *stats)
+ blkg_stat_reset(&stats->group_wait_time);
+ blkg_stat_reset(&stats->idle_time);
+ blkg_stat_reset(&stats->empty_time);
++#endif
+ }
+
+ /* @to += @from */
+@@ -304,6 +328,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
+ if (!to || !from)
+ return;
+
++#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* queued stats shouldn't be cleared */
+ blkg_rwstat_add_aux(&to->merged, &from->merged);
+ blkg_rwstat_add_aux(&to->service_time, &from->service_time);
+@@ -316,6 +341,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
+ blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
+ blkg_stat_add_aux(&to->idle_time, &from->idle_time);
+ blkg_stat_add_aux(&to->empty_time, &from->empty_time);
++#endif
+ }
+
+ /*
+@@ -367,6 +393,7 @@ static void bfq_init_entity(struct bfq_entity *entity,
+
+ static void bfqg_stats_exit(struct bfqg_stats *stats)
+ {
++#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_rwstat_exit(&stats->merged);
+ blkg_rwstat_exit(&stats->service_time);
+ blkg_rwstat_exit(&stats->wait_time);
+@@ -378,10 +405,12 @@ static void bfqg_stats_exit(struct bfqg_stats *stats)
+ blkg_stat_exit(&stats->group_wait_time);
+ blkg_stat_exit(&stats->idle_time);
+ blkg_stat_exit(&stats->empty_time);
++#endif
+ }
+
+ static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
+ {
++#ifdef CONFIG_DEBUG_BLK_CGROUP
+ if (blkg_rwstat_init(&stats->merged, gfp) ||
+ blkg_rwstat_init(&stats->service_time, gfp) ||
+ blkg_rwstat_init(&stats->wait_time, gfp) ||
+@@ -396,6 +425,7 @@ static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
+ bfqg_stats_exit(stats);
+ return -ENOMEM;
+ }
++#endif
+
+ return 0;
+ }
+@@ -1003,6 +1033,7 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
+ return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
+ }
+
++#ifdef CONFIG_DEBUG_BLK_CGROUP
+ static int bfqg_print_stat(struct seq_file *sf, void *v)
+ {
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
+@@ -1108,6 +1139,7 @@ static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
+ 0, false);
+ return 0;
+ }
++#endif /* CONFIG_DEBUG_BLK_CGROUP */
+
+ static struct bfq_group *
+ bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
+@@ -1137,15 +1169,6 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+
+ /* statistics, covers only the tasks in the bfqg */
+ {
+- .name = BFQ_CGROUP_FNAME(time),
+- .private = offsetof(struct bfq_group, stats.time),
+- .seq_show = bfqg_print_stat,
+- },
+- {
+- .name = BFQ_CGROUP_FNAME(sectors),
+- .seq_show = bfqg_print_stat_sectors,
+- },
+- {
+ .name = BFQ_CGROUP_FNAME(io_service_bytes),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_bytes,
+@@ -1155,6 +1178,16 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_ios,
+ },
++#ifdef CONFIG_DEBUG_BLK_CGROUP
++ {
++ .name = BFQ_CGROUP_FNAME(time),
++ .private = offsetof(struct bfq_group, stats.time),
++ .seq_show = bfqg_print_stat,
++ },
++ {
++ .name = BFQ_CGROUP_FNAME(sectors),
++ .seq_show = bfqg_print_stat_sectors,
++ },
+ {
+ .name = BFQ_CGROUP_FNAME(io_service_time),
+ .private = offsetof(struct bfq_group, stats.service_time),
+@@ -1175,18 +1208,10 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+ .private = offsetof(struct bfq_group, stats.queued),
+ .seq_show = bfqg_print_rwstat,
+ },
++#endif /* CONFIG_DEBUG_BLK_CGROUP */
+
+ /* the same statistics which cover the bfqg and its descendants */
+ {
+- .name = BFQ_CGROUP_FNAME(time_recursive),
+- .private = offsetof(struct bfq_group, stats.time),
+- .seq_show = bfqg_print_stat_recursive,
+- },
+- {
+- .name = BFQ_CGROUP_FNAME(sectors_recursive),
+- .seq_show = bfqg_print_stat_sectors_recursive,
+- },
+- {
+ .name = BFQ_CGROUP_FNAME(io_service_bytes_recursive),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_bytes_recursive,
+@@ -1196,6 +1221,16 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_ios_recursive,
+ },
++#ifdef CONFIG_DEBUG_BLK_CGROUP
++ {
++ .name = BFQ_CGROUP_FNAME(time_recursive),
++ .private = offsetof(struct bfq_group, stats.time),
++ .seq_show = bfqg_print_stat_recursive,
++ },
++ {
++ .name = BFQ_CGROUP_FNAME(sectors_recursive),
++ .seq_show = bfqg_print_stat_sectors_recursive,
++ },
+ {
+ .name = BFQ_CGROUP_FNAME(io_service_time_recursive),
+ .private = offsetof(struct bfq_group, stats.service_time),
+@@ -1240,6 +1275,7 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+ .private = offsetof(struct bfq_group, stats.dequeue),
+ .seq_show = bfqg_print_stat,
+ },
++#endif /* CONFIG_DEBUG_BLK_CGROUP */
+ { } /* terminate */
+ };
+
+@@ -1257,25 +1293,6 @@ static struct cftype bfq_blkg_files[] = {
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+-static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
+- struct bfq_queue *bfqq, unsigned int op) { }
+-static inline void
+-bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
+-static inline void
+-bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
+-static inline void bfqg_stats_update_completion(struct bfq_group *bfqg,
+- uint64_t start_time, uint64_t io_start_time,
+- unsigned int op) { }
+-static inline void
+-bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
+- struct bfq_group *curr_bfqg) { }
+-static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { }
+-static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
+-static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
+-static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
+-static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
+-static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
+-
+ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ struct bfq_group *bfqg) {}
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index ab3b83d612c2..0c09609a6099 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4070,14 +4070,14 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+ struct request *rq;
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ struct bfq_queue *in_serv_queue, *bfqq;
+ bool waiting_rq, idle_timer_disabled;
+ #endif
+
+ spin_lock_irq(&bfqd->lock);
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ in_serv_queue = bfqd->in_service_queue;
+ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
+
+@@ -4091,7 +4091,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ #endif
+ spin_unlock_irq(&bfqd->lock);
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ bfqq = rq ? RQ_BFQQ(rq) : NULL;
+ if (!idle_timer_disabled && !bfqq)
+ return rq;
+@@ -4691,7 +4691,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ bool idle_timer_disabled = false;
+ unsigned int cmd_flags;
+ #endif
+@@ -4726,7 +4726,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ BUG_ON(!(rq->rq_flags & RQF_GOT));
+ rq->rq_flags &= ~RQF_GOT;
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ idle_timer_disabled = __bfq_insert_request(bfqd, rq);
+ /*
+ * Update bfqq, because, if a queue merge has occurred
+@@ -4744,7 +4744,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ q->last_merge = rq;
+ }
+ }
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ /*
+ * Cache cmd_flags before releasing scheduler lock, because rq
+ * may disappear afterwards (for example, because of a request
+@@ -4753,7 +4753,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ cmd_flags = rq->cmd_flags;
+ #endif
+ spin_unlock_irq(&bfqd->lock);
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ if (!bfqq)
+ return;
+ /*
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 7ed2cc29be57..1cb05bb853d2 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -784,7 +784,7 @@ enum bfqq_expiration {
+
+
+ struct bfqg_stats {
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ /* number of ios merged */
+ struct blkg_rwstat merged;
+ /* total time spent on device in ns, may not be accurate w/ queueing */
+@@ -812,7 +812,7 @@ struct bfqg_stats {
+ uint64_t start_idle_time;
+ uint64_t start_empty_time;
+ uint16_t flags;
+-#endif
++#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */
+ };
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+diff --git a/block/bfq.h b/block/bfq.h
+index 15d326f466b7..47cd4d5a8c32 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -791,7 +791,7 @@ enum bfqq_expiration {
+
+
+ struct bfqg_stats {
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ /* number of ios merged */
+ struct blkg_rwstat merged;
+ /* total time spent on device in ns, may not be accurate w/ queueing */
+@@ -819,7 +819,7 @@ struct bfqg_stats {
+ uint64_t start_idle_time;
+ uint64_t start_empty_time;
+ uint16_t flags;
+-#endif
++#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */
+ };
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
diff --git a/sys-kernel/linux-image-redcore-lts/files/0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch b/sys-kernel/linux-image-redcore-lts/files/0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
new file mode 100644
index 00000000..8f2c8783
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
@@ -0,0 +1,9571 @@
+diff -Nur a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
+--- a/arch/powerpc/platforms/cell/spufs/sched.c 2018-10-10 07:54:28.000000000 +0100
++++ b/arch/powerpc/platforms/cell/spufs/sched.c 2018-11-03 16:06:32.704528679 +0000
+@@ -65,11 +65,6 @@
+ static struct timer_list spuloadavg_timer;
+
+ /*
+- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
+- */
+-#define NORMAL_PRIO 120
+-
+-/*
+ * Frequency of the spu scheduler tick. By default we do one SPU scheduler
+ * tick for every 10 CPU scheduler ticks.
+ */
+diff -Nur a/arch/x86/Kconfig b/arch/x86/Kconfig
+--- a/arch/x86/Kconfig 2018-11-03 16:00:51.897619785 +0000
++++ b/arch/x86/Kconfig 2018-11-03 16:06:32.705528711 +0000
+@@ -963,10 +963,26 @@
+ depends on SMP
+ ---help---
+ SMT scheduler support improves the CPU scheduler's decision making
+- when dealing with Intel Pentium 4 chips with HyperThreading at a
++ when dealing with Intel P4/Core 2 chips with HyperThreading at a
+ cost of slightly increased overhead in some places. If unsure say
+ N here.
+
++config SMT_NICE
++ bool "SMT (Hyperthreading) aware nice priority and policy support"
++ depends on SCHED_MUQSS && SCHED_SMT
++ default y
++ ---help---
++ Enabling Hyperthreading on Intel CPUs decreases the effectiveness
++ of the use of 'nice' levels and different scheduling policies
++ (e.g. realtime) due to sharing of CPU power between hyperthreads.
++ SMT nice support makes each logical CPU aware of what is running on
++ its hyperthread siblings, maintaining appropriate distribution of
++ CPU according to nice levels and scheduling policies at the expense
++ of slightly increased overhead.
++
++ If unsure say Y here.
++
++
+ config SCHED_MC
+ def_bool y
+ prompt "Multi-core scheduler support"
+diff -Nur a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt
+--- a/Documentation/scheduler/sched-BFS.txt 1970-01-01 01:00:00.000000000 +0100
++++ b/Documentation/scheduler/sched-BFS.txt 2018-11-03 16:06:32.702528615 +0000
+@@ -0,0 +1,351 @@
++BFS - The Brain Fuck Scheduler by Con Kolivas.
++
++Goals.
++
++The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to
++completely do away with the complex designs of the past for the cpu process
++scheduler and instead implement one that is very simple in basic design.
++The main focus of BFS is to achieve excellent desktop interactivity and
++responsiveness without heuristics and tuning knobs that are difficult to
++understand, impossible to model and predict the effect of, and when tuned to
++one workload cause massive detriment to another.
++
++
++Design summary.
++
++BFS is best described as a single runqueue, O(n) lookup, earliest effective
++virtual deadline first design, loosely based on EEVDF (earliest eligible virtual
++deadline first) and my previous Staircase Deadline scheduler. Each component
++shall be described in order to understand the significance of, and reasoning for
++it. The codebase when the first stable version was released was approximately
++9000 lines less code than the existing mainline linux kernel scheduler (in
++2.6.31). This does not even take into account the removal of documentation and
++the cgroups code that is not used.
++
++Design reasoning.
++
++The single runqueue refers to the queued but not running processes for the
++entire system, regardless of the number of CPUs. The reason for going back to
++a single runqueue design is that once multiple runqueues are introduced,
++per-CPU or otherwise, there will be complex interactions as each runqueue will
++be responsible for the scheduling latency and fairness of the tasks only on its
++own runqueue, and to achieve fairness and low latency across multiple CPUs, any
++advantage in throughput of having CPU local tasks causes other disadvantages.
++This is due to requiring a very complex balancing system to at best achieve some
++semblance of fairness across CPUs and can only maintain relatively low latency
++for tasks bound to the same CPUs, not across them. To increase said fairness
++and latency across CPUs, the advantage of local runqueue locking, which makes
++for better scalability, is lost due to having to grab multiple locks.
++
++A significant feature of BFS is that all accounting is done purely based on CPU
++used and nowhere is sleep time used in any way to determine entitlement or
++interactivity. Interactivity "estimators" that use some kind of sleep/run
++algorithm are doomed to fail to detect all interactive tasks, and to falsely tag
++tasks that aren't interactive as being so. The reason for this is that it is
++close to impossible to determine that when a task is sleeping, whether it is
++doing it voluntarily, as in a userspace application waiting for input in the
++form of a mouse click or otherwise, or involuntarily, because it is waiting for
++another thread, process, I/O, kernel activity or whatever. Thus, such an
++estimator will introduce corner cases, and more heuristics will be required to
++cope with those corner cases, introducing more corner cases and failed
++interactivity detection and so on. Interactivity in BFS is built into the design
++by virtue of the fact that tasks that are waking up have not used up their quota
++of CPU time, and have earlier effective deadlines, thereby making it very likely
++they will preempt any CPU bound task of equivalent nice level. See below for
++more information on the virtual deadline mechanism. Even if they do not preempt
++a running task, because the rr interval is guaranteed to have a bound upper
++limit on how long a task will wait for, it will be scheduled within a timeframe
++that will not cause visible interface jitter.
++
++
++Design details.
++
++Task insertion.
++
++BFS inserts tasks into each relevant queue as an O(1) insertion into a double
++linked list. On insertion, *every* running queue is checked to see if the newly
++queued task can run on any idle queue, or preempt the lowest running task on the
++system. This is how the cross-CPU scheduling of BFS achieves significantly lower
++latency per extra CPU the system has. In this case the lookup is, in the worst
++case scenario, O(n) where n is the number of CPUs on the system.
++
++Data protection.
++
++BFS has one single lock protecting the process local data of every task in the
++global queue. Thus every insertion, removal and modification of task data in the
++global runqueue needs to grab the global lock. However, once a task is taken by
++a CPU, the CPU has its own local data copy of the running process' accounting
++information which only that CPU accesses and modifies (such as during a
++timer tick) thus allowing the accounting data to be updated lockless. Once a
++CPU has taken a task to run, it removes it from the global queue. Thus the
++global queue only ever has, at most,
++
++ (number of tasks requesting cpu time) - (number of logical CPUs) + 1
++
++tasks in the global queue. This value is relevant for the time taken to look up
++tasks during scheduling. This will increase if many tasks with CPU affinity set
++in their policy to limit which CPUs they're allowed to run on outnumber the
++number of CPUs. The +1 is because when rescheduling a task, the CPU's
++currently running task is put back on the queue. Lookup will be described after
++the virtual deadline mechanism is explained.
++
++Virtual deadline.
++
++The key to achieving low latency, scheduling fairness, and "nice level"
++distribution in BFS is entirely in the virtual deadline mechanism. The one
++tunable in BFS is the rr_interval, or "round robin interval". This is the
++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
++tasks of the same nice level will be running for, or looking at it the other
++way around, the longest duration two tasks of the same nice level will be
++delayed for. When a task requests cpu time, it is given a quota (time_slice)
++equal to the rr_interval and a virtual deadline. The virtual deadline is
++offset from the current time in jiffies by this equation:
++
++ jiffies + (prio_ratio * rr_interval)
++
++The prio_ratio is determined as a ratio compared to the baseline of nice -20
++and increases by 10% per nice level. The deadline is a virtual one only in that
++no guarantee is placed that a task will actually be scheduled by this time, but
++it is used to compare which task should go next. There are three components to
++how a task is next chosen. First is time_slice expiration. If a task runs out
++of its time_slice, it is descheduled, the time_slice is refilled, and the
++deadline reset to that formula above. Second is sleep, where a task no longer
++is requesting CPU for whatever reason. The time_slice and deadline are _not_
++adjusted in this case and are just carried over for when the task is next
++scheduled. Third is preemption, and that is when a newly waking task is deemed
++higher priority than a currently running task on any cpu by virtue of the fact
++that it has an earlier virtual deadline than the currently running task. The
++earlier deadline is the key to which task is next chosen for the first and
++second cases. Once a task is descheduled, it is put back on the queue, and an
++O(n) lookup of all queued-but-not-running tasks is done to determine which has
++the earliest deadline and that task is chosen to receive CPU next.
++
++The CPU proportion of different nice tasks works out to be approximately the
++
++ (prio_ratio difference)^2
++
++The reason it is squared is that a task's deadline does not change while it is
++running unless it runs out of time_slice. Thus, even if the time actually
++passes the deadline of another task that is queued, it will not get CPU time
++unless the current running task deschedules, and the time "base" (jiffies) is
++constantly moving.
++
++Task lookup.
++
++BFS has 103 priority queues. 100 of these are dedicated to the static priority
++of realtime tasks, and the remaining 3 are, in order of best to worst priority,
++SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority
++scheduling). When a task of these priorities is queued, a bitmap of running
++priorities is set showing which of these priorities has tasks waiting for CPU
++time. When a CPU is made to reschedule, the lookup for the next task to get
++CPU time is performed in the following way:
++
++First the bitmap is checked to see what static priority tasks are queued. If
++any realtime priorities are found, the corresponding queue is checked and the
++first task listed there is taken (provided CPU affinity is suitable) and lookup
++is complete. If the priority corresponds to a SCHED_ISO task, they are also
++taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds
++to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this
++stage, every task in the runlist that corresponds to that priority is checked
++to see which has the earliest set deadline, and (provided it has suitable CPU
++affinity) it is taken off the runqueue and given the CPU. If a task has an
++expired deadline, it is taken and the rest of the lookup aborted (as they are
++chosen in FIFO order).
++
++Thus, the lookup is O(n) in the worst case only, where n is as described
++earlier, as tasks may be chosen before the whole task list is looked over.
++
++
++Scalability.
++
++The major limitations of BFS will be that of scalability, as the separate
++runqueue designs will have less lock contention as the number of CPUs rises.
++However they do not scale linearly even with separate runqueues as multiple
++runqueues will need to be locked concurrently on such designs to be able to
++achieve fair CPU balancing, to try and achieve some sort of nice-level fairness
++across CPUs, and to achieve low enough latency for tasks on a busy CPU when
++other CPUs would be more suited. BFS has the advantage that it requires no
++balancing algorithm whatsoever, as balancing occurs by proxy simply because
++all CPUs draw off the global runqueue, in priority and deadline order. Despite
++the fact that scalability is _not_ the prime concern of BFS, it both shows very
++good scalability to smaller numbers of CPUs and is likely a more scalable design
++at these numbers of CPUs.
++
++It also has some very low overhead scalability features built into the design
++when it has been deemed their overhead is so marginal that they're worth adding.
++The first is the local copy of the running process' data to the CPU it's running
++on to allow that data to be updated lockless where possible. Then there is
++deference paid to the last CPU a task was running on, by trying that CPU first
++when looking for an idle CPU to use the next time it's scheduled. Finally there
++is the notion of cache locality beyond the last running CPU. The sched_domains
++information is used to determine the relative virtual "cache distance" that
++other CPUs have from the last CPU a task was running on. CPUs with shared
++caches, such as SMT siblings, or multicore CPUs with shared caches, are treated
++as cache local. CPUs without shared caches are treated as not cache local, and
++CPUs on different NUMA nodes are treated as very distant. This "relative cache
++distance" is used by modifying the virtual deadline value when doing lookups.
++Effectively, the deadline is unaltered between "cache local" CPUs, doubled for
++"cache distant" CPUs, and quadrupled for "very distant" CPUs. The reasoning
++behind the doubling of deadlines is as follows. The real cost of migrating a
++task from one CPU to another is entirely dependant on the cache footprint of
++the task, how cache intensive the task is, how long it's been running on that
++CPU to take up the bulk of its cache, how big the CPU cache is, how fast and
++how layered the CPU cache is, how fast a context switch is... and so on. In
++other words, it's close to random in the real world where we do more than just
++one sole workload. The only thing we can be sure of is that it's not free. So
++BFS uses the principle that an idle CPU is a wasted CPU and utilising idle CPUs
++is more important than cache locality, and cache locality only plays a part
++after that. Doubling the effective deadline is based on the premise that the
++"cache local" CPUs will tend to work on the same tasks up to double the number
++of cache local CPUs, and once the workload is beyond that amount, it is likely
++that none of the tasks are cache warm anywhere anyway. The quadrupling for NUMA
++is a value I pulled out of my arse.
++
++When choosing an idle CPU for a waking task, the cache locality is determined
++according to where the task last ran and then idle CPUs are ranked from best
++to worst to choose the most suitable idle CPU based on cache locality, NUMA
++node locality and hyperthread sibling busyness. They are chosen in the
++following preference (if idle):
++
++* Same core, idle or busy cache, idle threads
++* Other core, same cache, idle or busy cache, idle threads.
++* Same node, other CPU, idle cache, idle threads.
++* Same node, other CPU, busy cache, idle threads.
++* Same core, busy threads.
++* Other core, same cache, busy threads.
++* Same node, other CPU, busy threads.
++* Other node, other CPU, idle cache, idle threads.
++* Other node, other CPU, busy cache, idle threads.
++* Other node, other CPU, busy threads.
++
++This shows the SMT or "hyperthread" awareness in the design as well which will
++choose a real idle core first before a logical SMT sibling which already has
++tasks on the physical CPU.
++
++Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
++However this benchmarking was performed on an earlier design that was far less
++scalable than the current one so it's hard to know how scalable it is in terms
++of both CPUs (due to the global runqueue) and heavily loaded machines (due to
++O(n) lookup) at this stage. Note that in terms of scalability, the number of
++_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x)
++quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark
++results are very promising indeed, without needing to tweak any knobs, features
++or options. Benchmark contributions are most welcome.
++
++
++Features
++
++As the initial prime target audience for BFS was the average desktop user, it
++was designed to not need tweaking, tuning or have features set to obtain benefit
++from it. Thus the number of knobs and features has been kept to an absolute
++minimum and should not require extra user input for the vast majority of cases.
++There are precisely 2 tunables, and 2 extra scheduling policies. The rr_interval
++and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition
++to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is
++support for CGROUPS. The average user should neither need to know what these
++are, nor should they need to be using them to have good desktop behaviour.
++
++rr_interval
++
++There is only one "scheduler" tunable, the round robin interval. This can be
++accessed in
++
++ /proc/sys/kernel/rr_interval
++
++The value is in milliseconds, and the default value is set to 6 on a
++uniprocessor machine, and automatically set to a progressively higher value on
++multiprocessor machines. The reasoning behind increasing the value on more CPUs
++is that the effective latency is decreased by virtue of there being more CPUs on
++BFS (for reasons explained above), and increasing the value allows for less
++cache contention and more throughput. Valid values are from 1 to 1000.
++Decreasing the value will decrease latencies at the cost of decreasing
++throughput, while increasing it will improve throughput, but at the cost of
++worsening latencies. The accuracy of the rr interval is limited by HZ resolution
++of the kernel configuration. Thus, the worst case latencies are usually slightly
++higher than this actual value. The default value of 6 is not an arbitrary one.
++It is based on the fact that humans can detect jitter at approximately 7ms, so
++aiming for much lower latencies is pointless under most circumstances. It is
++worth noting this fact when comparing the latency performance of BFS to other
++schedulers. Worst case latencies being higher than 7ms are far worse than
++average latencies not being in the microsecond range.
++
++Isochronous scheduling.
++
++Isochronous scheduling is a unique scheduling policy designed to provide
++near-real-time performance to unprivileged (ie non-root) users without the
++ability to starve the machine indefinitely. Isochronous tasks (which means
++"same time") are set using, for example, the schedtool application like so:
++
++ schedtool -I -e amarok
++
++This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
++is that it has a priority level between true realtime tasks and SCHED_NORMAL
++which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
++if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
++rate). However if ISO tasks run for more than a tunable finite amount of time,
++they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
++time is the percentage of _total CPU_ available across the machine, configurable
++as a percentage in the following "resource handling" tunable (as opposed to a
++scheduler tunable):
++
++ /proc/sys/kernel/iso_cpu
++
++and is set to 70% by default. It is calculated over a rolling 5 second average.
++Because it is the total CPU available, it means that on a multi CPU machine, it
++is possible to have an ISO task running as realtime scheduling indefinitely on
++just one CPU, as the other CPUs will be available. Setting this to 100 is the
++equivalent of giving all users SCHED_RR access and setting it to 0 removes the
++ability to run any pseudo-realtime tasks.
++
++A feature of BFS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
++Because some applications constantly set their policy as well as their nice
++level, there is potential for them to undo the override specified by the user
++on the command line of setting the policy to SCHED_ISO. To counter this, once
++a task has been set to SCHED_ISO policy, it needs superuser privileges to set
++it back to SCHED_NORMAL. This will ensure the task remains ISO and all child
++processes and threads will also inherit the ISO policy.
++
++Idleprio scheduling.
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a task
++_only_ when the CPU would be otherwise idle. The idea behind this is to allow
++ultra low priority tasks to be run in the background that have virtually no
++effect on the foreground tasks. This is ideally suited to distributed computing
++clients (like setiathome, folding, mprime etc) but can also be used to start
++a video encode or so on without any slowdown of other tasks. To avoid this
++policy from grabbing shared resources and holding them indefinitely, if it
++detects a state where the task is waiting on I/O, the machine is about to
++suspend to ram and so on, it will transiently schedule them as SCHED_NORMAL. As
++per the Isochronous task management, once a task has been scheduled as IDLEPRIO,
++it cannot be put back to SCHED_NORMAL without superuser privileges. Tasks can
++be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++ schedtool -D -e ./mprime
++
++Subtick accounting.
++
++It is surprisingly difficult to get accurate CPU accounting, and in many cases,
++the accounting is done by simply determining what is happening at the precise
++moment a timer tick fires off. This becomes increasingly inaccurate as the
++timer tick frequency (HZ) is lowered. It is possible to create an application
++which uses almost 100% CPU, yet by being descheduled at the right time, records
++zero CPU usage. While the main problem with this is that there are possible
++security implications, it is also difficult to determine how much CPU a task
++really does use. BFS tries to use the sub-tick accounting from the TSC clock,
++where possible, to determine real CPU usage. This is not entirely reliable, but
++is far more likely to produce accurate CPU usage data than the existing designs
++and will not show tasks as consuming no CPU usage when they actually are. Thus,
++the amount of CPU reported as being used by BFS will more accurately represent
++how much CPU the task itself is using (as is shown for example by the 'time'
++application), so the reported values may be quite different to other schedulers.
++Values reported as the 'load' are more prone to problems with this design, but
++per process values are closer to real usage. When comparing throughput of BFS
++to other designs, it is important to compare the actual completed work in terms
++of total wall clock time taken and total work done, rather than the reported
++"cpu usage".
++
++
++Con Kolivas <kernel@kolivas.org> Fri Aug 27 2010
+diff -Nur a/Documentation/scheduler/sched-MuQSS.txt b/Documentation/scheduler/sched-MuQSS.txt
+--- a/Documentation/scheduler/sched-MuQSS.txt 1970-01-01 01:00:00.000000000 +0100
++++ b/Documentation/scheduler/sched-MuQSS.txt 2018-11-03 16:06:32.702528615 +0000
+@@ -0,0 +1,347 @@
++MuQSS - The Multiple Queue Skiplist Scheduler by Con Kolivas.
++
++MuQSS is a per-cpu runqueue variant of the original BFS scheduler with
++one 8 level skiplist per runqueue, and fine grained locking for much more
++scalability.
++
++
++Goals.
++
++The goal of the Multiple Queue Skiplist Scheduler, referred to as MuQSS from
++here on (pronounced mux) is to completely do away with the complex designs of
++the past for the cpu process scheduler and instead implement one that is very
++simple in basic design. The main focus of MuQSS is to achieve excellent desktop
++interactivity and responsiveness without heuristics and tuning knobs that are
++difficult to understand, impossible to model and predict the effect of, and when
++tuned to one workload cause massive detriment to another, while still being
++scalable to many CPUs and processes.
++
++
++Design summary.
++
++MuQSS is best described as per-cpu multiple runqueue, O(log n) insertion, O(1)
++lookup, earliest effective virtual deadline first tickless design, loosely based
++on EEVDF (earliest eligible virtual deadline first) and my previous Staircase
++Deadline scheduler, and evolved from the single runqueue O(n) BFS scheduler.
++Each component shall be described in order to understand the significance of,
++and reasoning for it.
++
++
++Design reasoning.
++
++In BFS, the use of a single runqueue across all CPUs meant that each CPU would
++need to scan the entire runqueue looking for the process with the earliest
++deadline and schedule that next, regardless of which CPU it originally came
++from. This made BFS deterministic with respect to latency and provided
++guaranteed latencies dependent on number of processes and CPUs. The single
++runqueue, however, meant that all CPUs would compete for the single lock
++protecting it, which would lead to increasing lock contention as the number of
++CPUs rose and appeared to limit scalability of common workloads beyond 16
++logical CPUs. Additionally, the O(n) lookup of the runqueue list obviously
++increased overhead proportionate to the number of queued processes and led to
++cache thrashing while iterating over the linked list.
++
++MuQSS is an evolution of BFS, designed to maintain the same scheduling
++decision mechanism and be virtually deterministic without relying on the
++constrained design of the single runqueue by splitting out the single runqueue
++to be per-CPU and use skiplists instead of linked lists.
++
++The original reason for going back to a single runqueue design for BFS was that
++once multiple runqueues are introduced, per-CPU or otherwise, there will be
++complex interactions as each runqueue will be responsible for the scheduling
++latency and fairness of the tasks only on its own runqueue, and to achieve
++fairness and low latency across multiple CPUs, any advantage in throughput of
++having CPU local tasks causes other disadvantages. This is due to requiring a
++very complex balancing system to at best achieve some semblance of fairness
++across CPUs and can only maintain relatively low latency for tasks bound to the
++same CPUs, not across them. To increase said fairness and latency across CPUs,
++the advantage of local runqueue locking, which makes for better scalability, is
++lost due to having to grab multiple locks.
++
++MuQSS works around the problems inherent in multiple runqueue designs by
++making its skip lists priority ordered and through novel use of lockless
++examination of each other runqueue it can decide if it should take the earliest
++deadline task from another runqueue for latency reasons, or for CPU balancing
++reasons. It still does not have a balancing system, choosing to allow the
++next task scheduling decision and task wakeup CPU choice to allow balancing to
++happen by virtue of its choices.
++
++
++Design details.
++
++Custom skip list implementation:
++
++To avoid the overhead of building up and tearing down skip list structures,
++the variant used by MuQSS has a number of optimisations making it specific for
++its use case in the scheduler. It uses static arrays of 8 'levels' instead of
++building up and tearing down structures dynamically. This makes each runqueue
++only scale O(log N) up to 64k tasks. However as there is one runqueue per CPU
++it means that it scales O(log N) up to 64k x number of logical CPUs which is
++far beyond the realistic task limits each CPU could handle. By being 8 levels
++it also makes the array exactly one cacheline in size. Additionally, each
++skip list node is bidirectional making insertion and removal amortised O(1),
++being O(k) where k is 1-8. Uniquely, we are only ever interested in the very
++first entry in each list at all times with MuQSS, so there is never a need to
++do a search and thus look up is always O(1). In interactive mode, the queues
++will be searched beyond their first entry if the first task is not suitable
++for affinity or SMT nice reasons.
++
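++To make the shape of this structure concrete, a minimal sketch of such
++a node follows. The type and field names are illustrative only and are
++not the actual MuQSS definitions.
++
++#define SKIPLIST_LEVELS 8       /* static levels, one cacheline as above */
++
++struct skiplist_node {
++        struct skiplist_node *next[SKIPLIST_LEVELS];
++        struct skiplist_node *prev[SKIPLIST_LEVELS];    /* bidirectional */
++        int level;                      /* highest level this node occupies */
++        unsigned long long key;         /* static prio, then virtual deadline */
++};
++
++/* O(1) lookup: the best candidate is always the first level-0 entry. */
++static inline struct skiplist_node *skiplist_first(struct skiplist_node *head)
++{
++        return head->next[0];
++}
++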
++Task insertion:
++
++MuQSS inserts tasks into a per CPU runqueue as an O(log N) insertion into
++a custom skip list as described above (based on the original design by William
++Pugh). Insertion is ordered in such a way that there is never a need to do a
++search by ordering tasks according to static priority primarily, and then
++virtual deadline at the time of insertion.
++
++Niffies:
++
++Niffies are a monotonic forward moving timer not unlike the "jiffies" but are
++of nanosecond resolution. Niffies are calculated per-runqueue from the high
++resolution TSC timers, and in order to maintain fairness are synchronised
++between CPUs whenever both runqueues are locked concurrently.
++
++Virtual deadline:
++
++The key to achieving low latency, scheduling fairness, and "nice level"
++distribution in MuQSS is entirely in the virtual deadline mechanism. The one
++tunable in MuQSS is the rr_interval, or "round robin interval". This is the
++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
++tasks of the same nice level will be running for, or looking at it the other
++way around, the longest duration two tasks of the same nice level will be
++delayed for. When a task requests cpu time, it is given a quota (time_slice)
++equal to the rr_interval and a virtual deadline. The virtual deadline is
++offset from the current time in niffies by this equation:
++
++ niffies + (prio_ratio * rr_interval)
++
++The prio_ratio is determined as a ratio compared to the baseline of nice -20
++and increases by 10% per nice level. The deadline is a virtual one only in that
++no guarantee is placed that a task will actually be scheduled by this time, but
++it is used to compare which task should go next. There are three components to
++how a task is next chosen. First is time_slice expiration. If a task runs out
++of its time_slice, it is descheduled, the time_slice is refilled, and the
++deadline reset to that formula above. Second is sleep, where a task no longer
++is requesting CPU for whatever reason. The time_slice and deadline are _not_
++adjusted in this case and are just carried over for when the task is next
++scheduled. Third is preemption, and that is when a newly waking task is deemed
++higher priority than a currently running task on any cpu by virtue of the fact
++that it has an earlier virtual deadline than the currently running task. The
++earlier deadline is the key to which task is next chosen for the first and
++second cases.
++
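++As a rough, self-contained illustration of the arithmetic above (the
++function names, the percentage scaling and the baseline value of 100
++are assumptions made for illustration only; the real scheduler uses
++its own precomputed fixed-point ratio tables):
++
++#include <stdio.h>
++
++static unsigned long long prio_ratio(int nice)
++{
++        unsigned long long ratio = 100;         /* baseline at nice -20 */
++        int level;
++
++        for (level = -20; level < nice; level++)
++                ratio += ratio / 10;            /* +10% per nice level */
++        return ratio;
++}
++
++static unsigned long long virtual_deadline(unsigned long long niffies,
++                                           int nice,
++                                           unsigned long long rr_interval)
++{
++        /* niffies + (prio_ratio * rr_interval), scaled back to the
++         * nice -20 baseline of 100 */
++        return niffies + prio_ratio(nice) * rr_interval / 100;
++}
++
++int main(void)
++{
++        /* two tasks queued at the same instant: nice 0 versus nice 19 */
++        printf("nice 0  deadline offset: %llu\n", virtual_deadline(0, 0, 6));
++        printf("nice 19 deadline offset: %llu\n", virtual_deadline(0, 19, 6));
++        return 0;
++}
++
++With rr_interval expressed in milliseconds, this shows how higher nice
++levels push the virtual deadline further into the future, which is what
++produces the squared CPU distribution discussed next.
++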
++The CPU proportion of different nice tasks works out to be approximately the
++
++ (prio_ratio difference)^2
++
++The reason it is squared is that a task's deadline does not change while it is
++running unless it runs out of time_slice. Thus, even if the time actually
++passes the deadline of another task that is queued, it will not get CPU time
++unless the current running task deschedules, and the time "base" (niffies) is
++constantly moving.
++
++Task lookup:
++
++As tasks are already pre-ordered according to anticipated scheduling order in
++the skip lists, lookup for the next suitable task per-runqueue is always a
++matter of simply selecting the first task in the 0th level skip list entry.
++In order to maintain optimal latency and fairness across CPUs, MuQSS does a
++novel examination of every other runqueue in cache locality order, choosing the
++best task across all runqueues. This provides near-determinism of how long any
++task across the entire system may wait before receiving CPU time. The other
++runqueues are first examined lockless and then trylocked to minimise the
++potential lock contention if they are likely to have a suitable better task.
++Each other runqueue lock is only held for as long as it takes to examine the
++entry for suitability. In "interactive" mode, the default setting, MuQSS will
++look for the best deadline task across all CPUs, while in !interactive mode,
++it will only select a better deadline task from another CPU if it is more
++heavily laden than the current one.
++
++Lookup is therefore O(k) where k is number of CPUs.
++
++
++Latency.
++
++Through the use of virtual deadlines to govern the scheduling order of normal
++tasks, queue-to-activation latency per runqueue is guaranteed to be bound by
++the rr_interval tunable which is set to 6ms by default. This means that the
++longest a CPU bound task will wait for more CPU is proportional to the number
++of running tasks and in the common case of 0-2 running tasks per CPU, will be
++under the 7ms threshold for human perception of jitter. Additionally, as newly
++woken tasks will have an early deadline from their previous runtime, the very
++tasks that are usually latency sensitive will have the shortest interval for
++activation, usually preempting any existing CPU bound tasks.
++
++Tickless expiry:
++
++A feature of MuQSS is that it is not tied to the resolution of the chosen tick
++rate in Hz, instead depending entirely on the high resolution timers where
++possible for sub-millisecond accuracy on timeouts regardless of the underlying
++tick rate. This allows MuQSS to be run with the low overhead of low Hz rates
++such as 100 by default, benefiting from the improved throughput and lower
++power usage it provides. Another advantage of this approach is that in
++combination with the Full No HZ option, which disables ticks on running task
++CPUs instead of just idle CPUs, the tick can be disabled at all times
++regardless of how many tasks are running instead of being limited to just one
++running task. Note that this option is NOT recommended for regular desktop
++users.
++
++
++Scalability and balancing.
++
++Unlike traditional approaches where balancing is a combination of CPU selection
++at task wakeup and intermittent balancing based on a vast array of rules set
++according to architecture, busyness calculations and special case management,
++MuQSS indirectly balances on the fly at task wakeup and next task selection.
++During initialisation, MuQSS creates a cache coherency ordered list of CPUs for
++each logical CPU and uses this to aid task/CPU selection when CPUs are busy.
++Additionally it selects any idle CPUs, if they are available, at any time over
++busy CPUs according to the following preference:
++
++ * Same thread, idle or busy cache, idle or busy threads
++ * Other core, same cache, idle or busy cache, idle threads.
++ * Same node, other CPU, idle cache, idle threads.
++ * Same node, other CPU, busy cache, idle threads.
++ * Other core, same cache, busy threads.
++ * Same node, other CPU, busy threads.
++ * Other node, other CPU, idle cache, idle threads.
++ * Other node, other CPU, busy cache, idle threads.
++ * Other node, other CPU, busy threads.
++
++Mux is therefore SMT, MC and NUMA aware without the need for extra
++intermittent balancing to maintain CPUs busy and make the most of cache
++coherency.
++
++
++Features
++
++As the initial prime target audience for MuQSS was the average desktop user, it
++was designed to not need tweaking, tuning or have features set to obtain benefit
++from it. Thus the number of knobs and features has been kept to an absolute
++minimum and should not require extra user input for the vast majority of cases.
++There are 3 optional tunables, and 2 extra scheduling policies. The rr_interval,
++interactive, and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO
++policies. In addition to this, MuQSS also uses sub-tick accounting. What MuQSS
++does _not_ now feature is support for CGROUPS. The average user should neither
++need to know what these are, nor should they need to be using them to have good
++desktop behaviour. However since some applications refuse to work without
++cgroups, one can enable them with MuQSS as a stub and the filesystem will be
++created which will allow the applications to work.
++
++rr_interval:
++
++ /proc/sys/kernel/rr_interval
++
++The value is in milliseconds, and the default value is set to 6. Valid values
++are from 1 to 1000 Decreasing the value will decrease latencies at the cost of
++decreasing throughput, while increasing it will improve throughput, but at the
++cost of worsening latencies. It is based on the fact that humans can detect
++jitter at approximately 7ms, so aiming for much lower latencies is pointless
++under most circumstances. It is worth noting this fact when comparing the
++latency performance of MuQSS to other schedulers. Worst case latencies being
++higher than 7ms are far worse than average latencies not being in the
++microsecond range.
++
++interactive:
++
++ /proc/sys/kernel/interactive
++
++The value is a simple boolean of 1 for on and 0 for off and is set to on by
++default. Disabling this will disable the near-determinism of MuQSS when
++selecting the next task by not examining all CPUs for the earliest deadline
++task, or which CPU to wake to, instead prioritising CPU balancing for improved
++throughput. Latency will still be bound by rr_interval, but on a per-CPU basis
++instead of across the whole system.
++
++Isochronous scheduling:
++
++Isochronous scheduling is a unique scheduling policy designed to provide
++near-real-time performance to unprivileged (ie non-root) users without the
++ability to starve the machine indefinitely. Isochronous tasks (which means
++"same time") are set using, for example, the schedtool application like so:
++
++ schedtool -I -e amarok
++
++This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
++is that it has a priority level between true realtime tasks and SCHED_NORMAL
++which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
++if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
++rate). However if ISO tasks run for more than a tunable finite amount of time,
++they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
++time is the percentage of CPU available per CPU, configurable as a percentage in
++the following "resource handling" tunable (as opposed to a scheduler tunable):
++
++iso_cpu:
++
++ /proc/sys/kernel/iso_cpu
++
++and is set to 70% by default. It is calculated over a rolling 5 second average.
++Because it is the total CPU available, it means that on a multi CPU machine, it
++is possible to have an ISO task running as realtime scheduling indefinitely on
++just one CPU, as the other CPUs will be available. Setting this to 100 is the
++equivalent of giving all users SCHED_RR access and setting it to 0 removes the
++ability to run any pseudo-realtime tasks.
++
++A feature of MuQSS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
++
++
++Idleprio scheduling:
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a task
++_only_ when the CPU would be otherwise idle. The idea behind this is to allow
++ultra low priority tasks to be run in the background that have virtually no
++effect on the foreground tasks. This is ideally suited to distributed computing
++clients (like setiathome, folding, mprime etc) but can also be used to start a
++video encode or so on without any slowdown of other tasks. To avoid this policy
++from grabbing shared resources and holding them indefinitely, if it detects a
++state where the task is waiting on I/O, the machine is about to suspend to ram
++and so on, it will transiently schedule them as SCHED_NORMAL. Once a task has
++been scheduled as IDLEPRIO, it cannot be put back to SCHED_NORMAL without
++superuser privileges since it is effectively a lower scheduling policy. Tasks
++can be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++schedtool -D -e ./mprime
++
++Subtick accounting:
++
++It is surprisingly difficult to get accurate CPU accounting, and in many cases,
++the accounting is done by simply determining what is happening at the precise
++moment a timer tick fires off. This becomes increasingly inaccurate as the timer
++tick frequency (HZ) is lowered. It is possible to create an application which
++uses almost 100% CPU, yet by being descheduled at the right time, records zero
++CPU usage. While the main problem with this is that there are possible security
++implications, it is also difficult to determine how much CPU a task really does
++use. Mux uses sub-tick accounting from the TSC clock to determine real CPU
++usage. Thus, the amount of CPU reported as being used by MuQSS will more
++accurately represent how much CPU the task itself is using (as is shown for
++example by the 'time' application), so the reported values may be quite
++different to other schedulers. When comparing throughput of MuQSS to other
++designs, it is important to compare the actual completed work in terms of total
++wall clock time taken and total work done, rather than the reported "cpu usage".
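++
++For example, when benchmarking a kernel compile (an illustrative workload),
++compare the elapsed wall clock time of
++
++ time make -j4
++
++between schedulers rather than the user/sys CPU figures, which are accounted
++differently under MuQSS.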
++
++Symmetric MultiThreading (SMT) aware nice:
++
++SMT, a.k.a. hyperthreading, is a very common feature on modern CPUs. While the
++logical CPU count rises by adding thread units to each CPU core, allowing more
++than one task to be run simultaneously on the same core, the disadvantage of it
++is that the CPU power is shared between the tasks, not summing to the power
++of two CPUs. The practical upshot of this is that two tasks running on
++separate threads of the same core run significantly slower than if they had one
++core each to run on. While smart CPU selection allows each task to have a core
++to itself whenever available (as is done on MuQSS), it cannot offset the
++slowdown that occurs when the cores are all loaded and only a thread is left.
++Most of the time this is harmless as the CPU is effectively overloaded at this
++point and the extra thread is of benefit. However when running a niced task in
++the presence of an un-niced task (say nice 19 vs nice 0), the niced task gets
++precisely the same amount of CPU power as the un-niced one. MuQSS has an
++optional configuration feature known as SMT-NICE which selectively idles the
++secondary niced thread for a period proportional to the nice difference,
++allowing CPU distribution according to nice level to be maintained, at the
++expense of a small amount of extra overhead. If this is configured in on a
++machine without SMT threads, the overhead is minimal.
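++
++SMT-NICE is an optional build time feature; with this patch it is controlled
++by the CONFIG_SMT_NICE symbol, for example (assuming an SMT capable machine):
++
++ CONFIG_SCHED_SMT=y
++ CONFIG_SMT_NICE=y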
++
++
++Con Kolivas <kernel@kolivas.org> Sat, 29th October 2016
+diff -Nur a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+--- a/Documentation/sysctl/kernel.txt 2018-11-03 16:00:51.893619657 +0000
++++ b/Documentation/sysctl/kernel.txt 2018-11-03 16:06:32.703528647 +0000
+@@ -39,6 +39,7 @@
+ - hung_task_timeout_secs
+ - hung_task_warnings
+ - kexec_load_disabled
++- iso_cpu
+ - kptr_restrict
+ - l2cr [ PPC only ]
+ - modprobe ==> Documentation/debugging-modules.txt
+@@ -73,6 +74,7 @@
+ - randomize_va_space
+ - real-root-dev ==> Documentation/admin-guide/initrd.rst
+ - reboot-cmd [ SPARC only ]
++- rr_interval
+ - rtsig-max
+ - rtsig-nr
+ - seccomp/ ==> Documentation/userspace-api/seccomp_filter.rst
+@@ -95,6 +97,7 @@
+ - unknown_nmi_panic
+ - watchdog
+ - watchdog_thresh
++- yield_type
+ - version
+
+ ==============================================================
+@@ -397,6 +400,16 @@
+
+ ==============================================================
+
++iso_cpu: (MuQSS CPU scheduler only)
++
++This sets the percentage of cpu time that unprivileged SCHED_ISO tasks may
++use while running effectively at realtime priority, averaged over a rolling
++five seconds across the -whole- system, meaning all cpus.
++
++Set to 70 (percent) by default.
++
++==============================================================
++
+ l2cr: (PPC only)
+
+ This flag controls the L2 cache of G3 processor boards. If
+@@ -823,6 +836,20 @@
+
+ ==============================================================
+
++rr_interval: (MuQSS CPU scheduler only)
++
++This is the smallest duration that any cpu process scheduling unit
++will run for. Increasing this value can increase throughput of cpu
++bound tasks substantially but at the expense of increased latencies
++overall. Conversely, decreasing it will decrease average and maximum
++latencies but at the expense of throughput. This value is in
++milliseconds and the default value chosen depends on the number of
++cpus available at scheduler initialisation with a minimum of 6.
++
++Valid values are from 1-1000.
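++
++As an illustration only, a throughput oriented server might raise it with:
++
++ sysctl -w kernel.rr_interval=30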
++
++==============================================================
++
+ rtsig-max & rtsig-nr:
+
+ The file rtsig-max can be used to tune the maximum number
+@@ -1081,3 +1108,13 @@
+ tunable to zero will disable lockup detection altogether.
+
+ ==============================================================
++
++yield_type: (MuQSS CPU scheduler only)
++
++This determines what type of yield calls to sched_yield will perform.
++
++ 0: No yield.
++ 1: Yield only to better priority/deadline tasks. (default)
++ 2: Expire timeslice and recalculate deadline.
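++
++For example (as a tuning experiment, not a recommendation), yielding can be
++disabled entirely with:
++
++ echo 0 > /proc/sys/kernel/yield_type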
++
++==============================================================
+diff -Nur a/fs/proc/base.c b/fs/proc/base.c
+--- a/fs/proc/base.c 2018-10-10 07:54:28.000000000 +0100
++++ b/fs/proc/base.c 2018-11-03 16:06:32.706528743 +0000
+@@ -481,7 +481,7 @@
+ seq_printf(m, "0 0 0\n");
+ else
+ seq_printf(m, "%llu %llu %lu\n",
+- (unsigned long long)task->se.sum_exec_runtime,
++ (unsigned long long)tsk_seruntime(task),
+ (unsigned long long)task->sched_info.run_delay,
+ task->sched_info.pcount);
+
+diff -Nur a/include/linux/init_task.h b/include/linux/init_task.h
+--- a/include/linux/init_task.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/init_task.h 2018-11-03 16:06:32.706528743 +0000
+@@ -172,8 +172,6 @@
+ # define INIT_VTIME(tsk)
+ #endif
+
+-#define INIT_TASK_COMM "swapper"
+-
+ #ifdef CONFIG_RT_MUTEXES
+ # define INIT_RT_MUTEXES(tsk) \
+ .pi_waiters = RB_ROOT_CACHED, \
+@@ -223,6 +221,80 @@
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk!. Base=0, limit=0x1fffff (=2MB)
+ */
++#ifdef CONFIG_SCHED_MUQSS
++#define INIT_TASK_COMM "MuQSS"
++#define INIT_TASK(tsk) \
++{ \
++ INIT_TASK_TI(tsk) \
++ .state = 0, \
++ .stack = init_stack, \
++ .usage = ATOMIC_INIT(2), \
++ .flags = PF_KTHREAD, \
++ .prio = NORMAL_PRIO, \
++ .static_prio = MAX_PRIO-20, \
++ .normal_prio = NORMAL_PRIO, \
++ .deadline = 0, \
++ .policy = SCHED_NORMAL, \
++ .cpus_allowed = CPU_MASK_ALL, \
++ .mm = NULL, \
++ .active_mm = &init_mm, \
++ .restart_block = { \
++ .fn = do_no_restart_syscall, \
++ }, \
++ .time_slice = 1000000, \
++ .tasks = LIST_HEAD_INIT(tsk.tasks), \
++ INIT_PUSHABLE_TASKS(tsk) \
++ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
++ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
++ .real_parent = &tsk, \
++ .parent = &tsk, \
++ .children = LIST_HEAD_INIT(tsk.children), \
++ .sibling = LIST_HEAD_INIT(tsk.sibling), \
++ .group_leader = &tsk, \
++ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
++ RCU_POINTER_INITIALIZER(cred, &init_cred), \
++ .comm = INIT_TASK_COMM, \
++ .thread = INIT_THREAD, \
++ .fs = &init_fs, \
++ .files = &init_files, \
++ .signal = &init_signals, \
++ .sighand = &init_sighand, \
++ .nsproxy = &init_nsproxy, \
++ .pending = { \
++ .list = LIST_HEAD_INIT(tsk.pending.list), \
++ .signal = {{0}}}, \
++ .blocked = {{0}}, \
++ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
++ .journal_info = NULL, \
++ INIT_CPU_TIMERS(tsk) \
++ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
++ .timer_slack_ns = 50000, /* 50 usec default slack */ \
++ .pids = { \
++ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
++ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
++ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
++ }, \
++ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
++ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
++ INIT_IDS \
++ INIT_PERF_EVENTS(tsk) \
++ INIT_TRACE_IRQFLAGS \
++ INIT_LOCKDEP \
++ INIT_FTRACE_GRAPH \
++ INIT_TRACE_RECURSION \
++ INIT_TASK_RCU_PREEMPT(tsk) \
++ INIT_TASK_RCU_TASKS(tsk) \
++ INIT_CPUSET_SEQ(tsk) \
++ INIT_RT_MUTEXES(tsk) \
++ INIT_PREV_CPUTIME(tsk) \
++ INIT_VTIME(tsk) \
++ INIT_NUMA_BALANCING(tsk) \
++ INIT_KASAN(tsk) \
++ INIT_LIVEPATCH(tsk) \
++ INIT_TASK_SECURITY \
++}
++#else /* CONFIG_SCHED_MUQSS */
++#define INIT_TASK_COMM "swapper"
+ #define INIT_TASK(tsk) \
+ { \
+ INIT_TASK_TI(tsk) \
+@@ -300,7 +372,7 @@
+ INIT_LIVEPATCH(tsk) \
+ INIT_TASK_SECURITY \
+ }
+-
++#endif /* CONFIG_SCHED_MUQSS */
+
+ /* Attach to the init_task data structure for proper alignment */
+ #define __init_task_data __attribute__((__section__(".data..init_task")))
+diff -Nur a/include/linux/ioprio.h b/include/linux/ioprio.h
+--- a/include/linux/ioprio.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/ioprio.h 2018-11-03 16:06:32.706528743 +0000
+@@ -52,6 +52,8 @@
+ */
+ static inline int task_nice_ioprio(struct task_struct *task)
+ {
++ if (iso_task(task))
++ return 0;
+ return (task_nice(task) + 20) / 5;
+ }
+
+diff -Nur a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
+--- a/include/linux/sched/nohz.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/sched/nohz.h 2018-11-03 16:06:32.707528775 +0000
+@@ -6,7 +6,7 @@
+ * This is the interface between the scheduler and nohz/dynticks:
+ */
+
+-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
+ extern void cpu_load_update_nohz_start(void);
+ extern void cpu_load_update_nohz_stop(void);
+ #else
+@@ -23,7 +23,7 @@
+ static inline void set_cpu_sd_state_idle(void) { }
+ #endif
+
+-#ifdef CONFIG_NO_HZ_COMMON
++#if defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
+ void calc_load_nohz_start(void);
+ void calc_load_nohz_stop(void);
+ #else
+diff -Nur a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+--- a/include/linux/sched/prio.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/sched/prio.h 2018-11-03 16:06:32.707528775 +0000
+@@ -20,8 +20,20 @@
+ */
+
+ #define MAX_USER_RT_PRIO 100
++
++#ifdef CONFIG_SCHED_MUQSS
++/* Note different MAX_RT_PRIO */
++#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1)
++
++#define ISO_PRIO (MAX_RT_PRIO)
++#define NORMAL_PRIO (MAX_RT_PRIO + 1)
++#define IDLE_PRIO (MAX_RT_PRIO + 2)
++#define PRIO_LIMIT ((IDLE_PRIO) + 1)
++#else /* CONFIG_SCHED_MUQSS */
+ #define MAX_RT_PRIO MAX_USER_RT_PRIO
+
++#endif /* CONFIG_SCHED_MUQSS */
++
+ #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
+
+diff -Nur a/include/linux/sched/task.h b/include/linux/sched/task.h
+--- a/include/linux/sched/task.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/sched/task.h 2018-11-03 16:06:32.707528775 +0000
+@@ -80,7 +80,7 @@
+ extern void free_task(struct task_struct *tsk);
+
+ /* sched_exec is called by processes performing an exec */
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_MUQSS)
+ extern void sched_exec(void);
+ #else
+ #define sched_exec() {}
+diff -Nur a/include/linux/sched.h b/include/linux/sched.h
+--- a/include/linux/sched.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/sched.h 2018-11-03 16:06:32.707528775 +0000
+@@ -27,6 +27,9 @@
+ #include <linux/signal_types.h>
+ #include <linux/mm_types_task.h>
+ #include <linux/task_io_accounting.h>
++#ifdef CONFIG_SCHED_MUQSS
++#include <linux/skip_list.h>
++#endif
+
+ /* task_struct member predeclarations (sorted alphabetically): */
+ struct audit_context;
+@@ -579,9 +582,11 @@
+ unsigned int flags;
+ unsigned int ptrace;
+
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_MUQSS)
++ int on_cpu;
++#endif
+ #ifdef CONFIG_SMP
+ struct llist_node wake_entry;
+- int on_cpu;
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ /* Current CPU: */
+ unsigned int cpu;
+@@ -598,10 +603,25 @@
+ int static_prio;
+ int normal_prio;
+ unsigned int rt_priority;
++#ifdef CONFIG_SCHED_MUQSS
++ int time_slice;
++ u64 deadline;
++ skiplist_node node; /* Skip list node */
++ u64 last_ran;
++ u64 sched_time; /* sched_clock time spent running */
++#ifdef CONFIG_SMT_NICE
++ int smt_bias; /* Policy/nice level bias across smt siblings */
++#endif
++#ifdef CONFIG_HOTPLUG_CPU
++ bool zerobound; /* Bound to CPU0 for hotplug */
++#endif
++ unsigned long rt_timeout;
++#else /* CONFIG_SCHED_MUQSS */
+
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
++#endif
+ #ifdef CONFIG_CGROUP_SCHED
+ struct task_group *sched_task_group;
+ #endif
+@@ -751,6 +771,10 @@
+ u64 utimescaled;
+ u64 stimescaled;
+ #endif
++#ifdef CONFIG_SCHED_MUQSS
++ /* Unbanked cpu time */
++ unsigned long utime_ns, stime_ns;
++#endif
+ u64 gtime;
+ struct prev_cputime prev_cputime;
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+@@ -1155,6 +1179,40 @@
+ */
+ };
+
++#ifdef CONFIG_SCHED_MUQSS
++#define tsk_seruntime(t) ((t)->sched_time)
++#define tsk_rttimeout(t) ((t)->rt_timeout)
++
++static inline void tsk_cpus_current(struct task_struct *p)
++{
++}
++
++void print_scheduler_version(void);
++
++static inline bool iso_task(struct task_struct *p)
++{
++ return (p->policy == SCHED_ISO);
++}
++#else /* CFS */
++#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t) ((t)->rt.timeout)
++
++static inline void tsk_cpus_current(struct task_struct *p)
++{
++ p->nr_cpus_allowed = current->nr_cpus_allowed;
++}
++
++static inline void print_scheduler_version(void)
++{
++ printk(KERN_INFO "CFS CPU scheduler.\n");
++}
++
++static inline bool iso_task(struct task_struct *p)
++{
++ return false;
++}
++#endif /* CONFIG_SCHED_MUQSS */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ return task->pids[PIDTYPE_PID].pid;
+diff -Nur a/include/linux/skip_list.h b/include/linux/skip_list.h
+--- a/include/linux/skip_list.h 1970-01-01 01:00:00.000000000 +0100
++++ b/include/linux/skip_list.h 2018-11-03 16:06:32.708528807 +0000
+@@ -0,0 +1,33 @@
++#ifndef _LINUX_SKIP_LISTS_H
++#define _LINUX_SKIP_LISTS_H
++typedef u64 keyType;
++typedef void *valueType;
++
++typedef struct nodeStructure skiplist_node;
++
++struct nodeStructure {
++ int level; /* Levels in this structure */
++ keyType key;
++ valueType value;
++ skiplist_node *next[8];
++ skiplist_node *prev[8];
++};
++
++typedef struct listStructure {
++ int entries;
++ int level; /* Maximum level of the list
++ (1 more than the number of levels in the list) */
++ skiplist_node *header; /* pointer to header */
++} skiplist;
++
++void skiplist_init(skiplist_node *slnode);
++skiplist *new_skiplist(skiplist_node *slnode);
++void free_skiplist(skiplist *l);
++void skiplist_node_init(skiplist_node *node);
++void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed);
++void skiplist_delete(skiplist *l, skiplist_node *node);
++
++static inline bool skiplist_node_empty(skiplist_node *node) {
++ return (!node->next[0]);
++}
++#endif /* _LINUX_SKIP_LISTS_H */
+diff -Nur a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
+--- a/include/uapi/linux/sched.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/uapi/linux/sched.h 2018-11-03 16:06:32.708528807 +0000
+@@ -37,9 +37,16 @@
+ #define SCHED_FIFO 1
+ #define SCHED_RR 2
+ #define SCHED_BATCH 3
+-/* SCHED_ISO: reserved but not implemented yet */
++/* SCHED_ISO: Implemented on MuQSS only */
+ #define SCHED_IDLE 5
++#ifdef CONFIG_SCHED_MUQSS
++#define SCHED_ISO 4
++#define SCHED_IDLEPRIO SCHED_IDLE
++#define SCHED_MAX (SCHED_IDLEPRIO)
++#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX)
++#else /* CONFIG_SCHED_MUQSS */
+ #define SCHED_DEADLINE 6
++#endif /* CONFIG_SCHED_MUQSS */
+
+ /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
+ #define SCHED_RESET_ON_FORK 0x40000000
+diff -Nur a/init/Kconfig b/init/Kconfig
+--- a/init/Kconfig 2018-11-03 16:00:51.921620552 +0000
++++ b/init/Kconfig 2018-11-03 16:06:32.709528839 +0000
+@@ -38,6 +38,18 @@
+
+ menu "General setup"
+
++config SCHED_MUQSS
++ bool "MuQSS cpu scheduler"
++ select HIGH_RES_TIMERS
++ ---help---
++ The Multiple Queue Skiplist Scheduler for excellent interactivity and
++ responsiveness on the desktop and highly scalable deterministic
++ low latency on any hardware.
++
++ Say Y here.
++ default y
++
++
+ config BROKEN
+ bool
+
+@@ -621,6 +633,7 @@
+ depends on ARCH_SUPPORTS_NUMA_BALANCING
+ depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ depends on SMP && NUMA && MIGRATION
++ depends on !SCHED_MUQSS
+ help
+ This option adds support for automatic NUMA aware memory/task placement.
+ The mechanism is quite primitive and is based on migrating memory when
+@@ -723,9 +736,13 @@
+ help
+ This feature lets CPU scheduler recognize task groups and control CPU
+ bandwidth allocation to such task groups. It uses cgroups to group
+- tasks.
++ tasks. In combination with MuQSS this is purely a STUB to create the
++ files associated with the CPU controller cgroup but most of the
++ controls do nothing. This is useful for working in environments and
++ with applications that will only work if this control group is
++ present.
+
+-if CGROUP_SCHED
++if CGROUP_SCHED && !SCHED_MUQSS
+ config FAIR_GROUP_SCHED
+ bool "Group scheduling for SCHED_OTHER"
+ depends on CGROUP_SCHED
+@@ -832,6 +849,7 @@
+
+ config CGROUP_CPUACCT
+ bool "Simple CPU accounting controller"
++ depends on !SCHED_MUQSS
+ help
+ Provides a simple controller for monitoring the
+ total CPU consumed by the tasks in a cgroup.
+@@ -950,6 +968,7 @@
+
+ config SCHED_AUTOGROUP
+ bool "Automatic process group scheduling"
++ depends on !SCHED_MUQSS
+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED
+diff -Nur a/init/main.c b/init/main.c
+--- a/init/main.c 2018-10-10 07:54:28.000000000 +0100
++++ b/init/main.c 2018-11-03 16:06:32.709528839 +0000
+@@ -841,7 +841,6 @@
+ return ret;
+ }
+
+-
+ extern initcall_t __initcall_start[];
+ extern initcall_t __initcall0_start[];
+ extern initcall_t __initcall1_start[];
+@@ -1008,6 +1007,8 @@
+
+ rcu_end_inkernel_boot();
+
++ print_scheduler_version();
++
+ if (ramdisk_execute_command) {
+ ret = run_init_process(ramdisk_execute_command);
+ if (!ret)
+diff -Nur a/kernel/delayacct.c b/kernel/delayacct.c
+--- a/kernel/delayacct.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/delayacct.c 2018-11-03 16:06:32.710528871 +0000
+@@ -115,7 +115,7 @@
+ */
+ t1 = tsk->sched_info.pcount;
+ t2 = tsk->sched_info.run_delay;
+- t3 = tsk->se.sum_exec_runtime;
++ t3 = tsk_seruntime(tsk);
+
+ d->cpu_count += t1;
+
+diff -Nur a/kernel/exit.c b/kernel/exit.c
+--- a/kernel/exit.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/exit.c 2018-11-03 16:06:32.710528871 +0000
+@@ -129,7 +129,7 @@
+ sig->curr_target = next_thread(tsk);
+ }
+
+- add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++ add_device_randomness((const void*) &tsk_seruntime(tsk),
+ sizeof(unsigned long long));
+
+ /*
+@@ -150,7 +150,7 @@
+ sig->inblock += task_io_get_inblock(tsk);
+ sig->oublock += task_io_get_oublock(tsk);
+ task_io_accounting_add(&sig->ioac, &tsk->ioac);
+- sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++ sig->sum_sched_runtime += tsk_seruntime(tsk);
+ sig->nr_threads--;
+ __unhash_process(tsk, group_dead);
+ write_sequnlock(&sig->stats_lock);
+diff -Nur a/kernel/kthread.c b/kernel/kthread.c
+--- a/kernel/kthread.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/kthread.c 2018-11-03 16:06:32.711528903 +0000
+@@ -410,6 +410,34 @@
+ }
+ EXPORT_SYMBOL(kthread_bind);
+
++#if defined(CONFIG_SCHED_MUQSS) && defined(CONFIG_SMP)
++extern void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
++
++/*
++ * new_kthread_bind is a special variant of __kthread_bind_mask.
++ * For new threads to work on muqss we want to call do_set_cpus_allowed
++ * without the task_cpu being set and the task rescheduled until they're
++ * rescheduled on their own so we call __do_set_cpus_allowed directly which
++ * only changes the cpumask. This is particularly important for smpboot threads
++ * to work.
++ */
++static void new_kthread_bind(struct task_struct *p, unsigned int cpu)
++{
++ unsigned long flags;
++
++ if (WARN_ON(!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)))
++ return;
++
++ /* It's safe because the task is inactive. */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ __do_set_cpus_allowed(p, cpumask_of(cpu));
++ p->flags |= PF_NO_SETAFFINITY;
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++#else
++#define new_kthread_bind(p, cpu) kthread_bind(p, cpu)
++#endif
++
+ /**
+ * kthread_create_on_cpu - Create a cpu bound kthread
+ * @threadfn: the function to run until signal_pending(current).
+@@ -431,7 +459,7 @@
+ cpu);
+ if (IS_ERR(p))
+ return p;
+- kthread_bind(p, cpu);
++ new_kthread_bind(p, cpu);
+ /* CPU hotplug need to bind once again when unparking the thread. */
+ set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
+ to_kthread(p)->cpu = cpu;
+diff -Nur a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
+--- a/kernel/livepatch/transition.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/livepatch/transition.c 2018-11-03 16:06:32.711528903 +0000
+@@ -277,6 +277,12 @@
+ return 0;
+ }
+
++#ifdef CONFIG_SCHED_MUQSS
++typedef unsigned long rq_flags_t;
++#else
++typedef struct rq_flags rq_flags_t;
++#endif
++
+ /*
+ * Try to safely switch a task to the target patch state. If it's currently
+ * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
+@@ -285,7 +291,7 @@
+ static bool klp_try_switch_task(struct task_struct *task)
+ {
+ struct rq *rq;
+- struct rq_flags flags;
++ rq_flags_t flags;
+ int ret;
+ bool success = false;
+ char err_buf[STACK_ERR_BUF_SIZE];
+diff -Nur a/kernel/Makefile b/kernel/Makefile
+--- a/kernel/Makefile 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/Makefile 2018-11-03 16:06:32.709528839 +0000
+@@ -10,7 +10,7 @@
+ extable.o params.o \
+ kthread.o sys_ni.o nsproxy.o \
+ notifier.o ksysfs.o cred.o reboot.o \
+- async.o range.o smpboot.o ucount.o
++ async.o range.o smpboot.o ucount.o skip_list.o
+
+ obj-$(CONFIG_MODULES) += kmod.o
+ obj-$(CONFIG_MULTIUSER) += groups.o
+diff -Nur a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
+--- a/kernel/rcu/Kconfig 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/rcu/Kconfig 2018-11-03 16:06:32.711528903 +0000
+@@ -93,7 +93,7 @@
+ config CONTEXT_TRACKING_FORCE
+ bool "Force context tracking"
+ depends on CONTEXT_TRACKING
+- default y if !NO_HZ_FULL
++ default y if !NO_HZ_FULL && !SCHED_MUQSS
+ help
+ The major pre-requirement for full dynticks to work is to
+ support the context tracking subsystem. But there are also
+diff -Nur a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+--- a/kernel/sched/cpufreq_schedutil.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/sched/cpufreq_schedutil.c 2018-11-03 16:06:32.716529064 +0000
+@@ -176,6 +176,17 @@
+ return cpufreq_driver_resolve_freq(policy, freq);
+ }
+
++#ifdef CONFIG_SCHED_MUQSS
++static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ *util = rq->load_avg;
++ if (*util > SCHED_CAPACITY_SCALE)
++ *util = SCHED_CAPACITY_SCALE;
++ *max = SCHED_CAPACITY_SCALE;
++}
++#else /* CONFIG_SCHED_MUQSS */
+ static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
+@@ -186,6 +197,7 @@
+ *util = min(rq->cfs.avg.util_avg, cfs_max);
+ *max = cfs_max;
+ }
++#endif /* CONFIG_SCHED_MUQSS */
+
+ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
+ unsigned int flags)
+diff -Nur a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+--- a/kernel/sched/cputime.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/sched/cputime.c 2018-11-03 16:06:32.716529064 +0000
+@@ -270,26 +270,6 @@
+ return accounted;
+ }
+
+-#ifdef CONFIG_64BIT
+-static inline u64 read_sum_exec_runtime(struct task_struct *t)
+-{
+- return t->se.sum_exec_runtime;
+-}
+-#else
+-static u64 read_sum_exec_runtime(struct task_struct *t)
+-{
+- u64 ns;
+- struct rq_flags rf;
+- struct rq *rq;
+-
+- rq = task_rq_lock(t, &rf);
+- ns = t->se.sum_exec_runtime;
+- task_rq_unlock(rq, t, &rf);
+-
+- return ns;
+-}
+-#endif
+-
+ /*
+ * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
+ * tasks (sum on group iteration) belonging to @tsk's group.
+@@ -661,7 +641,7 @@
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ struct task_cputime cputime = {
+- .sum_exec_runtime = p->se.sum_exec_runtime,
++ .sum_exec_runtime = tsk_seruntime(p),
+ };
+
+ task_cputime(p, &cputime.utime, &cputime.stime);
+diff -Nur a/kernel/sched/idle.c b/kernel/sched/idle.c
+--- a/kernel/sched/idle.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/sched/idle.c 2018-11-03 16:06:32.716529064 +0000
+@@ -209,6 +209,9 @@
+ */
+ static void do_idle(void)
+ {
++ int cpu = smp_processor_id();
++ bool pending = false;
++
+ /*
+ * If the arch has a polling bit, we maintain an invariant:
+ *
+@@ -220,13 +223,16 @@
+
+ __current_set_polling();
+ quiet_vmstat();
+- tick_nohz_idle_enter();
++ if (unlikely(softirq_pending(cpu)))
++ pending = true;
++ else
++ tick_nohz_idle_enter();
+
+ while (!need_resched()) {
+ check_pgt_cache();
+ rmb();
+
+- if (cpu_is_offline(smp_processor_id())) {
++ if (cpu_is_offline(cpu)) {
+ cpuhp_report_idle_dead();
+ arch_cpu_idle_dead();
+ }
+@@ -255,7 +261,8 @@
+ * an IPI to fold the state for us.
+ */
+ preempt_set_need_resched();
+- tick_nohz_idle_exit();
++ if (!pending)
++ tick_nohz_idle_exit();
+ __current_clr_polling();
+
+ /*
+diff -Nur a/kernel/sched/Makefile b/kernel/sched/Makefile
+--- a/kernel/sched/Makefile 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/sched/Makefile 2018-11-03 16:06:32.711528903 +0000
+@@ -16,14 +16,20 @@
+ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
+ endif
+
+-obj-y += core.o loadavg.o clock.o cputime.o
++ifdef CONFIG_SCHED_MUQSS
++obj-y += MuQSS.o clock.o
++else
++obj-y += core.o loadavg.o clock.o
+ obj-y += idle_task.o fair.o rt.o deadline.o
+-obj-y += wait.o wait_bit.o swait.o completion.o idle.o
+-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
++obj-$(CONFIG_SMP) += cpudeadline.o stop_task.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
+-obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_SCHED_DEBUG) += debug.o
+ obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
++endif
++obj-y += cputime.o
++obj-y += wait.o wait_bit.o swait.o completion.o idle.o
++obj-$(CONFIG_SMP) += cpupri.o topology.o
++obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
+ obj-$(CONFIG_MEMBARRIER) += membarrier.o
+diff -Nur a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
+--- a/kernel/sched/MuQSS.c 1970-01-01 01:00:00.000000000 +0100
++++ b/kernel/sched/MuQSS.c 2018-11-03 16:06:32.715529032 +0000
+@@ -0,0 +1,6923 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * kernel/sched/MuQSS.c, was kernel/sched.c
++ *
++ * Kernel scheduler and related syscalls
++ *
++ * Copyright (C) 1991-2002 Linus Torvalds
++ *
++ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
++ * make semaphores SMP safe
++ * 1998-11-19 Implemented schedule_timeout() and related stuff
++ * by Andrea Arcangeli
++ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
++ * hybrid priority-list and round-robin design with
++ * an array-switch method of distributing timeslices
++ * and per-CPU runqueues. Cleanups and useful suggestions
++ * by Davide Libenzi, preemptible kernel bits by Robert Love.
++ * 2003-09-03 Interactivity tuning by Con Kolivas.
++ * 2004-04-02 Scheduler domains code by Nick Piggin
++ * 2007-04-15 Work begun on replacing all interactivity tuning with a
++ * fair scheduling design by Con Kolivas.
++ * 2007-05-05 Load balancing (smp-nice) and other improvements
++ * by Peter Williams
++ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
++ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
++ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
++ * Thomas Gleixner, Mike Kravetz
++ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes
++ * a whole lot of those previous things.
++ * 2016-10-01 Multiple Queue Skiplist Scheduler scalable evolution of BFS
++ * scheduler by Con Kolivas.
++ */
++
++#include <linux/sched.h>
++#include <linux/sched/clock.h>
++#include <uapi/linux/sched/types.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/hotplug.h>
++#include <linux/wait_bit.h>
++#include <linux/cpuset.h>
++#include <linux/delayacct.h>
++#include <linux/init_task.h>
++#include <linux/binfmts.h>
++#include <linux/context_tracking.h>
++#include <linux/rcupdate_wait.h>
++#include <linux/skip_list.h>
++
++#include <linux/blkdev.h>
++#include <linux/kprobes.h>
++#include <linux/mmu_context.h>
++#include <linux/module.h>
++#include <linux/nmi.h>
++#include <linux/prefetch.h>
++#include <linux/profile.h>
++#include <linux/security.h>
++#include <linux/syscalls.h>
++#include <linux/tick.h>
++
++#include <asm/switch_to.h>
++#include <asm/tlb.h>
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#endif
++
++#include "../workqueue_internal.h"
++#include "../smpboot.h"
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++
++#include "MuQSS.h"
++
++#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
++#define rt_task(p) rt_prio((p)->prio)
++#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
++#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \
++ (policy) == SCHED_RR)
++#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
++
++#define is_idle_policy(policy) ((policy) == SCHED_IDLEPRIO)
++#define idleprio_task(p) unlikely(is_idle_policy((p)->policy))
++#define task_running_idle(p) unlikely((p)->prio == IDLE_PRIO)
++
++#define is_iso_policy(policy) ((policy) == SCHED_ISO)
++#define iso_task(p) unlikely(is_iso_policy((p)->policy))
++#define task_running_iso(p) unlikely((p)->prio == ISO_PRIO)
++
++#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT)
++
++#define ISO_PERIOD (5 * HZ)
++
++#define STOP_PRIO (MAX_RT_PRIO - 1)
++
++/*
++ * Some helpers for converting to/from various scales. Use shifts to get
++ * approximate multiples of ten for less overhead.
++ */
++#define JIFFIES_TO_NS(TIME) ((TIME) * (1073741824 / HZ))
++#define JIFFY_NS (1073741824 / HZ)
++#define JIFFY_US (1048576 / HZ)
++#define NS_TO_JIFFIES(TIME) ((TIME) / JIFFY_NS)
++#define HALF_JIFFY_NS (1073741824 / HZ / 2)
++#define HALF_JIFFY_US (1048576 / HZ / 2)
++#define MS_TO_NS(TIME) ((TIME) << 20)
++#define MS_TO_US(TIME) ((TIME) << 10)
++#define NS_TO_MS(TIME) ((TIME) >> 20)
++#define NS_TO_US(TIME) ((TIME) >> 10)
++#define US_TO_NS(TIME) ((TIME) << 10)
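++/*
++ * Illustrative check of the approximation: 2^30 (1073741824) stands in for
++ * NSEC_PER_SEC (10^9), so at HZ=100 JIFFY_NS works out to ~10.74ms rather
++ * than exactly 10ms, keeping these conversions cheap shifts and divides.
++ */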
++
++#define RESCHED_US (100) /* Reschedule if less than this many μs left */
++
++void print_scheduler_version(void)
++{
++ printk(KERN_INFO "MuQSS CPU scheduler v0.162 by Con Kolivas.\n");
++}
++
++/*
++ * This is the time all tasks within the same priority round robin.
++ * Value is in ms and set to a minimum of 6ms.
++ * Tunable via /proc interface.
++ */
++int rr_interval __read_mostly = 6;
++
++/*
++ * Tunable to choose whether to prioritise latency or throughput, simple
++ * binary yes or no
++ */
++int sched_interactive __read_mostly = 1;
++
++/*
++ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
++ * are allowed to run five seconds as real time tasks. This is the total over
++ * all online cpus.
++ */
++int sched_iso_cpu __read_mostly = 70;
++
++/*
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Yield only to better priority/deadline tasks. (default)
++ * 2: Expire timeslice and recalculate deadline.
++ */
++int sched_yield_type __read_mostly = 1;
++
++/*
++ * The relative length of deadline for each priority(nice) level.
++ */
++static int prio_ratios[NICE_WIDTH] __read_mostly;
++
++/*
++ * The quota handed out to tasks of all priority levels when refilling their
++ * time_slice.
++ */
++static inline int timeslice(void)
++{
++ return MS_TO_US(rr_interval);
++}
++
++#ifdef CONFIG_SMP
++static cpumask_t cpu_idle_map ____cacheline_aligned_in_smp;
++#endif
++
++/* CPUs with isolated domains */
++cpumask_var_t cpu_isolated_map;
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#ifdef CONFIG_SMP
++struct rq *cpu_rq(int cpu)
++{
++ return &per_cpu(runqueues, (cpu));
++}
++#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
++
++/*
++ * For asym packing, by default the lower numbered cpu has higher priority.
++ */
++int __weak arch_asym_cpu_priority(int cpu)
++{
++ return -cpu;
++}
++
++int __weak arch_sd_sibling_asym_packing(void)
++{
++ return 0*SD_ASYM_PACKING;
++}
++#else
++struct rq *uprq;
++#endif /* CONFIG_SMP */
++
++#include "stats.h"
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next) do { } while (0)
++#endif
++#ifndef finish_arch_switch
++# define finish_arch_switch(prev) do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch() do { } while (0)
++#endif
++
++/*
++ * All common locking functions performed on rq->lock. rq->clock is local to
++ * the CPU accessing it so it can be modified just with interrupts disabled
++ * when we're not updating niffies.
++ * Looking up task_rq must be done under rq->lock to be safe.
++ */
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++ s64 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++ /*
++ * Since irq_time is only updated on {soft,}irq_exit, we might run into
++ * this case when a previous update_rq_clock() happened inside a
++ * {soft,}irq region.
++ *
++ * When this happens, we stop ->clock_task and only update the
++ * prev_irq_time stamp to account for the part that fit, so that a next
++ * update will consume the rest. This ensures ->clock_task is
++ * monotonic.
++ *
++ * It does however cause some slight miss-attribution of {soft,}irq
++ * time, a more accurate solution would be to update the irq_time using
++ * the current rq->clock timestamp, except that would require using
++ * atomic ops.
++ */
++ if (irq_delta > delta)
++ irq_delta = delta;
++
++ rq->prev_irq_time += irq_delta;
++ delta -= irq_delta;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++ if (static_key_false((&paravirt_steal_rq_enabled))) {
++ s64 steal = paravirt_steal_clock(cpu_of(rq));
++
++ steal -= rq->prev_steal_time_rq;
++
++ if (unlikely(steal > delta))
++ steal = delta;
++
++ rq->prev_steal_time_rq += steal;
++
++ delta -= steal;
++ }
++#endif
++ rq->clock_task += delta;
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++ if (unlikely(delta < 0))
++ return;
++ rq->clock += delta;
++ update_rq_clock_task(rq, delta);
++}
++
++/*
++ * Niffies are a globally increasing nanosecond counter. They're only used by
++ * update_load_avg and time_slice_expired, however deadlines are based on them
++ * across CPUs. Update them whenever we will call one of those functions, and
++ * synchronise them across CPUs whenever we hold both runqueue locks.
++ */
++static inline void update_clocks(struct rq *rq)
++{
++ s64 ndiff, minndiff;
++ long jdiff;
++
++ update_rq_clock(rq);
++ ndiff = rq->clock - rq->old_clock;
++ rq->old_clock = rq->clock;
++ jdiff = jiffies - rq->last_jiffy;
++
++ /* Subtract any niffies added by balancing with other rqs */
++ ndiff -= rq->niffies - rq->last_niffy;
++ minndiff = JIFFIES_TO_NS(jdiff) - rq->niffies + rq->last_jiffy_niffies;
++ if (minndiff < 0)
++ minndiff = 0;
++ ndiff = max(ndiff, minndiff);
++ rq->niffies += ndiff;
++ rq->last_niffy = rq->niffies;
++ if (jdiff) {
++ rq->last_jiffy += jdiff;
++ rq->last_jiffy_niffies = rq->niffies;
++ }
++}
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++ return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++ return p->on_rq == TASK_ON_RQ_MIGRATING;
++}
++
++static inline int rq_trylock(struct rq *rq)
++ __acquires(rq->lock)
++{
++ return raw_spin_trylock(&rq->lock);
++}
++
++/*
++ * Any time we have two runqueues locked we use that as an opportunity to
++ * synchronise niffies to the highest value as idle ticks may have artificially
++ * kept niffies low on one CPU and the truth can only be later.
++ */
++static inline void synchronise_niffies(struct rq *rq1, struct rq *rq2)
++{
++ if (rq1->niffies > rq2->niffies)
++ rq2->niffies = rq1->niffies;
++ else
++ rq1->niffies = rq2->niffies;
++}
++
++/*
++ * double_rq_lock - safely lock two runqueues
++ *
++ * Note this does not disable interrupts like task_rq_lock,
++ * you need to do so manually before calling.
++ */
++
++/* For when we know rq1 != rq2 */
++static inline void __double_rq_lock(struct rq *rq1, struct rq *rq2)
++ __acquires(rq1->lock)
++ __acquires(rq2->lock)
++{
++ if (rq1 < rq2) {
++ raw_spin_lock(&rq1->lock);
++ raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
++ } else {
++ raw_spin_lock(&rq2->lock);
++ raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
++ }
++}
++
++static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
++ __acquires(rq1->lock)
++ __acquires(rq2->lock)
++{
++ BUG_ON(!irqs_disabled());
++ if (rq1 == rq2) {
++ raw_spin_lock(&rq1->lock);
++ __acquire(rq2->lock); /* Fake it out ;) */
++ } else
++ __double_rq_lock(rq1, rq2);
++ synchronise_niffies(rq1, rq2);
++}
++
++/*
++ * double_rq_unlock - safely unlock two runqueues
++ *
++ * Note this does not restore interrupts like task_rq_unlock,
++ * you need to do so manually after calling.
++ */
++static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
++ __releases(rq1->lock)
++ __releases(rq2->lock)
++{
++ raw_spin_unlock(&rq1->lock);
++ if (rq1 != rq2)
++ raw_spin_unlock(&rq2->lock);
++ else
++ __release(rq2->lock);
++}
++
++static inline void lock_all_rqs(void)
++{
++ int cpu;
++
++ preempt_disable();
++ for_each_possible_cpu(cpu) {
++ struct rq *rq = cpu_rq(cpu);
++
++ do_raw_spin_lock(&rq->lock);
++ }
++}
++
++static inline void unlock_all_rqs(void)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ struct rq *rq = cpu_rq(cpu);
++
++ do_raw_spin_unlock(&rq->lock);
++ }
++ preempt_enable();
++}
++
++/* Specially nest trylock an rq */
++static inline bool trylock_rq(struct rq *this_rq, struct rq *rq)
++{
++ if (unlikely(!do_raw_spin_trylock(&rq->lock)))
++ return false;
++ spin_acquire(&rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++ synchronise_niffies(this_rq, rq);
++ return true;
++}
++
++/* Unlock a specially nested trylocked rq */
++static inline void unlock_rq(struct rq *rq)
++{
++ spin_release(&rq->lock.dep_map, 1, _RET_IP_);
++ do_raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask) \
++ ({ \
++ typeof(ptr) _ptr = (ptr); \
++ typeof(mask) _mask = (mask); \
++ typeof(*_ptr) _old, _val = *_ptr; \
++ \
++ for (;;) { \
++ _old = cmpxchg(_ptr, _val, _val | _mask); \
++ if (_old == _val) \
++ break; \
++ _val = _old; \
++ } \
++ _old; \
++})
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++ struct thread_info *ti = task_thread_info(p);
++ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++ struct thread_info *ti = task_thread_info(p);
++ typeof(ti->flags) old, val = READ_ONCE(ti->flags);
++
++ for (;;) {
++ if (!(val & _TIF_POLLING_NRFLAG))
++ return false;
++ if (val & _TIF_NEED_RESCHED)
++ return true;
++ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
++ if (old == val)
++ break;
++ val = old;
++ }
++ return true;
++}
++
++#else
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++ set_tsk_need_resched(p);
++ return true;
++}
++
++#ifdef CONFIG_SMP
++static bool set_nr_if_polling(struct task_struct *p)
++{
++ return false;
++}
++#endif
++#endif
++
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++ struct wake_q_node *node = &task->wake_q;
++
++ /*
++ * Atomically grab the task, if ->wake_q is !nil already it means
++ * its already queued (either by us or someone else) and will get the
++ * wakeup due to that.
++ *
++ * This cmpxchg() implies a full barrier, which pairs with the write
++ * barrier implied by the wakeup in wake_up_q().
++ */
++ if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
++ return;
++
++ get_task_struct(task);
++
++ /*
++ * The head is context local, there can be no concurrency.
++ */
++ *head->lastp = node;
++ head->lastp = &node->next;
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++ struct wake_q_node *node = head->first;
++
++ while (node != WAKE_Q_TAIL) {
++ struct task_struct *task;
++
++ task = container_of(node, struct task_struct, wake_q);
++ BUG_ON(!task);
++ /* Task can safely be re-inserted now */
++ node = node->next;
++ task->wake_q.next = NULL;
++
++ /*
++ * wake_up_process() implies a wmb() to pair with the queueing
++ * in wake_q_add() so as not to miss wakeups.
++ */
++ wake_up_process(task);
++ put_task_struct(task);
++ }
++}
++
++static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++ next->on_cpu = 1;
++}
++
++static inline void smp_sched_reschedule(int cpu)
++{
++ if (likely(cpu_online(cpu)))
++ smp_send_reschedule(cpu);
++}
++
++/*
++ * resched_task - mark a task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_task(struct task_struct *p)
++{
++ int cpu;
++#ifdef CONFIG_LOCKDEP
++ /* Kernel threads call this when creating workqueues while still
++ * inactive from __kthread_bind_mask, holding only the pi_lock */
++ if (!(p->flags & PF_KTHREAD)) {
++ struct rq *rq = task_rq(p);
++
++ lockdep_assert_held(&rq->lock);
++ }
++#endif
++ if (test_tsk_need_resched(p))
++ return;
++
++ cpu = task_cpu(p);
++ if (cpu == smp_processor_id()) {
++ set_tsk_need_resched(p);
++ set_preempt_need_resched();
++ return;
++ }
++
++ if (set_nr_and_not_polling(p))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++/*
++ * A task that is not running or queued will not have a node set.
++ * A task that is queued but not running will have a node set.
++ * A task that is currently running will have ->on_cpu set but no node set.
++ */
++static inline bool task_queued(struct task_struct *p)
++{
++ return !skiplist_node_empty(&p->node);
++}
++
++static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
++static inline void resched_if_idle(struct rq *rq);
++
++/* Dodgy workaround till we figure out where the softirqs are going */
++static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
++{
++ if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
++ do_softirq_own_stack();
++}
++
++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++ /*
++ * After ->on_cpu is cleared, the task can be moved to a different CPU.
++ * We must ensure this doesn't happen until the switch is completely
++ * finished.
++ *
++ * In particular, the load of prev->state in finish_task_switch() must
++ * happen before this.
++ *
++ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++ */
++ smp_store_release(&prev->on_cpu, 0);
++#endif
++#ifdef CONFIG_DEBUG_SPINLOCK
++ /* this is a valid case when another task releases the spinlock */
++ rq->lock.owner = current;
++#endif
++ /*
++ * If we are tracking spinlock dependencies then we have to
++ * fix up the runqueue lock - which gets 'carried over' from
++ * prev into current:
++ */
++ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++
++#ifdef CONFIG_SMP
++ /*
++ * If prev was marked as migrating to another CPU in return_task, drop
++ * the local runqueue lock but leave interrupts disabled and grab the
++ * remote lock we're migrating it to before enabling them.
++ */
++ if (unlikely(task_on_rq_migrating(prev))) {
++ sched_info_dequeued(rq, prev);
++ /*
++ * We move the ownership of prev to the new cpu now. ttwu can't
++ * activate prev to the wrong cpu since it has to grab this
++ * runqueue in ttwu_remote.
++ */
++#ifdef CONFIG_THREAD_INFO_IN_TASK
++ prev->cpu = prev->wake_cpu;
++#else
++ task_thread_info(prev)->cpu = prev->wake_cpu;
++#endif
++ raw_spin_unlock(&rq->lock);
++
++ raw_spin_lock(&prev->pi_lock);
++ rq = __task_rq_lock(prev);
++ /* Check that someone else hasn't already queued prev */
++ if (likely(!task_queued(prev))) {
++ enqueue_task(rq, prev, 0);
++ prev->on_rq = TASK_ON_RQ_QUEUED;
++ /* Wake up the CPU if it's not already running */
++ resched_if_idle(rq);
++ }
++ raw_spin_unlock(&prev->pi_lock);
++ }
++#endif
++ /* Accurately set nr_running here for load average calculations */
++ rq->nr_running = rq->sl->entries + !rq_idle(rq);
++ rq_unlock(rq);
++
++ do_pending_softirq(rq, current);
++
++ local_irq_enable();
++}
++
++static inline bool deadline_before(u64 deadline, u64 time)
++{
++ return (deadline < time);
++}
++
++/*
++ * Deadline is "now" in niffies + (offset by priority). Setting the deadline
++ * is the key to everything. It distributes cpu fairly amongst tasks of the
++ * same nice value, it proportions cpu according to nice level, it means the
++ * task that last woke up the longest ago has the earliest deadline, thus
++ * ensuring that interactive tasks get low latency on wake up. The CPU
++ * proportion works out to the square of the virtual deadline difference, so
++ * this equation will give nice 19 3% CPU compared to nice 0.
++ */
++static inline u64 prio_deadline_diff(int user_prio)
++{
++ return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
++}
++
++static inline u64 task_deadline_diff(struct task_struct *p)
++{
++ return prio_deadline_diff(TASK_USER_PRIO(p));
++}
++
++static inline u64 static_deadline_diff(int static_prio)
++{
++ return prio_deadline_diff(USER_PRIO(static_prio));
++}
++
++static inline int longest_deadline_diff(void)
++{
++ return prio_deadline_diff(39);
++}
++
++static inline int ms_longest_deadline_diff(void)
++{
++ return NS_TO_MS(longest_deadline_diff());
++}
++
++static inline bool rq_local(struct rq *rq);
++
++#ifndef SCHED_CAPACITY_SCALE
++#define SCHED_CAPACITY_SCALE 1024
++#endif
++
++static inline int rq_load(struct rq *rq)
++{
++ return rq->nr_running;
++}
++
++/*
++ * Update the load average for feeding into cpu frequency governors. Use a
++ * rough estimate of a rolling average with ~ time constant of 32ms.
++ * 80/128 ~ 0.63. * 80 / 32768 / 128 == * 5 / 262144
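++ * (Illustrative arithmetic: over a single 32768us interval the decay term is
++ * 32768 * 5 / 262144 = 0.625 ~= 80/128, i.e. ~63% of the old average decays
++ * away, which is what gives the ~32ms time constant.)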
++ * Make sure a call to update_clocks has been made before calling this to get
++ * an updated rq->niffies.
++ */
++static void update_load_avg(struct rq *rq, unsigned int flags)
++{
++ unsigned long us_interval, curload;
++ long load;
++
++ if (unlikely(rq->niffies <= rq->load_update))
++ return;
++
++ us_interval = NS_TO_US(rq->niffies - rq->load_update);
++ curload = rq_load(rq);
++ load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
++ if (unlikely(load < 0))
++ load = 0;
++ load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
++ rq->load_avg = load;
++
++ rq->load_update = rq->niffies;
++ if (likely(rq_local(rq)))
++ cpufreq_trigger(rq, flags);
++}
++
++/*
++ * Removing from the runqueue. Enter with rq locked. Deleting a task
++ * from the skip list is done via the stored node reference in the task struct
++ * and does not require a full look up. Thus it occurs in O(k) time where k
++ * is the "level" of the list the task was stored at - usually < 4, max 8.
++ */
++static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
++{
++ skiplist_delete(rq->sl, &p->node);
++ rq->best_key = rq->node.next[0]->key;
++ update_clocks(rq);
++
++ if (!(flags & DEQUEUE_SAVE))
++ sched_info_dequeued(task_rq(p), p);
++ update_load_avg(rq, flags);
++}
++
++#ifdef CONFIG_PREEMPT_RCU
++static bool rcu_read_critical(struct task_struct *p)
++{
++ return p->rcu_read_unlock_special.b.blocked;
++}
++#else /* CONFIG_PREEMPT_RCU */
++#define rcu_read_critical(p) (false)
++#endif /* CONFIG_PREEMPT_RCU */
++
++/*
++ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as
++ * an idle task, we ensure none of the following conditions are met.
++ */
++static bool idleprio_suitable(struct task_struct *p)
++{
++ return (!(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)) &&
++ !signal_pending(p) && !rcu_read_critical(p) && !freezing(p));
++}
++
++/*
++ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check
++ * that the iso_refractory flag is not set.
++ */
++static inline bool isoprio_suitable(struct rq *rq)
++{
++ return !rq->iso_refractory;
++}
++
++/*
++ * Adding to the runqueue. Enter with rq locked.
++ */
++static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
++{
++ unsigned int randseed, cflags = 0;
++ u64 sl_id;
++
++ if (!rt_task(p)) {
++ /* Check it hasn't gotten rt from PI */
++ if ((idleprio_task(p) && idleprio_suitable(p)) ||
++ (iso_task(p) && isoprio_suitable(rq)))
++ p->prio = p->normal_prio;
++ else
++ p->prio = NORMAL_PRIO;
++ }
++ /*
++ * The sl_id key passed to the skiplist generates a sorted list.
++ * Realtime and sched iso tasks run FIFO so they only need be sorted
++ * according to priority. The skiplist will put tasks of the same
++ * key inserted later in FIFO order. Tasks of sched normal, batch
++ * and idleprio are sorted according to their deadlines. Idleprio
++ * tasks are offset by an impossibly large deadline value ensuring
++ * they get sorted into last positions, but still according to their
++ * own deadlines. This creates a "landscape" of skiplists running
++ * from priority 0 realtime in first place to the lowest priority
++ * idleprio tasks last. Skiplist insertion is an O(log n) process.
++ */
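++ /*
++ * Illustration: an RT task with p->prio 40 is keyed at 40, a task running
++ * as SCHED_ISO is keyed at ISO_PRIO, a normal task is keyed by its
++ * (niffies based) deadline, while idleprio tasks are offset further so
++ * they sort towards the end of the list.
++ */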
++ if (p->prio <= ISO_PRIO) {
++ sl_id = p->prio;
++ cflags = SCHED_CPUFREQ_RT;
++ } else {
++ sl_id = p->deadline;
++ if (idleprio_task(p)) {
++ if (p->prio == IDLE_PRIO)
++ sl_id |= 0xF000000000000000;
++ else
++ sl_id += longest_deadline_diff();
++ }
++ }
++ /*
++ * Some architectures don't have better than microsecond resolution
++ * so mask out ~microseconds as the random seed for skiplist insertion.
++ */
++ update_clocks(rq);
++ if (!(flags & ENQUEUE_RESTORE))
++ sched_info_queued(rq, p);
++ randseed = (rq->niffies >> 10) & 0xFFFFFFFF;
++ skiplist_insert(rq->sl, &p->node, sl_id, p, randseed);
++ rq->best_key = rq->node.next[0]->key;
++ if (p->in_iowait)
++ cflags |= SCHED_CPUFREQ_IOWAIT;
++ update_load_avg(rq, cflags);
++}
++
++/*
++ * Returns the relative length of deadline all compared to the shortest
++ * deadline which is that of nice -20.
++ */
++static inline int task_prio_ratio(struct task_struct *p)
++{
++ return prio_ratios[TASK_USER_PRIO(p)];
++}
++
++/*
++ * task_timeslice - all tasks of all priorities get the exact same timeslice
++ * length. CPU distribution is handled by giving different deadlines to
++ * tasks of different priorities. Use 128 as the base value for fast shifts.
++ */
++static inline int task_timeslice(struct task_struct *p)
++{
++ return (rr_interval * task_prio_ratio(p) / 128);
++}
++
++#ifdef CONFIG_SMP
++/* Entered with rq locked */
++static inline void resched_if_idle(struct rq *rq)
++{
++ if (rq_idle(rq))
++ resched_task(rq->curr);
++}
++
++static inline bool rq_local(struct rq *rq)
++{
++ return (rq->cpu == smp_processor_id());
++}
++#ifdef CONFIG_SMT_NICE
++static const cpumask_t *thread_cpumask(int cpu);
++
++/* Find the best real time priority running on any SMT siblings of cpu and if
++ * none are running, the static priority of the best deadline task running.
++ * The lookups to the other runqueues are done locklessly as the occasional wrong
++ * value would be harmless. */
++static int best_smt_bias(struct rq *this_rq)
++{
++ int other_cpu, best_bias = 0;
++
++ for_each_cpu(other_cpu, &this_rq->thread_mask) {
++ struct rq *rq = cpu_rq(other_cpu);
++
++ if (rq_idle(rq))
++ continue;
++ if (unlikely(!rq->online))
++ continue;
++ if (!rq->rq_mm)
++ continue;
++ if (likely(rq->rq_smt_bias > best_bias))
++ best_bias = rq->rq_smt_bias;
++ }
++ return best_bias;
++}
++
++static int task_prio_bias(struct task_struct *p)
++{
++ if (rt_task(p))
++ return 1 << 30;
++ else if (task_running_iso(p))
++ return 1 << 29;
++ else if (task_running_idle(p))
++ return 0;
++ return MAX_PRIO - p->static_prio;
++}
++
++static bool smt_always_schedule(struct task_struct __maybe_unused *p, struct rq __maybe_unused *this_rq)
++{
++ return true;
++}
++
++static bool (*smt_schedule)(struct task_struct *p, struct rq *this_rq) = &smt_always_schedule;
++
++/* We've already decided p can run on CPU, now test if it shouldn't for SMT
++ * nice reasons. */
++static bool smt_should_schedule(struct task_struct *p, struct rq *this_rq)
++{
++ int best_bias, task_bias;
++
++ /* Kernel threads always run */
++ if (unlikely(!p->mm))
++ return true;
++ if (rt_task(p))
++ return true;
++ if (!idleprio_suitable(p))
++ return true;
++ best_bias = best_smt_bias(this_rq);
++ /* The smt siblings are all idle or running IDLEPRIO */
++ if (best_bias < 1)
++ return true;
++ task_bias = task_prio_bias(p);
++ if (task_bias < 1)
++ return false;
++ if (task_bias >= best_bias)
++ return true;
++ /* Dither 25% cpu of normal tasks regardless of nice difference */
++ if (best_bias % 4 == 1)
++ return true;
++ /* Sorry, you lose */
++ return false;
++}
++#else /* CONFIG_SMT_NICE */
++#define smt_schedule(p, this_rq) (true)
++#endif /* CONFIG_SMT_NICE */
++
++static inline void atomic_set_cpu(int cpu, cpumask_t *cpumask)
++{
++ set_bit(cpu, (volatile unsigned long *)cpumask);
++}
++
++/*
++ * The cpu_idle_map stores a bitmap of all the CPUs currently idle to
++ * allow easy lookup of whether any suitable idle CPUs are available.
++ * It's cheaper to maintain a binary yes/no if there are any idle CPUs on the
++ * idle_cpus variable than to do a full bitmask check when we are busy. The
++ * bits are set atomically but read locklessly as occasional false positive /
++ * negative is harmless.
++ */
++static inline void set_cpuidle_map(int cpu)
++{
++ if (likely(cpu_online(cpu)))
++ atomic_set_cpu(cpu, &cpu_idle_map);
++}
++
++static inline void atomic_clear_cpu(int cpu, cpumask_t *cpumask)
++{
++ clear_bit(cpu, (volatile unsigned long *)cpumask);
++}
++
++static inline void clear_cpuidle_map(int cpu)
++{
++ atomic_clear_cpu(cpu, &cpu_idle_map);
++}
++
++static bool suitable_idle_cpus(struct task_struct *p)
++{
++ return (cpumask_intersects(&p->cpus_allowed, &cpu_idle_map));
++}
++
++/*
++ * Resched current on rq. We don't know if rq is local to this CPU nor if it
++ * is locked so we do not use an intermediate variable for the task to avoid
++ * having it dereferenced.
++ */
++static void resched_curr(struct rq *rq)
++{
++ int cpu;
++
++ if (test_tsk_need_resched(rq->curr))
++ return;
++
++ rq->preempt = rq->curr;
++ cpu = rq->cpu;
++
++ /* We're doing this without holding the rq lock if it's not task_rq */
++
++ if (cpu == smp_processor_id()) {
++ set_tsk_need_resched(rq->curr);
++ set_preempt_need_resched();
++ return;
++ }
++
++ if (set_nr_and_not_polling(rq->curr))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++#define CPUIDLE_DIFF_THREAD (1)
++#define CPUIDLE_DIFF_CORE (2)
++#define CPUIDLE_CACHE_BUSY (4)
++#define CPUIDLE_DIFF_CPU (8)
++#define CPUIDLE_THREAD_BUSY (16)
++#define CPUIDLE_DIFF_NODE (32)
++
++/*
++ * The best idle CPU is chosen according to the CPUIDLE ranking above where the
++ * lowest value would give the most suitable CPU to schedule p onto next. The
++ * order works out to be the following:
++ *
++ * Same thread, idle or busy cache, idle or busy threads
++ * Other core, same cache, idle or busy cache, idle threads.
++ * Same node, other CPU, idle cache, idle threads.
++ * Same node, other CPU, busy cache, idle threads.
++ * Other core, same cache, busy threads.
++ * Same node, other CPU, busy threads.
++ * Other node, other CPU, idle cache, idle threads.
++ * Other node, other CPU, busy cache, idle threads.
++ * Other node, other CPU, busy threads.
++ */
++static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
++{
++ int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY |
++ CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE |
++ CPUIDLE_DIFF_THREAD;
++ int cpu_tmp;
++
++ if (cpumask_test_cpu(best_cpu, tmpmask))
++ goto out;
++
++ for_each_cpu(cpu_tmp, tmpmask) {
++ int ranking, locality;
++ struct rq *tmp_rq;
++
++ ranking = 0;
++ tmp_rq = cpu_rq(cpu_tmp);
++
++ locality = rq->cpu_locality[cpu_tmp];
++#ifdef CONFIG_NUMA
++ if (locality > 3)
++ ranking |= CPUIDLE_DIFF_NODE;
++ else
++#endif
++ if (locality > 2)
++ ranking |= CPUIDLE_DIFF_CPU;
++#ifdef CONFIG_SCHED_MC
++ else if (locality == 2)
++ ranking |= CPUIDLE_DIFF_CORE;
++ else if (!(tmp_rq->cache_idle(tmp_rq)))
++ ranking |= CPUIDLE_CACHE_BUSY;
++#endif
++#ifdef CONFIG_SCHED_SMT
++ if (locality == 1)
++ ranking |= CPUIDLE_DIFF_THREAD;
++ if (!(tmp_rq->siblings_idle(tmp_rq)))
++ ranking |= CPUIDLE_THREAD_BUSY;
++#endif
++ if (ranking < best_ranking) {
++ best_cpu = cpu_tmp;
++ best_ranking = ranking;
++ }
++ }
++out:
++ return best_cpu;
++}
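++
++/*
++ * Worked example of the ranking above (illustrative only; the locality
++ * numbering is the one implied by the checks: 1 = SMT sibling, 2 = other
++ * core sharing cache, 3 = other CPU on the same node, > 3 = other node):
++ * an otherwise idle CPU on another core of the same package ranks
++ * CPUIDLE_DIFF_CORE = 2, while an otherwise idle CPU on another NUMA node
++ * ranks CPUIDLE_DIFF_NODE = 32, so the lower-ranked same-package core is
++ * chosen. Busy shared caches and busy SMT siblings add 4 and 16
++ * respectively, pushing a candidate further down the preference order
++ * without changing its locality class.
++ */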
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++ struct rq *this_rq = cpu_rq(this_cpu);
++
++ return (this_rq->cpu_locality[that_cpu] < 3);
++}
++
++/* As per resched_curr but only will resched idle task */
++static inline void resched_idle(struct rq *rq)
++{
++ if (test_tsk_need_resched(rq->idle))
++ return;
++
++ rq->preempt = rq->idle;
++
++ set_tsk_need_resched(rq->idle);
++
++ if (rq_local(rq)) {
++ set_preempt_need_resched();
++ return;
++ }
++
++ smp_sched_reschedule(rq->cpu);
++}
++
++static struct rq *resched_best_idle(struct task_struct *p, int cpu)
++{
++ cpumask_t tmpmask;
++ struct rq *rq;
++ int best_cpu;
++
++ cpumask_and(&tmpmask, &p->cpus_allowed, &cpu_idle_map);
++ best_cpu = best_mask_cpu(cpu, task_rq(p), &tmpmask);
++ rq = cpu_rq(best_cpu);
++ if (!smt_schedule(p, rq))
++ return NULL;
++ rq->preempt = p;
++ resched_idle(rq);
++ return rq;
++}
++
++static inline void resched_suitable_idle(struct task_struct *p)
++{
++ if (suitable_idle_cpus(p))
++ resched_best_idle(p, task_cpu(p));
++}
++
++static inline struct rq *rq_order(struct rq *rq, int cpu)
++{
++ return rq->rq_order[cpu];
++}
++#else /* CONFIG_SMP */
++static inline void set_cpuidle_map(int cpu)
++{
++}
++
++static inline void clear_cpuidle_map(int cpu)
++{
++}
++
++static inline bool suitable_idle_cpus(struct task_struct *p)
++{
++ return uprq->curr == uprq->idle;
++}
++
++static inline void resched_suitable_idle(struct task_struct *p)
++{
++}
++
++static inline void resched_curr(struct rq *rq)
++{
++ resched_task(rq->curr);
++}
++
++static inline void resched_if_idle(struct rq *rq)
++{
++}
++
++static inline bool rq_local(struct rq *rq)
++{
++ return true;
++}
++
++static inline struct rq *rq_order(struct rq *rq, int cpu)
++{
++ return rq;
++}
++
++static inline bool smt_schedule(struct task_struct *p, struct rq *rq)
++{
++ return true;
++}
++#endif /* CONFIG_SMP */
++
++static inline int normal_prio(struct task_struct *p)
++{
++ if (has_rt_policy(p))
++ return MAX_RT_PRIO - 1 - p->rt_priority;
++ if (idleprio_task(p))
++ return IDLE_PRIO;
++ if (iso_task(p))
++ return ISO_PRIO;
++ return NORMAL_PRIO;
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks as it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++ p->normal_prio = normal_prio(p);
++ /*
++ * If we are RT tasks or we were boosted to RT priority,
++ * keep the priority unchanged. Otherwise, update priority
++ * to the normal priority:
++ */
++ if (!rt_prio(p->prio))
++ return p->normal_prio;
++ return p->prio;
++}
++
++/*
++ * activate_task - move a task to the runqueue. Enter with rq locked.
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++ resched_if_idle(rq);
++
++ /*
++ * Sleep time is in units of nanosecs, so shift by 20 to get a
++ * milliseconds-range estimation of the amount of time that the task
++ * spent sleeping:
++ */
++ if (unlikely(prof_on == SLEEP_PROFILING)) {
++ if (p->state == TASK_UNINTERRUPTIBLE)
++ profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
++ (rq->niffies - p->last_ran) >> 20);
++ }
++
++ p->prio = effective_prio(p);
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible--;
++
++ enqueue_task(rq, p, 0);
++ p->on_rq = TASK_ON_RQ_QUEUED;
++}
++
++/*
++ * deactivate_task - If it's running, it's not on the runqueue and we can just
++ * decrement the nr_running. Enter with rq locked.
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible++;
++
++ p->on_rq = 0;
++ sched_info_dequeued(rq, p);
++}
++
++#ifdef CONFIG_SMP
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++ struct rq *rq;
++
++ if (task_cpu(p) == new_cpu)
++ return;
++
++ /* Do NOT call set_task_cpu on a currently queued task as we will not
++ * be reliably holding the rq lock after changing CPU. */
++ BUG_ON(task_queued(p));
++ rq = task_rq(p);
++
++#ifdef CONFIG_LOCKDEP
++ /*
++ * The caller should hold either p->pi_lock or rq->lock, when changing
++ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++ *
++ * Furthermore, all task_rq users should acquire both locks, see
++ * task_rq_lock().
++ */
++ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++ lockdep_is_held(&rq->lock)));
++#endif
++
++ trace_sched_migrate_task(p, new_cpu);
++ perf_event_task_migrate(p);
++
++ /*
++ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
++ * successfully executed on another CPU. We must ensure that updates of
++ * per-task data have been completed by this moment.
++ */
++ smp_wmb();
++
++ p->wake_cpu = new_cpu;
++
++ if (task_running(rq, p)) {
++ /*
++ * We should only be calling this on a running task if we're
++ * holding rq lock.
++ */
++ lockdep_assert_held(&rq->lock);
++
++ /*
++ * We can't change the task_thread_info CPU on a running task
++ * as p will still be protected by the rq lock of the CPU it
++ * is still running on so we only set the wake_cpu for it to be
++ * lazily updated once off the CPU.
++ */
++ return;
++ }
++
++#ifdef CONFIG_THREAD_INFO_IN_TASK
++ p->cpu = new_cpu;
++#else
++ task_thread_info(p)->cpu = new_cpu;
++#endif
++ /* We're no longer protecting p after this point since we're holding
++ * the wrong runqueue lock. */
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * Move a task off the runqueue and take it to a cpu where it will
++ * become the running task.
++ */
++static inline void take_task(struct rq *rq, int cpu, struct task_struct *p)
++{
++ struct rq *p_rq = task_rq(p);
++
++ dequeue_task(p_rq, p, DEQUEUE_SAVE);
++ if (p_rq != rq) {
++ sched_info_dequeued(p_rq, p);
++ sched_info_queued(rq, p);
++ }
++ set_task_cpu(p, cpu);
++}
++
++/*
++ * Returns a descheduling task to the runqueue unless it is being
++ * deactivated.
++ */
++static inline void return_task(struct task_struct *p, struct rq *rq,
++ int cpu, bool deactivate)
++{
++ if (deactivate)
++ deactivate_task(p, rq);
++ else {
++#ifdef CONFIG_SMP
++ /*
++ * set_task_cpu was called on the running task that doesn't
++ * want to deactivate so it has to be enqueued to a different
++ * CPU and we need its lock. Tag it to be moved, as the
++ * lock is dropped in finish_lock_switch.
++ */
++ if (unlikely(p->wake_cpu != cpu))
++ p->on_rq = TASK_ON_RQ_MIGRATING;
++ else
++#endif
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ }
++}
++
++/* Enter with rq lock held. We know p is on the local cpu */
++static inline void __set_tsk_resched(struct task_struct *p)
++{
++ set_tsk_need_resched(p);
++ set_preempt_need_resched();
++}
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++ return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * If @match_state is nonzero, it's the @p->state value just checked and
++ * not expected to change. If it changes, i.e. @p might have woken up,
++ * then return zero. When we succeed in waiting for @p to be off its CPU,
++ * we return a positive number (its total switch count). If a second call
++ * a short while later returns the same number, the caller can be sure that
++ * @p has remained unscheduled the whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, long match_state)
++{
++ int running, queued;
++ unsigned long flags;
++ unsigned long ncsw;
++ struct rq *rq;
++
++ for (;;) {
++ rq = task_rq(p);
++
++ /*
++ * If the task is actively running on another CPU
++ * still, just relax and busy-wait without holding
++ * any locks.
++ *
++ * NOTE! Since we don't hold any locks, it's not
++ * even sure that "rq" stays as the right runqueue!
++ * But we don't care, since this will return false
++ * if the runqueue has changed and p is actually now
++ * running somewhere else!
++ */
++ while (task_running(rq, p)) {
++ if (match_state && unlikely(p->state != match_state))
++ return 0;
++ cpu_relax();
++ }
++
++ /*
++ * Ok, time to look more closely! We need the rq
++ * lock now, to be *sure*. If we're wrong, we'll
++ * just go back and repeat.
++ */
++ rq = task_rq_lock(p, &flags);
++ trace_sched_wait_task(p);
++ running = task_running(rq, p);
++ queued = task_on_rq_queued(p);
++ ncsw = 0;
++ if (!match_state || p->state == match_state)
++ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++ task_rq_unlock(rq, p, &flags);
++
++ /*
++ * If it changed from the expected state, bail out now.
++ */
++ if (unlikely(!ncsw))
++ break;
++
++ /*
++ * Was it really running after all now that we
++ * checked with the proper locks actually held?
++ *
++ * Oops. Go back and try again..
++ */
++ if (unlikely(running)) {
++ cpu_relax();
++ continue;
++ }
++
++ /*
++ * It's not enough that it's not actively running,
++ * it must be off the runqueue _entirely_, and not
++ * preempted!
++ *
++ * So if it was still runnable (but just not actively
++ * running right now), it's preempted, and we should
++ * yield - it could be a while.
++ */
++ if (unlikely(queued)) {
++ ktime_t to = NSEC_PER_SEC / HZ;
++
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_hrtimeout(&to, HRTIMER_MODE_REL);
++ continue;
++ }
++
++ /*
++ * Ahh, all good. It wasn't running, and it wasn't
++ * runnable, which means that it will never become
++ * running in the future either. We're all done!
++ */
++ break;
++ }
++
++ return ncsw;
++}
++
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++ int cpu;
++
++ preempt_disable();
++ cpu = task_cpu(p);
++ if ((cpu != smp_processor_id()) && task_curr(p))
++ smp_sched_reschedule(cpu);
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++#endif
++
++/*
++ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
++ * basis of earlier deadlines. SCHED_IDLEPRIO tasks don't preempt anything
++ * else or each other; they cooperatively multitask. An idle rq scores as
++ * prio PRIO_LIMIT so it is always preempted.
++ */
++static inline bool
++can_preempt(struct task_struct *p, int prio, u64 deadline)
++{
++ /* Better static priority RT task or better policy preemption */
++ if (p->prio < prio)
++ return true;
++ if (p->prio > prio)
++ return false;
++ if (p->policy == SCHED_BATCH)
++ return false;
++ /* SCHED_NORMAL and ISO will preempt based on deadline */
++ if (!deadline_before(p->deadline, deadline))
++ return false;
++ return true;
++}
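++
++/*
++ * Illustrative example of the rules above (a sketch, assuming MAX_RT_PRIO
++ * is 100 as in mainline): a woken SCHED_FIFO task with rt_priority 50 has
++ * prio 100 - 1 - 50 = 49 and preempts any runqueue running a SCHED_NORMAL
++ * task, whose prio is always higher. Between two SCHED_NORMAL tasks of
++ * equal prio the earlier virtual deadline wins, while a SCHED_BATCH task
++ * never preempts a peer of equal prio.
++ */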
++
++#ifdef CONFIG_SMP
++/*
++ * Check to see if p can run on cpu, and if not, whether there are any online
++ * CPUs it can run on instead.
++ */
++static inline bool needs_other_cpu(struct task_struct *p, int cpu)
++{
++ if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed)))
++ return true;
++ return false;
++}
++#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
++
++static void try_preempt(struct task_struct *p, struct rq *this_rq)
++{
++ int i, this_entries = rq_load(this_rq);
++ cpumask_t tmp;
++
++ if (suitable_idle_cpus(p) && resched_best_idle(p, task_cpu(p)))
++ return;
++
++ /* IDLEPRIO tasks never preempt anything but idle */
++ if (p->policy == SCHED_IDLEPRIO)
++ return;
++
++ cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed);
++
++ for (i = 0; i < num_possible_cpus(); i++) {
++ struct rq *rq = this_rq->rq_order[i];
++
++ if (!cpumask_test_cpu(rq->cpu, &tmp))
++ continue;
++
++ if (!sched_interactive && rq != this_rq && rq_load(rq) <= this_entries)
++ continue;
++ if (smt_schedule(p, rq) && can_preempt(p, rq->rq_prio, rq->rq_deadline)) {
++ /* We set rq->preempting lockless, it's a hint only */
++ rq->preempting = p;
++ resched_curr(rq);
++ return;
++ }
++ }
++}
++
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++ const struct cpumask *new_mask, bool check);
++#else /* CONFIG_SMP */
++static inline bool needs_other_cpu(struct task_struct *p, int cpu)
++{
++ return false;
++}
++
++static void try_preempt(struct task_struct *p, struct rq *this_rq)
++{
++ if (p->policy == SCHED_IDLEPRIO)
++ return;
++ if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline))
++ resched_curr(uprq);
++}
++
++static inline int __set_cpus_allowed_ptr(struct task_struct *p,
++ const struct cpumask *new_mask, bool check)
++{
++ return set_cpus_allowed_ptr(p, new_mask);
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * wake flags
++ */
++#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
++#define WF_FORK 0x02 /* child wakeup after fork */
++#define WF_MIGRATED 0x04 /* internal use, task got migrated */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++ struct rq *rq;
++
++ if (!schedstat_enabled())
++ return;
++
++ rq = this_rq();
++
++#ifdef CONFIG_SMP
++ if (cpu == rq->cpu)
++ schedstat_inc(rq->ttwu_local);
++ else {
++ struct sched_domain *sd;
++
++ rcu_read_lock();
++ for_each_domain(rq->cpu, sd) {
++ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
++ schedstat_inc(sd->ttwu_wake_remote);
++ break;
++ }
++ }
++ rcu_read_unlock();
++ }
++
++#endif /* CONFIG_SMP */
++
++ schedstat_inc(rq->ttwu_count);
++}
++
++static inline void ttwu_activate(struct rq *rq, struct task_struct *p)
++{
++ activate_task(p, rq);
++
++ /* if a worker is waking up, notify the workqueue */
++ if (p->flags & PF_WQ_WORKER)
++ wq_worker_waking_up(p, cpu_of(rq));
++}
++
++/*
++ * Mark the task runnable and perform wakeup-preemption.
++ */
++static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++ /*
++ * Sync wakeups (i.e. those types of wakeups where the waker
++ * has indicated that it will leave the CPU in short order)
++ * don't trigger a preemption if there are no idle cpus,
++ * instead waiting for current to deschedule.
++ */
++ if (wake_flags & WF_SYNC)
++ resched_suitable_idle(p);
++ else
++ try_preempt(p, rq);
++ p->state = TASK_RUNNING;
++ trace_sched_wakeup(p);
++}
++
++static void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++ lockdep_assert_held(&rq->lock);
++
++#ifdef CONFIG_SMP
++ if (p->sched_contributes_to_load)
++ rq->nr_uninterruptible--;
++#endif
++
++ ttwu_activate(rq, p);
++ ttwu_do_wakeup(rq, p, wake_flags);
++}
++
++/*
++ * Called in case the task @p isn't fully descheduled from its runqueue,
++ * in this case we must do a remote wakeup. It's a 'light' wakeup though,
++ * since all we need to do is flip p->state to TASK_RUNNING, since
++ * the task is still ->on_rq.
++ */
++static int ttwu_remote(struct task_struct *p, int wake_flags)
++{
++ struct rq *rq;
++ int ret = 0;
++
++ rq = __task_rq_lock(p);
++ if (likely(task_on_rq_queued(p))) {
++ ttwu_do_wakeup(rq, p, wake_flags);
++ ret = 1;
++ }
++ __task_rq_unlock(rq);
++
++ return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void)
++{
++ struct rq *rq = this_rq();
++ struct llist_node *llist = llist_del_all(&rq->wake_list);
++ struct task_struct *p, *t;
++ unsigned long flags;
++
++ if (!llist)
++ return;
++
++ rq_lock_irqsave(rq, &flags);
++
++ llist_for_each_entry_safe(p, t, llist, wake_entry)
++ ttwu_do_activate(rq, p, 0);
++
++ rq_unlock_irqrestore(rq, &flags);
++}
++
++void scheduler_ipi(void)
++{
++ /*
++ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
++ * TIF_NEED_RESCHED remotely (for the first time) will also send
++ * this IPI.
++ */
++ preempt_fold_need_resched();
++
++ if (llist_empty(&this_rq()->wake_list) && (!idle_cpu(smp_processor_id()) || need_resched()))
++ return;
++
++ /*
++ * Not all reschedule IPI handlers call irq_enter/irq_exit, since
++ * traditionally all their work was done from the interrupt return
++ * path. Now that we actually do some work, we need to make sure
++ * we do call them.
++ *
++ * Some archs already do call them, luckily irq_enter/exit nest
++ * properly.
++ *
++ * Arguably we should visit all archs and update all handlers,
++ * however a fair share of IPIs are still resched only so this would
++ * somewhat pessimize the simple resched case.
++ */
++ irq_enter();
++ sched_ttwu_pending();
++ irq_exit();
++}
++
++static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
++ if (!set_nr_if_polling(rq->idle))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++ }
++}
++
++void wake_up_if_idle(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ rcu_read_lock();
++
++ if (!is_idle_task(rcu_dereference(rq->curr)))
++ goto out;
++
++ if (set_nr_if_polling(rq->idle)) {
++ trace_sched_wake_idle_without_ipi(cpu);
++ } else {
++ rq_lock_irqsave(rq, &flags);
++ if (likely(is_idle_task(rq->curr)))
++ smp_sched_reschedule(cpu);
++ /* Else cpu is not in idle, do nothing here */
++ rq_unlock_irqrestore(rq, &flags);
++ }
++
++out:
++ rcu_read_unlock();
++}
++
++static int valid_task_cpu(struct task_struct *p)
++{
++ cpumask_t valid_mask;
++
++ if (p->flags & PF_KTHREAD)
++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_online_mask);
++ else
++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_active_mask);
++
++ if (unlikely(!cpumask_weight(&valid_mask))) {
++ /* Hotplug boot threads do this before the CPU is up */
++ printk(KERN_INFO "SCHED: No cpumask for %s/%d weight %d\n", p->comm, p->pid, cpumask_weight(&p->cpus_allowed));
++ return cpumask_any(&p->cpus_allowed);
++ }
++ return cpumask_any(&valid_mask);
++}
++
++/*
++ * For a task that's just being woken up we have a valuable balancing
++ * opportunity so choose the nearest-cache, most lightly loaded runqueue.
++ * Entered with rq locked and returns with the chosen runqueue locked.
++ */
++static inline int select_best_cpu(struct task_struct *p)
++{
++ unsigned int idlest = ~0U;
++ struct rq *rq = NULL;
++ int i;
++
++ if (suitable_idle_cpus(p)) {
++ int cpu = task_cpu(p);
++
++ if (unlikely(needs_other_cpu(p, cpu)))
++ cpu = valid_task_cpu(p);
++ rq = resched_best_idle(p, cpu);
++ if (likely(rq))
++ return rq->cpu;
++ }
++
++ for (i = 0; i < num_possible_cpus(); i++) {
++ struct rq *other_rq = task_rq(p)->rq_order[i];
++ int entries;
++
++ if (!other_rq->online)
++ continue;
++ if (needs_other_cpu(p, other_rq->cpu))
++ continue;
++ entries = rq_load(other_rq);
++ if (entries >= idlest)
++ continue;
++ idlest = entries;
++ rq = other_rq;
++ }
++ if (unlikely(!rq))
++ return task_cpu(p);
++ return rq->cpu;
++}
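++
++/*
++ * A brief sketch of the selection above (illustrative only; rq_order[] is
++ * assumed to list runqueues from cache-nearest to cache-furthest): the
++ * scan prefers a lightly loaded runqueue close to the task's old CPU, and
++ * an equally loaded but more distant runqueue loses to a nearer one
++ * because only a strictly lower load replaces the current choice.
++ */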
++#else /* CONFIG_SMP */
++static int valid_task_cpu(struct task_struct *p)
++{
++ return 0;
++}
++
++static inline int select_best_cpu(struct task_struct *p)
++{
++ return 0;
++}
++
++static struct rq *resched_best_idle(struct task_struct *p, int cpu)
++{
++ return NULL;
++}
++#endif /* CONFIG_SMP */
++
++static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++#if defined(CONFIG_SMP)
++ if (!cpus_share_cache(smp_processor_id(), cpu)) {
++ sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++ ttwu_queue_remote(p, cpu, wake_flags);
++ return;
++ }
++#endif
++ rq_lock(rq);
++ ttwu_do_activate(rq, p, wake_flags);
++ rq_unlock(rq);
++}
++
++/***
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Put it on the run-queue if it's not already there. The "current"
++ * thread is always on the run-queue (except when the actual
++ * re-schedule is in progress), and as such you're allowed to do
++ * the simpler "current->state = TASK_RUNNING" to mark yourself
++ * runnable without the overhead of this.
++ *
++ * Return: %true if @p was woken up, %false if it was already running
++ * or @state didn't match @p's state.
++ */
++static int
++try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
++{
++ unsigned long flags;
++ int cpu, success = 0;
++
++ /*
++ * If we are going to wake up a thread waiting for CONDITION we
++ * need to ensure that CONDITION=1 done by the caller can not be
++ * reordered with p->state check below. This pairs with mb() in
++ * set_current_state() the waiting thread does.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ smp_mb__after_spinlock();
++ /* state is a volatile long, why, I don't know */
++ if (!((unsigned int)p->state & state))
++ goto out;
++
++ trace_sched_waking(p);
++
++ /* We're going to change ->state: */
++ success = 1;
++ cpu = task_cpu(p);
++
++ /*
++ * Ensure we load p->on_rq _after_ p->state, otherwise it would
++ * be possible to, falsely, observe p->on_rq == 0 and get stuck
++ * in smp_cond_load_acquire() below.
++ *
++ * sched_ttwu_pending() try_to_wake_up()
++ * [S] p->on_rq = 1; [L] P->state
++ * UNLOCK rq->lock -----.
++ * \
++ * +--- RMB
++ * schedule() /
++ * LOCK rq->lock -----'
++ * UNLOCK rq->lock
++ *
++ * [task p]
++ * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
++ *
++ * Pairs with the UNLOCK+LOCK on rq->lock from the
++ * last wakeup of our task and the schedule that got our task
++ * current.
++ */
++ smp_rmb();
++ if (p->on_rq && ttwu_remote(p, wake_flags))
++ goto stat;
++
++#ifdef CONFIG_SMP
++ /*
++ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++ * possible to, falsely, observe p->on_cpu == 0.
++ *
++ * One must be running (->on_cpu == 1) in order to remove oneself
++ * from the runqueue.
++ *
++ * [S] ->on_cpu = 1; [L] ->on_rq
++ * UNLOCK rq->lock
++ * RMB
++ * LOCK rq->lock
++ * [S] ->on_rq = 0; [L] ->on_cpu
++ *
++ * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
++ * from the consecutive calls to schedule(); the first switching to our
++ * task, the second putting it to sleep.
++ */
++ smp_rmb();
++
++ /*
++ * If the owning (remote) CPU is still in the middle of schedule() with
++ * this task as prev, wait until its done referencing the task.
++ *
++ * Pairs with the smp_store_release() in finish_lock_switch().
++ *
++ * This ensures that tasks getting woken will be fully ordered against
++ * their previous state and preserve Program Order.
++ */
++ smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++ p->sched_contributes_to_load = !!task_contributes_to_load(p);
++ p->state = TASK_WAKING;
++
++ if (p->in_iowait) {
++ delayacct_blkio_end();
++ atomic_dec(&task_rq(p)->nr_iowait);
++ }
++
++ cpu = select_best_cpu(p);
++ if (task_cpu(p) != cpu)
++ set_task_cpu(p, cpu);
++
++#else /* CONFIG_SMP */
++
++ if (p->in_iowait) {
++ delayacct_blkio_end();
++ atomic_dec(&task_rq(p)->nr_iowait);
++ }
++
++#endif /* CONFIG_SMP */
++
++ ttwu_queue(p, cpu, wake_flags);
++stat:
++ ttwu_stat(p, cpu, wake_flags);
++out:
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++ return success;
++}
++
++/**
++ * try_to_wake_up_local - try to wake up a local task with rq lock held
++ * @p: the thread to be awakened
++ *
++ * Put @p on the run-queue if it's not already there. The caller must
++ * ensure that rq is locked and @p is not the current task.
++ * rq stays locked over invocation.
++ */
++static void try_to_wake_up_local(struct task_struct *p)
++{
++ struct rq *rq = task_rq(p);
++
++ if (WARN_ON_ONCE(rq != this_rq()) ||
++ WARN_ON_ONCE(p == current))
++ return;
++
++ lockdep_assert_held(&rq->lock);
++
++ if (!raw_spin_trylock(&p->pi_lock)) {
++ /*
++ * This is OK, because current is on_cpu, which avoids it being
++ * picked for load-balance and preemption/IRQs are still
++ * disabled avoiding further scheduler activity on it and we've
++ * not yet picked a replacement task.
++ */
++ rq_unlock(rq);
++ raw_spin_lock(&p->pi_lock);
++ rq_lock(rq);
++ }
++
++ if (!(p->state & TASK_NORMAL))
++ goto out;
++
++ trace_sched_waking(p);
++
++ if (!task_on_rq_queued(p)) {
++ if (p->in_iowait) {
++ delayacct_blkio_end();
++ atomic_dec(&rq->nr_iowait);
++ }
++ ttwu_activate(rq, p);
++ }
++
++ ttwu_do_wakeup(rq, p, 0);
++ ttwu_stat(p, smp_processor_id(), 0);
++out:
++ raw_spin_unlock(&p->pi_lock);
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * It may be assumed that this function implies a write memory barrier before
++ * changing the task state if and only if any tasks are woken up.
++ */
++int wake_up_process(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++ return try_to_wake_up(p, state, 0);
++}
++
++static void time_slice_expired(struct task_struct *p, struct rq *rq);
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ */
++int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p)
++{
++ unsigned long flags;
++ int cpu = get_cpu();
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++ INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++ /*
++ * We mark the process as NEW here. This guarantees that
++ * nobody will actually run it, and a signal or other external
++ * event cannot wake it up and insert it on the runqueue either.
++ */
++ p->state = TASK_NEW;
++
++ /*
++ * The process state is set to the same value as the process executing
++ * do_fork() code. That is running. This guarantees that nobody will
++ * actually run it, and a signal or other external event cannot wake
++ * it up and insert it on the runqueue either.
++ */
++
++ /* Should be reset in fork.c but done here for ease of MuQSS patching */
++ p->on_cpu =
++ p->on_rq =
++ p->utime =
++ p->stime =
++ p->sched_time =
++ p->stime_ns =
++ p->utime_ns = 0;
++ skiplist_node_init(&p->node);
++
++ /*
++ * Revert to default priority/policy on fork if requested.
++ */
++ if (unlikely(p->sched_reset_on_fork)) {
++ if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
++ p->policy = SCHED_NORMAL;
++ p->normal_prio = normal_prio(p);
++ }
++
++ if (PRIO_TO_NICE(p->static_prio) < 0) {
++ p->static_prio = NICE_TO_PRIO(0);
++ p->normal_prio = p->static_prio;
++ }
++
++ /*
++ * We don't need the reset flag anymore after the fork. It has
++ * fulfilled its duty:
++ */
++ p->sched_reset_on_fork = 0;
++ }
++
++ /*
++ * Silence PROVE_RCU.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ set_task_cpu(p, cpu);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++#ifdef CONFIG_SCHED_INFO
++ if (unlikely(sched_info_on()))
++ memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++ init_task_preempt_count(p);
++
++ put_cpu();
++ return 0;
++}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++static bool __initdata __sched_schedstats = false;
++
++static void set_schedstats(bool enabled)
++{
++ if (enabled)
++ static_branch_enable(&sched_schedstats);
++ else
++ static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++ if (!schedstat_enabled()) {
++ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++ static_branch_enable(&sched_schedstats);
++ }
++}
++
++static int __init setup_schedstats(char *str)
++{
++ int ret = 0;
++ if (!str)
++ goto out;
++
++ /*
++ * This code is called before jump labels have been set up, so we can't
++ * change the static branch directly just yet. Instead set a temporary
++ * variable so init_schedstats() can do it later.
++ */
++ if (!strcmp(str, "enable")) {
++ __sched_schedstats = true;
++ ret = 1;
++ } else if (!strcmp(str, "disable")) {
++ __sched_schedstats = false;
++ ret = 1;
++ }
++out:
++ if (!ret)
++ pr_warn("Unable to parse schedstats=\n");
++
++ return ret;
++}
++__setup("schedstats=", setup_schedstats);
++
++static void __init init_schedstats(void)
++{
++ set_schedstats(__sched_schedstats);
++}
++
++#ifdef CONFIG_PROC_SYSCTL
++int sysctl_schedstats(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ struct ctl_table t;
++ int err;
++ int state = static_branch_likely(&sched_schedstats);
++
++ if (write && !capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ t = *table;
++ t.data = &state;
++ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++ if (err < 0)
++ return err;
++ if (write)
++ set_schedstats(state);
++ return err;
++}
++#endif /* CONFIG_PROC_SYSCTL */
++#else /* !CONFIG_SCHEDSTATS */
++static inline void init_schedstats(void) {}
++#endif /* CONFIG_SCHEDSTATS */
++
++static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p);
++
++static void account_task_cpu(struct rq *rq, struct task_struct *p)
++{
++ update_clocks(rq);
++ /* This isn't really a context switch but accounting is the same */
++ update_cpu_clock_switch(rq, p);
++ p->last_ran = rq->niffies;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++static inline int hrexpiry_enabled(struct rq *rq)
++{
++ if (unlikely(!cpu_active(cpu_of(rq)) || !sched_smp_initialized))
++ return 0;
++ return hrtimer_is_hres_active(&rq->hrexpiry_timer);
++}
++
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++static inline void hrexpiry_clear(struct rq *rq)
++{
++ if (!hrexpiry_enabled(rq))
++ return;
++ if (hrtimer_active(&rq->hrexpiry_timer))
++ hrtimer_cancel(&rq->hrexpiry_timer);
++}
++
++/*
++ * High-resolution time_slice expiry.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrexpiry(struct hrtimer *timer)
++{
++ struct rq *rq = container_of(timer, struct rq, hrexpiry_timer);
++ struct task_struct *p;
++
++ /* This can happen during CPU hotplug / resume */
++ if (unlikely(cpu_of(rq) != smp_processor_id()))
++ goto out;
++
++ /*
++ * We're doing this without the runqueue lock but this should always
++ * be run on the local CPU. Time slice should run out in __schedule
++ * but we set it to zero here in case niffies is slightly less.
++ */
++ p = rq->curr;
++ p->time_slice = 0;
++ __set_tsk_resched(p);
++out:
++ return HRTIMER_NORESTART;
++}
++
++/*
++ * Called to set the hrexpiry timer state.
++ *
++ * called with irqs disabled from the local CPU only
++ */
++static void hrexpiry_start(struct rq *rq, u64 delay)
++{
++ if (!hrexpiry_enabled(rq))
++ return;
++
++ hrtimer_start(&rq->hrexpiry_timer, ns_to_ktime(delay),
++ HRTIMER_MODE_REL_PINNED);
++}
++
++static void init_rq_hrexpiry(struct rq *rq)
++{
++ hrtimer_init(&rq->hrexpiry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ rq->hrexpiry_timer.function = hrexpiry;
++}
++
++static inline int rq_dither(struct rq *rq)
++{
++ if (!hrexpiry_enabled(rq))
++ return HALF_JIFFY_US;
++ return 0;
++}
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++ struct task_struct *parent, *rq_curr;
++ struct rq *rq, *new_rq;
++ unsigned long flags;
++
++ parent = p->parent;
++
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ p->state = TASK_RUNNING;
++ /* Task_rq can't change yet on a new task */
++ new_rq = rq = task_rq(p);
++ if (unlikely(needs_other_cpu(p, task_cpu(p)))) {
++ set_task_cpu(p, valid_task_cpu(p));
++ new_rq = task_rq(p);
++ }
++
++ double_rq_lock(rq, new_rq);
++ rq_curr = rq->curr;
++
++ /*
++ * Make sure we do not leak PI boosting priority to the child.
++ */
++ p->prio = rq_curr->normal_prio;
++
++ trace_sched_wakeup_new(p);
++
++ /*
++ * Share the timeslice between parent and child, thus the
++ * total amount of pending timeslices in the system doesn't change,
++ * resulting in more scheduling fairness. If it's negative, it won't
++ * matter since that's the same as being 0. rq->rq_deadline is only
++ * modified within schedule() so it is always equal to
++ * current->deadline.
++ */
++ account_task_cpu(rq, rq_curr);
++ p->last_ran = rq_curr->last_ran;
++ if (likely(rq_curr->policy != SCHED_FIFO)) {
++ rq_curr->time_slice /= 2;
++ if (rq_curr->time_slice < RESCHED_US) {
++ /*
++ * Forking task has run out of timeslice. Reschedule it and
++ * start its child with a new time slice and deadline. The
++ * child will end up running first because its deadline will
++ * be slightly earlier.
++ */
++ __set_tsk_resched(rq_curr);
++ time_slice_expired(p, new_rq);
++ if (suitable_idle_cpus(p))
++ resched_best_idle(p, task_cpu(p));
++ else if (unlikely(rq != new_rq))
++ try_preempt(p, new_rq);
++ } else {
++ p->time_slice = rq_curr->time_slice;
++ if (rq_curr == parent && rq == new_rq && !suitable_idle_cpus(p)) {
++ /*
++ * The VM isn't cloned, so we're in a good position to
++ * do child-runs-first in anticipation of an exec. This
++ * usually avoids a lot of COW overhead.
++ */
++ __set_tsk_resched(rq_curr);
++ } else {
++ /*
++ * Adjust the hrexpiry since rq_curr will keep
++ * running and its timeslice has been shortened.
++ */
++ hrexpiry_start(rq, US_TO_NS(rq_curr->time_slice));
++ try_preempt(p, new_rq);
++ }
++ }
++ } else {
++ time_slice_expired(p, new_rq);
++ try_preempt(p, new_rq);
++ }
++ activate_task(p, new_rq);
++ double_rq_unlock(rq, new_rq);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
++
++void preempt_notifier_inc(void)
++{
++ static_key_slow_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++ static_key_slow_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++ if (!static_key_false(&preempt_notifier_key))
++ WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++ hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++ hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++ struct preempt_notifier *notifier;
++
++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++ notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++ if (static_key_false(&preempt_notifier_key))
++ __fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++ struct preempt_notifier *notifier;
++
++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++ notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++ if (static_key_false(&preempt_notifier_key))
++ __fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ sched_info_switch(rq, prev, next);
++ perf_event_task_sched_out(prev, next);
++ fire_sched_out_preempt_notifiers(prev, next);
++ prepare_lock_switch(rq, next);
++ prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock. (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static void finish_task_switch(struct task_struct *prev)
++ __releases(rq->lock)
++{
++ struct rq *rq = this_rq();
++ struct mm_struct *mm = rq->prev_mm;
++ long prev_state;
++
++ /*
++ * The previous task will have left us with a preempt_count of 2
++ * because it left us after:
++ *
++ * schedule()
++ * preempt_disable(); // 1
++ * __schedule()
++ * raw_spin_lock_irq(&rq->lock) // 2
++ *
++ * Also, see FORK_PREEMPT_COUNT.
++ */
++ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++ "corrupted preempt_count: %s/%d/0x%x\n",
++ current->comm, current->pid, preempt_count()))
++ preempt_count_set(FORK_PREEMPT_COUNT);
++
++ rq->prev_mm = NULL;
++
++ /*
++ * A task struct has one reference for the use as "current".
++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++ * schedule one last time. The schedule call will never return, and
++ * the scheduled task must drop that reference.
++ *
++ * We must observe prev->state before clearing prev->on_cpu (in
++ * finish_lock_switch), otherwise a concurrent wakeup can get prev
++ * running on another CPU and we could race with its RUNNING -> DEAD
++ * transition, resulting in a double drop.
++ */
++ prev_state = prev->state;
++ vtime_task_switch(prev);
++ perf_event_task_sched_in(prev, current);
++ /*
++ * The membarrier system call requires a full memory barrier
++ * after storing to rq->curr, before going back to user-space.
++ *
++ * TODO: This smp_mb__after_unlock_lock can go away if PPC end
++ * up adding a full barrier to switch_mm(), or we should figure
++ * out if a smp_mb__after_unlock_lock is really the proper API
++ * to use.
++ */
++ smp_mb__after_unlock_lock();
++ finish_lock_switch(rq, prev);
++ finish_arch_post_lock_switch();
++
++ fire_sched_in_preempt_notifiers(current);
++ if (mm)
++ mmdrop(mm);
++ if (unlikely(prev_state == TASK_DEAD)) {
++ /*
++ * Remove function-return probe instances associated with this
++ * task and put them back on the free list.
++ */
++ kprobe_flush_task(prev);
++
++ /* Task is done with its stack. */
++ put_task_stack(prev);
++
++ put_task_struct(prev);
++ }
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++{
++ /*
++ * New tasks start with FORK_PREEMPT_COUNT, see there and
++ * finish_task_switch() for details.
++ *
++ * finish_task_switch() will drop rq->lock() and lower preempt_count
++ * and the preempt_enable() will end up enabling preemption (on
++ * PREEMPT_COUNT kernels).
++ */
++
++ finish_task_switch(prev);
++ preempt_enable();
++
++ if (current->set_child_tid)
++ put_user(task_pid_vnr(current), current->set_child_tid);
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline void
++context_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ struct mm_struct *mm, *oldmm;
++
++ prepare_task_switch(rq, prev, next);
++
++ mm = next->mm;
++ oldmm = prev->active_mm;
++ /*
++ * For paravirt, this is coupled with an exit in switch_to to
++ * combine the page table reload and the switch backend into
++ * one hypercall.
++ */
++ arch_start_context_switch(prev);
++
++ if (!mm) {
++ next->active_mm = oldmm;
++ mmgrab(oldmm);
++ enter_lazy_tlb(oldmm, next);
++ } else
++ switch_mm_irqs_off(oldmm, mm, next);
++
++ if (!prev->mm) {
++ prev->active_mm = NULL;
++ rq->prev_mm = oldmm;
++ }
++ /*
++ * The runqueue lock will be released by the next
++ * task (which is an invalid locking op but in the case
++ * of the scheduler it's an obvious special-case), so we
++ * do an early lockdep release here:
++ */
++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
++
++ /* Here we just switch the register state and the stack. */
++ switch_to(prev, next, prev);
++ barrier();
++
++ finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned long nr_running(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_running;
++
++ return sum;
++}
++
++static unsigned long nr_uninterruptible(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_uninterruptible;
++
++ return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race. The caller is responsible to use it correctly, for example:
++ *
++ * - from a non-preemptable section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++ struct rq *rq = cpu_rq(smp_processor_id());
++
++ if (rq_load(rq) == 1)
++ return true;
++ else
++ return false;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches(void)
++{
++ int i;
++ unsigned long long sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += cpu_rq(i)->nr_switches;
++
++ return sum;
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU, only the one
++ * CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means, that when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, by reason of under-accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU, it can wake to another CPU than it
++ * blocked on. This means the per CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned long nr_iowait(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += atomic_read(&cpu_rq(i)->nr_iowait);
++
++ return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpufreq menu
++ * governor, are using nonsensical data: boosting frequency for a CPU that has
++ * IO-wait, which might not even end up running the task when it does become
++ * runnable.
++ */
++
++unsigned long nr_iowait_cpu(int cpu)
++{
++ struct rq *this = cpu_rq(cpu);
++ return atomic_read(&this->nr_iowait);
++}
++
++unsigned long nr_active(void)
++{
++ return nr_running() + nr_uninterruptible();
++}
++
++/*
++ * I/O wait is the number of running or queued tasks with their ->rq pointer
++ * set to this cpu, i.e. the CPU they're most likely to run on.
++ */
++void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
++{
++ struct rq *rq = this_rq();
++
++ *nr_waiters = atomic_read(&rq->nr_iowait);
++ *load = rq_load(rq);
++}
++
++/* Variables and functions for calc_load */
++static unsigned long calc_load_update;
++unsigned long avenrun[3];
++EXPORT_SYMBOL(avenrun);
++
++/**
++ * get_avenrun - get the load average array
++ * @loads: pointer to dest load array
++ * @offset: offset to add
++ * @shift: shift count to shift the result left
++ *
++ * These values are estimates at best, so no need for locking.
++ */
++void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
++{
++ loads[0] = (avenrun[0] + offset) << shift;
++ loads[1] = (avenrun[1] + offset) << shift;
++ loads[2] = (avenrun[2] + offset) << shift;
++}
++
++static unsigned long
++calc_load(unsigned long load, unsigned long exp, unsigned long active)
++{
++ unsigned long newload;
++
++ newload = load * exp + active * (FIXED_1 - exp);
++ if (active >= load)
++ newload += FIXED_1-1;
++
++ return newload / FIXED_1;
++}
++
++/*
++ * calc_load - update the avenrun load estimates every LOAD_FREQ seconds.
++ */
++void calc_global_load(unsigned long ticks)
++{
++ long active;
++
++ if (time_before(jiffies, READ_ONCE(calc_load_update)))
++ return;
++ active = nr_active() * FIXED_1;
++
++ avenrun[0] = calc_load(avenrun[0], EXP_1, active);
++ avenrun[1] = calc_load(avenrun[1], EXP_5, active);
++ avenrun[2] = calc_load(avenrun[2], EXP_15, active);
++
++ calc_load_update = jiffies + LOAD_FREQ;
++}
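++
++/*
++ * Worked example of the exponential decay above (a sketch, assuming the
++ * usual fixed-point constants FIXED_1 = 2048 and EXP_1 = 1884): starting
++ * from avenrun[0] = 0 with one runnable task, active = 2048 and one
++ * LOAD_FREQ interval gives
++ *
++ *   newload = 0 * 1884 + 2048 * (2048 - 1884) + 2047 = 337919
++ *   avenrun[0] = 337919 / 2048 = 164  ~=  0.08
++ *
++ * so the one-minute figure creeps towards 1.0 a little on every update
++ * (roughly every five seconds) while the task stays runnable.
++ */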
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++#ifdef CONFIG_PARAVIRT
++static inline u64 steal_ticks(u64 steal)
++{
++ if (unlikely(steal > NSEC_PER_SEC))
++ return div_u64(steal, TICK_NSEC);
++
++ return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
++}
++#endif
++
++#ifndef nsecs_to_cputime
++# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
++#endif
++
++/*
++ * On each tick, add the number of nanoseconds to the unbanked variables and
++ * once one tick's worth has accumulated, account it allowing for accurate
++ * sub-tick accounting and totals.
++ */
++static void pc_idle_time(struct rq *rq, struct task_struct *idle, unsigned long ns)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ unsigned long ticks;
++
++ if (atomic_read(&rq->nr_iowait) > 0) {
++ rq->iowait_ns += ns;
++ if (rq->iowait_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->iowait_ns);
++ cpustat[CPUTIME_IOWAIT] += (__force u64)TICK_NSEC * ticks;
++ rq->iowait_ns %= JIFFY_NS;
++ }
++ } else {
++ rq->idle_ns += ns;
++ if (rq->idle_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->idle_ns);
++ cpustat[CPUTIME_IDLE] += (__force u64)TICK_NSEC * ticks;
++ rq->idle_ns %= JIFFY_NS;
++ }
++ }
++ acct_update_integrals(idle);
++}
++
++static void pc_system_time(struct rq *rq, struct task_struct *p,
++ int hardirq_offset, unsigned long ns)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ unsigned long ticks;
++
++ p->stime_ns += ns;
++ if (p->stime_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(p->stime_ns);
++ p->stime_ns %= JIFFY_NS;
++ p->stime += (__force u64)TICK_NSEC * ticks;
++ account_group_system_time(p, TICK_NSEC * ticks);
++ }
++ p->sched_time += ns;
++ account_group_exec_runtime(p, ns);
++
++ if (hardirq_count() - hardirq_offset) {
++ rq->irq_ns += ns;
++ if (rq->irq_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->irq_ns);
++ cpustat[CPUTIME_IRQ] += (__force u64)TICK_NSEC * ticks;
++ rq->irq_ns %= JIFFY_NS;
++ }
++ } else if (in_serving_softirq()) {
++ rq->softirq_ns += ns;
++ if (rq->softirq_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->softirq_ns);
++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_NSEC * ticks;
++ rq->softirq_ns %= JIFFY_NS;
++ }
++ } else {
++ rq->system_ns += ns;
++ if (rq->system_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->system_ns);
++ cpustat[CPUTIME_SYSTEM] += (__force u64)TICK_NSEC * ticks;
++ rq->system_ns %= JIFFY_NS;
++ }
++ }
++ acct_update_integrals(p);
++}
++
++static void pc_user_time(struct rq *rq, struct task_struct *p, unsigned long ns)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ unsigned long ticks;
++
++ p->utime_ns += ns;
++ if (p->utime_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(p->utime_ns);
++ p->utime_ns %= JIFFY_NS;
++ p->utime += (__force u64)TICK_NSEC * ticks;
++ account_group_user_time(p, TICK_NSEC * ticks);
++ }
++ p->sched_time += ns;
++ account_group_exec_runtime(p, ns);
++
++ if (this_cpu_ksoftirqd() == p) {
++ /*
++ * ksoftirqd time do not get accounted in cpu_softirq_time.
++ * So, we have to handle it separately here.
++ */
++ rq->softirq_ns += ns;
++ if (rq->softirq_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->softirq_ns);
++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_NSEC * ticks;
++ rq->softirq_ns %= JIFFY_NS;
++ }
++ }
++
++ if (task_nice(p) > 0 || idleprio_task(p)) {
++ rq->nice_ns += ns;
++ if (rq->nice_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->nice_ns);
++ cpustat[CPUTIME_NICE] += (__force u64)TICK_NSEC * ticks;
++ rq->nice_ns %= JIFFY_NS;
++ }
++ } else {
++ rq->user_ns += ns;
++ if (rq->user_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->user_ns);
++ cpustat[CPUTIME_USER] += (__force u64)TICK_NSEC * ticks;
++ rq->user_ns %= JIFFY_NS;
++ }
++ }
++ acct_update_integrals(p);
++}
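++
++/*
++ * Worked example of the banking above (a sketch, assuming JIFFY_NS is one
++ * jiffy in nanoseconds, e.g. 10,000,000 ns at HZ=100): three consecutive
++ * 4 ms slices of user time accumulate 12,000,000 ns in p->utime_ns; one
++ * full 10 ms tick's worth is then added to p->utime and the remaining
++ * 2,000,000 ns carry over via the modulo for the next pass. The per-rq
++ * user/nice/irq/softirq/system buckets are banked into cpustat the same
++ * way.
++ */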
++
++/*
++ * This is called on clock ticks.
++ * Bank in p->sched_time the ns elapsed since the last tick or switch.
++ * CPU scheduler quota accounting is also performed here in microseconds.
++ */
++static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
++{
++ s64 account_ns = rq->niffies - p->last_ran;
++ struct task_struct *idle = rq->idle;
++
++ /* Accurate tick timekeeping */
++ if (user_mode(get_irq_regs()))
++ pc_user_time(rq, p, account_ns);
++ else if (p != idle || (irq_count() != HARDIRQ_OFFSET)) {
++ pc_system_time(rq, p, HARDIRQ_OFFSET, account_ns);
++ } else
++ pc_idle_time(rq, idle, account_ns);
++
++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
++ if (p->policy != SCHED_FIFO && p != idle)
++ p->time_slice -= NS_TO_US(account_ns);
++
++ p->last_ran = rq->niffies;
++}
++
++/*
++ * This is called on context switches.
++ * Bank in p->sched_time the ns elapsed since the last tick or switch.
++ * CPU scheduler quota accounting is also performed here in microseconds.
++ */
++static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
++{
++ s64 account_ns = rq->niffies - p->last_ran;
++ struct task_struct *idle = rq->idle;
++
++ /* Accurate subtick timekeeping */
++ if (p != idle)
++ pc_user_time(rq, p, account_ns);
++ else
++ pc_idle_time(rq, idle, account_ns);
++
++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
++ if (p->policy != SCHED_FIFO && p != idle)
++ p->time_slice -= NS_TO_US(account_ns);
++}
++
++/*
++ * Return any ns on the sched_clock that have not yet been accounted in
++ * @p in case that task is currently running.
++ *
++ * Called with task_rq_lock(p) held.
++ */
++static inline u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
++{
++ u64 ns = 0;
++
++ /*
++ * Must be ->curr _and_ ->on_rq. If dequeued, we would
++ * project cycles that may never be accounted to this
++ * thread, breaking clock_gettime().
++ */
++ if (p == rq->curr && task_on_rq_queued(p)) {
++ update_clocks(rq);
++ ns = rq->niffies - p->last_ran;
++ }
++
++ return ns;
++}
++
++/*
++ * Return accounted runtime for the task.
++ * This includes the current's pending runtime that has not been
++ * accounted yet.
++ *
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++ unsigned long flags;
++ struct rq *rq;
++ u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++ /*
++ * 64-bit doesn't need locks to atomically read a 64-bit value.
++ * So we have an optimization chance when the task's delta_exec is 0.
++ * Reading ->on_cpu is racy, but this is ok.
++ *
++ * If we race with it leaving CPU, we'll take a lock. So we're correct.
++ * If we race with it entering CPU, unaccounted time is 0. This is
++ * indistinguishable from the read occurring a few cycles earlier.
++ * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++ * been accounted, so we're correct here as well.
++ */
++ if (!p->on_cpu || !task_on_rq_queued(p))
++ return tsk_seruntime(p);
++#endif
++
++ rq = task_rq_lock(p, &flags);
++ ns = p->sched_time + do_task_delta_exec(p, rq);
++ task_rq_unlock(rq, p, &flags);
++
++ return ns;
++}
++
++/*
++ * Functions to test for when SCHED_ISO tasks have used their allocated
++ * quota as real time scheduling and convert them back to SCHED_NORMAL. All
++ * data is modified only by the local runqueue during scheduler_tick with
++ * interrupts disabled.
++ */
++
++/*
++ * Test if SCHED_ISO tasks have run longer than their allotted period as RT
++ * tasks and set the refractory flag if necessary. There is 10% hysteresis
++ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
++ * slow division.
++ */
++static inline void iso_tick(struct rq *rq)
++{
++ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - 1) / ISO_PERIOD;
++ rq->iso_ticks += 100;
++ if (rq->iso_ticks > ISO_PERIOD * sched_iso_cpu) {
++ rq->iso_refractory = true;
++ if (unlikely(rq->iso_ticks > ISO_PERIOD * 100))
++ rq->iso_ticks = ISO_PERIOD * 100;
++ }
++}
++
++/* No SCHED_ISO task was running so decrease rq->iso_ticks */
++static inline void no_iso_tick(struct rq *rq, int ticks)
++{
++ if (rq->iso_ticks > 0 || rq->iso_refractory) {
++ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - ticks) / ISO_PERIOD;
++ if (rq->iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) {
++ rq->iso_refractory = false;
++ if (unlikely(rq->iso_ticks < 0))
++ rq->iso_ticks = 0;
++ }
++ }
++}
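++
++/*
++ * Worked example of the hysteresis (a sketch, assuming the default
++ * sched_iso_cpu of 70): the refractory flag is raised once iso_ticks
++ * exceeds ISO_PERIOD * 70 and is only dropped again when it decays below
++ * ISO_PERIOD * (70 * 115 / 128) = ISO_PERIOD * 62, i.e. roughly 62% CPU,
++ * giving the ~10% gap described above so the flag doesn't flap around the
++ * limit.
++ */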
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static void task_running_tick(struct rq *rq)
++{
++ struct task_struct *p = rq->curr;
++
++ /*
++ * If a SCHED_ISO task is running we increment the iso_ticks. In
++ * order to prevent SCHED_ISO tasks from causing starvation in the
++ * presence of true RT tasks we account those as iso_ticks as well.
++ */
++ if (rt_task(p) || task_running_iso(p))
++ iso_tick(rq);
++ else
++ no_iso_tick(rq, 1);
++
++ /* SCHED_FIFO tasks never run out of timeslice. */
++ if (p->policy == SCHED_FIFO)
++ return;
++
++ if (iso_task(p)) {
++ if (task_running_iso(p)) {
++ if (rq->iso_refractory) {
++ /*
++ * SCHED_ISO task is running as RT and limit
++ * has been hit. Force it to reschedule as
++ * SCHED_NORMAL by zeroing its time_slice
++ */
++ p->time_slice = 0;
++ }
++ } else if (!rq->iso_refractory) {
++ /* Can now run as ISO again. Reschedule to pick up the priority */
++ goto out_resched;
++ }
++ }
++
++ /*
++ * Tasks that were scheduled in the first half of a tick are not
++ * allowed to run into the 2nd half of the next tick if they will
++ * run out of time slice in the interim. Otherwise, if they have
++ * less than RESCHED_US μs of time slice left they will be rescheduled.
++ * Dither is used as a backup for when hrexpiry is disabled or high
++ * resolution timers are not configured in.
++ */
++ if (p->time_slice - rq->dither >= RESCHED_US)
++ return;
++out_resched:
++ rq_lock(rq);
++ __set_tsk_resched(p);
++ rq_unlock(rq);
++}
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * We can stop the timer tick any time highres timers are active since
++ * we rely entirely on highres timeouts for task expiry rescheduling.
++ */
++static void sched_stop_tick(struct rq *rq, int cpu)
++{
++ if (!hrexpiry_enabled(rq))
++ return;
++ if (!tick_nohz_full_enabled())
++ return;
++ if (!tick_nohz_full_cpu(cpu))
++ return;
++ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++
++static inline void sched_start_tick(struct rq *rq, int cpu)
++{
++ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++
++/**
++ * scheduler_tick_max_deferment
++ *
++ * Keep at least one tick per second when a single
++ * active task is running.
++ *
++ * This makes sure that uptime continues to move forward, even
++ * with a very low granularity.
++ *
++ * Return: Maximum deferment in nanoseconds.
++ */
++u64 scheduler_tick_max_deferment(void)
++{
++ struct rq *rq = this_rq();
++ unsigned long next, now = READ_ONCE(jiffies);
++
++ next = rq->last_jiffy + HZ;
++
++ if (time_before_eq(next, now))
++ return 0;
++
++ return jiffies_to_nsecs(next - now);
++}
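++
++/*
++ * Since rq->last_jiffy records a jiffies value seen in the past,
++ * next - now can never exceed HZ here, so the deferment returned is
++ * capped at one second's worth of nanoseconds, which is what keeps
++ * jiffies and uptime advancing on nohz_full CPUs.
++ */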
++#else
++static inline void sched_stop_tick(struct rq *rq, int cpu)
++{
++}
++
++static inline void sched_start_tick(struct rq *rq, int cpu)
++{
++}
++#endif
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++ int cpu __maybe_unused = smp_processor_id();
++ struct rq *rq = cpu_rq(cpu);
++
++ sched_clock_tick();
++ update_clocks(rq);
++ update_load_avg(rq, 0);
++ update_cpu_clock_tick(rq, rq->curr);
++ if (!rq_idle(rq))
++ task_running_tick(rq);
++ else if (rq->last_jiffy > rq->last_scheduler_tick)
++ no_iso_tick(rq, rq->last_jiffy - rq->last_scheduler_tick);
++ rq->last_scheduler_tick = rq->last_jiffy;
++ rq->last_tick = rq->clock;
++ perf_event_task_tick();
++ sched_stop_tick(rq, cpu);
++}
++
++#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
++ defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++ if (preempt_count() == val) {
++ unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++ current->preempt_disable_ip = ip;
++#endif
++ trace_preempt_off(CALLER_ADDR0, ip);
++ }
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++ return;
++#endif
++ __preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Spinlock count overflowing soon?
++ */
++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++ PREEMPT_MASK - 10);
++#endif
++ preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++/*
++ * If the value passed in equals the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++ if (preempt_count() == val)
++ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++ return;
++ /*
++ * Is the spinlock portion underflowing?
++ */
++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++ !(preempt_count() & PREEMPT_MASK)))
++ return;
++#endif
++
++ preempt_latency_stop(val);
++ __preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ return p->preempt_disable_ip;
++#else
++ return 0;
++#endif
++}
++
++/*
++ * The time_slice is only refilled when it is empty and that is when we set a
++ * new deadline. Make sure update_clocks has been called recently to update
++ * rq->niffies.
++ */
++static void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++ p->time_slice = timeslice();
++ p->deadline = rq->niffies + task_deadline_diff(p);
++#ifdef CONFIG_SMT_NICE
++ if (!p->mm)
++ p->smt_bias = 0;
++ else if (rt_task(p))
++ p->smt_bias = 1 << 30;
++ else if (task_running_iso(p))
++ p->smt_bias = 1 << 29;
++ else if (idleprio_task(p)) {
++ if (task_running_idle(p))
++ p->smt_bias = 0;
++ else
++ p->smt_bias = 1;
++ } else if (--p->smt_bias < 1)
++ p->smt_bias = MAX_PRIO - p->static_prio;
++#endif
++}
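++
++/*
++ * A sketch of the SMT bias values assigned above: kernel threads (no mm)
++ * carry no bias, RT tasks get 1 << 30, ISO tasks 1 << 29, idleprio tasks
++ * 0 or 1 depending on whether they are currently running as idle, and
++ * normal tasks count their bias down each expiry before refilling it to
++ * MAX_PRIO - static_prio, so nicer tasks settle on a smaller bias.
++ */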
++
++/*
++ * Timeslices below RESCHED_US are considered as good as expired as there's no
++ * point rescheduling when there's so little time left. SCHED_BATCH tasks
++ * have been flagged as not latency sensitive and likely to be fully CPU
++ * bound, so every time they're rescheduled they have their time_slice
++ * refilled but get a new, later deadline to have little effect on
++ * SCHED_NORMAL tasks.
++ */
++static inline void check_deadline(struct task_struct *p, struct rq *rq)
++{
++ if (p->time_slice < RESCHED_US || batch_task(p))
++ time_slice_expired(p, rq);
++}
++
++/*
++ * Task selection with skiplists is a simple matter of picking off the first
++ * task in the sorted list, an O(1) operation. The lookup is amortised O(1),
++ * being bounded by the number of processors.
++ *
++ * Runqueues are selectively locked based on their unlocked data and then
++ * unlocked if not needed. At most 3 locks will be held at any time and are
++ * released as soon as they're no longer needed. All balancing between CPUs
++ * is thus done here in an extremely simple first come best fit manner.
++ *
++ * This iterates over runqueues in cache locality order. In interactive mode
++ * it iterates over all CPUs and finds the task with the best key/deadline.
++ * In non-interactive mode it will only take a task if it's from the current
++ * runqueue or a runqueue with more tasks than the current one with a better
++ * key/deadline.
++ */
++#ifdef CONFIG_SMP
++static inline struct task_struct
++*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
++{
++ struct rq *locked = NULL, *chosen = NULL;
++ struct task_struct *edt = idle;
++ int i, best_entries = 0;
++ u64 best_key = ~0ULL;
++
++ for (i = 0; i < num_possible_cpus(); i++) {
++ struct rq *other_rq = rq_order(rq, i);
++ int entries = other_rq->sl->entries;
++ skiplist_node *next;
++
++ /*
++ * Check for queued entries lockless first. The local runqueue
++ * is locked so entries will always be accurate.
++ */
++ if (!sched_interactive) {
++ /*
++ * Don't reschedule balance across nodes unless the CPU
++ * is idle.
++ */
++ if (edt != idle && rq->cpu_locality[other_rq->cpu] > 3)
++ break;
++ if (entries <= best_entries)
++ continue;
++ } else if (!entries)
++ continue;
++
++ /* if (i) implies other_rq != rq */
++ if (i) {
++ /* Check for best key queued lockless first */
++ if (other_rq->best_key >= best_key)
++ continue;
++
++ if (unlikely(!trylock_rq(rq, other_rq)))
++ continue;
++
++ /* Need to reevaluate entries after locking */
++ entries = other_rq->sl->entries;
++ if (unlikely(!entries)) {
++ unlock_rq(other_rq);
++ continue;
++ }
++ }
++
++ next = &other_rq->node;
++ /*
++ * In interactive mode we check beyond the best entry on other
++ * runqueues if we can't get the best for smt or affinity
++ * reasons.
++ */
++ while ((next = next->next[0]) != &other_rq->node) {
++ struct task_struct *p;
++ u64 key = next->key;
++
++ /* Reevaluate key after locking */
++ if (key >= best_key)
++ break;
++
++ p = next->value;
++ if (!smt_schedule(p, rq)) {
++ if (i && !sched_interactive)
++ break;
++ continue;
++ }
++
++ /* Make sure affinity is ok */
++ if (i) {
++ if (needs_other_cpu(p, cpu)) {
++ if (sched_interactive)
++ continue;
++ break;
++ }
++ /* From this point on p is the best so far */
++ if (locked)
++ unlock_rq(locked);
++ chosen = locked = other_rq;
++ }
++ best_entries = entries;
++ best_key = key;
++ edt = p;
++ break;
++ }
++ /*
++ * rq->preempting is a hint only as the state may have changed
++ * since it was set with the resched call but if we have met
++ * the condition we can break out here.
++ */
++ if (edt == rq->preempting)
++ break;
++ if (i && other_rq != chosen)
++ unlock_rq(other_rq);
++ }
++
++ if (likely(edt != idle))
++ take_task(rq, cpu, edt);
++
++ if (locked)
++ unlock_rq(locked);
++
++ rq->preempting = NULL;
++
++ return edt;
++}
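++
++/*
++ * Locking note for the loop above: besides the local runqueue lock held
++ * by the caller, at most two remote runqueue locks are held at any
++ * instant - the current best candidate's lock in 'locked' and the lock of
++ * the runqueue being examined. take_task() on a remotely queued task runs
++ * while that task's runqueue lock is still held, and all remote locks are
++ * dropped again before returning.
++ */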
++#else /* CONFIG_SMP */
++static inline struct task_struct
++*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
++{
++ struct task_struct *edt;
++
++ if (unlikely(!rq->sl->entries))
++ return idle;
++ edt = rq->node.next[0]->value;
++ take_task(rq, cpu, edt);
++ return edt;
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++ /* Save this before calling printk(), since that will clobber it */
++ unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++ if (oops_in_progress)
++ return;
++
++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++ prev->comm, prev->pid, preempt_count());
++
++ debug_show_held_locks(prev);
++ print_modules();
++ if (irqs_disabled())
++ print_irqtrace_events(prev);
++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++ && in_atomic_preempt_off()) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(preempt_disable_ip);
++ pr_cont("\n");
++ }
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++ if (task_stack_end_corrupted(prev))
++ panic("corrupted stack end detected inside scheduler\n");
++#endif
++
++ if (unlikely(in_atomic_preempt_off())) {
++ __schedule_bug(prev);
++ preempt_count_set(PREEMPT_DISABLED);
++ }
++ rcu_sleep_check();
++
++ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++ schedstat_inc(this_rq()->sched_count);
++}
++
++/*
++ * The currently running task's information is all stored in rq local data
++ * which is only modified by the local CPU.
++ */
++static inline void set_rq_task(struct rq *rq, struct task_struct *p)
++{
++ if (p == rq->idle || p->policy == SCHED_FIFO)
++ hrexpiry_clear(rq);
++ else
++ hrexpiry_start(rq, US_TO_NS(p->time_slice));
++ if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
++ rq->dither = 0;
++ else
++ rq->dither = rq_dither(rq);
++
++ rq->rq_deadline = p->deadline;
++ rq->rq_prio = p->prio;
++#ifdef CONFIG_SMT_NICE
++ rq->rq_mm = p->mm;
++ rq->rq_smt_bias = p->smt_bias;
++#endif
++}
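++
++/*
++ * Note that the hrexpiry timer is only armed above for tasks that can run
++ * out of timeslice; the idle task and SCHED_FIFO tasks have any pending
++ * expiry cleared instead. rq->dither is likewise zeroed when more than
++ * half a jiffy has passed since the last tick.
++ */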
++
++#ifdef CONFIG_SMT_NICE
++static void check_no_siblings(struct rq __maybe_unused *this_rq) {}
++static void wake_no_siblings(struct rq __maybe_unused *this_rq) {}
++static void (*check_siblings)(struct rq *this_rq) = &check_no_siblings;
++static void (*wake_siblings)(struct rq *this_rq) = &wake_no_siblings;
++
++/* Iterate over smt siblings when we've scheduled a process on cpu and decide
++ * whether they should continue running or be descheduled. */
++static void check_smt_siblings(struct rq *this_rq)
++{
++ int other_cpu;
++
++ for_each_cpu(other_cpu, &this_rq->thread_mask) {
++ struct task_struct *p;
++ struct rq *rq;
++
++ rq = cpu_rq(other_cpu);
++ if (rq_idle(rq))
++ continue;
++ p = rq->curr;
++ if (!smt_schedule(p, this_rq))
++ resched_curr(rq);
++ }
++}
++
++static void wake_smt_siblings(struct rq *this_rq)
++{
++ int other_cpu;
++
++ for_each_cpu(other_cpu, &this_rq->thread_mask) {
++ struct rq *rq;
++
++ rq = cpu_rq(other_cpu);
++ if (rq_idle(rq))
++ resched_idle(rq);
++ }
++}
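++
++/*
++ * In short: check_smt_siblings() asks every busy sibling whether its
++ * current task still passes smt_schedule() against what we just scheduled
++ * and reschedules it if not, while wake_smt_siblings() simply kicks any
++ * idle sibling so it can look for work.
++ */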
++#else
++static void check_siblings(struct rq __maybe_unused *this_rq) {}
++static void wake_siblings(struct rq __maybe_unused *this_rq) {}
++#endif
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ * paths. For example, see arch/x86/entry_64.S.
++ *
++ * To drive preemption between tasks, the scheduler sets the flag in timer
++ * interrupt handler scheduler_tick().
++ *
++ * 3. Wakeups don't really cause entry into schedule(). They add a
++ * task to the run-queue and that's it.
++ *
++ * Now, if the new task added to the run-queue preempts the current
++ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ * called on the nearest possible occasion:
++ *
++ * - If the kernel is preemptible (CONFIG_PREEMPT=y):
++ *
++ * - in syscall or exception context, at the next outmost
++ * preempt_enable(). (this might be as soon as the wake_up()'s
++ * spin_unlock()!)
++ *
++ * - in IRQ context, return from interrupt-handler to
++ * preemptible context
++ *
++ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
++ * then at the next:
++ *
++ * - cond_resched() call
++ * - explicit schedule() call
++ * - return from syscall or exception to user-space
++ * - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(bool preempt)
++{
++ struct task_struct *prev, *next, *idle;
++ unsigned long *switch_count;
++ bool deactivate = false;
++ struct rq *rq;
++ u64 niffies;
++ int cpu;
++
++ cpu = smp_processor_id();
++ rq = cpu_rq(cpu);
++ prev = rq->curr;
++ idle = rq->idle;
++
++ schedule_debug(prev);
++
++ local_irq_disable();
++ rcu_note_context_switch(preempt);
++
++ /*
++ * Make sure that signal_pending_state()->signal_pending() below
++ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++ * done by the caller to avoid the race with signal_wake_up().
++ */
++ rq_lock(rq);
++ smp_mb__after_spinlock();
++#ifdef CONFIG_SMP
++ if (rq->preempt) {
++ /*
++ * Make sure resched_curr hasn't triggered a preemption
++ * locklessly on a task that has since scheduled away. Spurious
++ * wakeup of idle is okay though.
++ */
++ if (unlikely(preempt && prev != idle && !test_tsk_need_resched(prev))) {
++ rq->preempt = NULL;
++ clear_preempt_need_resched();
++ rq_unlock_irq(rq);
++ return;
++ }
++ rq->preempt = NULL;
++ }
++#endif
++
++ switch_count = &prev->nivcsw;
++ if (!preempt && prev->state) {
++ if (unlikely(signal_pending_state(prev->state, prev))) {
++ prev->state = TASK_RUNNING;
++ } else {
++ deactivate = true;
++ prev->on_rq = 0;
++
++ if (prev->in_iowait) {
++ atomic_inc(&rq->nr_iowait);
++ delayacct_blkio_start();
++ }
++
++ /*
++ * If a worker is going to sleep, notify and
++ * ask workqueue whether it wants to wake up a
++ * task to maintain concurrency. If so, wake
++ * up the task.
++ */
++ if (prev->flags & PF_WQ_WORKER) {
++ struct task_struct *to_wakeup;
++
++ to_wakeup = wq_worker_sleeping(prev);
++ if (to_wakeup)
++ try_to_wake_up_local(to_wakeup);
++ }
++ }
++ switch_count = &prev->nvcsw;
++ }
++
++ /*
++ * Store the niffy value here for use by the next task's last_ran
++ * below to avoid losing niffies due to update_clocks being called
++ * again after this point.
++ */
++ update_clocks(rq);
++ niffies = rq->niffies;
++ update_cpu_clock_switch(rq, prev);
++
++ clear_tsk_need_resched(prev);
++ clear_preempt_need_resched();
++
++ if (idle != prev) {
++ check_deadline(prev, rq);
++ return_task(prev, rq, cpu, deactivate);
++ }
++
++ next = earliest_deadline_task(rq, cpu, idle);
++ if (likely(next->prio != PRIO_LIMIT))
++ clear_cpuidle_map(cpu);
++ else {
++ set_cpuidle_map(cpu);
++ update_load_avg(rq, 0);
++ }
++
++ set_rq_task(rq, next);
++ next->last_ran = niffies;
++
++ if (likely(prev != next)) {
++ /*
++ * Don't reschedule an idle task or deactivated tasks
++ */
++ if (prev != idle && !deactivate)
++ resched_suitable_idle(prev);
++ if (next != idle)
++ check_siblings(rq);
++ else
++ wake_siblings(rq);
++ rq->nr_switches++;
++ rq->curr = next;
++ /*
++ * The membarrier system call requires each architecture
++ * to have a full memory barrier after updating
++ * rq->curr, before returning to user-space. For TSO
++ * (e.g. x86), the architecture must provide its own
++ * barrier in switch_mm(). For weakly ordered machines
++ * for which spin_unlock() acts as a full memory
++ * barrier, finish_lock_switch() in common code takes
++ * care of this barrier. For weakly ordered machines for
++ * which spin_unlock() acts as a RELEASE barrier (only
++ * arm64 and PowerPC), arm64 has a full barrier in
++ * switch_to(), and PowerPC has
++ * smp_mb__after_unlock_lock() before
++ * finish_lock_switch().
++ */
++ ++*switch_count;
++
++ trace_sched_switch(preempt, prev, next);
++ context_switch(rq, prev, next); /* unlocks the rq */
++ } else {
++ check_siblings(rq);
++ rq_unlock(rq);
++ do_pending_softirq(rq, next);
++ local_irq_enable();
++ }
++}
++
++void __noreturn do_task_dead(void)
++{
++ /*
++ * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
++ * when the following two conditions become true.
++ * - There is a race condition on mmap_sem (it is acquired by
++ * exit_mm()), and
++ * - an SMI occurs before setting TASK_RUNNING.
++ * (or the hypervisor of a virtual machine switches to another guest)
++ * As a result, we may become TASK_RUNNING after becoming TASK_DEAD
++ *
++ * To avoid it, we have to wait for releasing tsk->pi_lock which
++ * is held by try_to_wake_up()
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ /* Causes final put_task_struct in finish_task_switch(). */
++ __set_current_state(TASK_DEAD);
++
++ /* Tell freezer to ignore us: */
++ current->flags |= PF_NOFREEZE;
++ __schedule(false);
++ BUG();
++
++ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++ for (;;)
++ cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++ if (!tsk->state || tsk_is_pi_blocked(tsk) ||
++ preempt_count() ||
++ signal_pending_state(tsk->state, tsk))
++ return;
++
++ /*
++ * If we are going to sleep and we have plugged IO queued,
++ * make sure to submit it to avoid deadlocks.
++ */
++ if (blk_needs_flush_plug(tsk))
++ blk_schedule_flush_plug(tsk);
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++ struct task_struct *tsk = current;
++
++ sched_submit_work(tsk);
++ do {
++ preempt_disable();
++ __schedule(false);
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++}
++
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (i.e. having scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disable() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++ /*
++ * As this skips calling sched_submit_work(), which the idle task does
++ * regardless because that function is a nop when the task is in a
++ * TASK_RUNNING state, make sure this isn't used someplace that the
++ * current task can be in any other state. Note, idle is always in the
++ * TASK_RUNNING state.
++ */
++ WARN_ON_ONCE(current->state);
++ do {
++ __schedule(false);
++ } while (need_resched());
++}
++
++#ifdef CONFIG_CONTEXT_TRACKING
++asmlinkage __visible void __sched schedule_user(void)
++{
++ /*
++ * If we come here after a random call to set_need_resched(),
++ * or we have been woken up remotely but the IPI has not yet arrived,
++ * we haven't yet exited the RCU idle mode. Do it here manually until
++ * we find a better solution.
++ *
++ * NB: There are buggy callers of this function. Ideally we
++ * should warn if prev_state != IN_USER, but that will trigger
++ * too frequently to make sense yet.
++ */
++ enum ctx_state prev_state = exception_enter();
++ schedule();
++ exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++ sched_preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++}
++
++static void __sched notrace preempt_schedule_common(void)
++{
++ do {
++ /*
++ * Because the function tracer can trace preempt_count_sub()
++ * and it also uses preempt_enable/disable_notrace(), if
++ * NEED_RESCHED is set, the preempt_enable_notrace() called
++ * by the function tracer will call this function again and
++ * cause infinite recursion.
++ *
++ * Preemption must be disabled here before the function
++ * tracer can trace. Break up preempt_disable() into two
++ * calls. One to disable preemption without fear of being
++ * traced. The other to still record the preemption latency,
++ * which can also be traced by the function tracer.
++ */
++ preempt_disable_notrace();
++ preempt_latency_start(1);
++ __schedule(true);
++ preempt_latency_stop(1);
++ preempt_enable_no_resched_notrace();
++
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ } while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPT
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable. Kernel preemption off the return-from-interrupt
++ * path occurs there and calls schedule() directly.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++ /*
++ * If there is a non-zero preempt_count or interrupts are disabled,
++ * we do not want to preempt the current task. Just return..
++ */
++ if (likely(!preemptible()))
++ return;
++
++ preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++ enum ctx_state prev_ctx;
++
++ if (likely(!preemptible()))
++ return;
++
++ do {
++ /*
++ * Because the function tracer can trace preempt_count_sub()
++ * and it also uses preempt_enable/disable_notrace(), if
++ * NEED_RESCHED is set, the preempt_enable_notrace() called
++ * by the function tracer will call this function again and
++ * cause infinite recursion.
++ *
++ * Preemption must be disabled here before the function
++ * tracer can trace. Break up preempt_disable() into two
++ * calls. One to disable preemption without fear of being
++ * traced. The other to still record the preemption latency,
++ * which can also be traced by the function tracer.
++ */
++ preempt_disable_notrace();
++ preempt_latency_start(1);
++ /*
++ * Needs preempt disabled in case user_exit() is traced
++ * and the tracer calls preempt_enable_notrace() causing
++ * an infinite recursion.
++ */
++ prev_ctx = exception_enter();
++ __schedule(true);
++ exception_exit(prev_ctx);
++
++ preempt_latency_stop(1);
++ preempt_enable_no_resched_notrace();
++ } while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#endif /* CONFIG_PREEMPT */
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with irqs disabled. This will
++ * protect us against recursive calling from irq.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++ enum ctx_state prev_state;
++
++ /* Catch callers which need to be fixed */
++ BUG_ON(preempt_count() || !irqs_disabled());
++
++ prev_state = exception_enter();
++
++ do {
++ preempt_disable();
++ local_irq_enable();
++ __schedule(true);
++ local_irq_disable();
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++
++ exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++ void *key)
++{
++ return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++ if (pi_task)
++ prio = min(prio, pi_task->prio);
++
++ return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++ return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++ int prio, oldprio;
++ struct rq *rq;
++
++ /* XXX used to be waiter->prio, not waiter->task->prio */
++ prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++ /*
++ * If nothing changed; bail early.
++ */
++ if (p->pi_top_task == pi_task && prio == p->prio)
++ return;
++
++ rq = __task_rq_lock(p);
++ update_rq_clock(rq);
++ /*
++ * Set under pi_lock && rq->lock, such that the value can be used under
++ * either lock.
++ *
++ * Note that there is loads of trickiness to make this pointer cache work
++ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++ * ensure a task is de-boosted (pi_task is set to NULL) before the
++ * task is allowed to run again (and can exit). This ensures the pointer
++ * points to a blocked task -- which guarantees the task is present.
++ */
++ p->pi_top_task = pi_task;
++
++ /*
++ * For FIFO/RR we only need to set prio, if that matches we're done.
++ */
++ if (prio == p->prio)
++ goto out_unlock;
++
++ /*
++ * Idle task boosting is a no-no in general. There is one
++ * exception, when PREEMPT_RT and NOHZ is active:
++ *
++ * The idle task calls get_next_timer_interrupt() and holds
++ * the timer wheel base->lock on the CPU and another CPU wants
++ * to access the timer (probably to cancel it). We can safely
++ * ignore the boosting request, as the idle CPU runs this code
++ * with interrupts disabled and will complete the lock
++ * protected section without being interrupted. So there is no
++ * real need to boost.
++ */
++ if (unlikely(p == rq->idle)) {
++ WARN_ON(p != rq->curr);
++ WARN_ON(p->pi_blocked_on);
++ goto out_unlock;
++ }
++
++ trace_sched_pi_setprio(p, pi_task);
++ oldprio = p->prio;
++ p->prio = prio;
++ if (task_running(rq, p)) {
++ if (prio > oldprio)
++ resched_task(p);
++ } else if (task_queued(p)) {
++ dequeue_task(rq, p, DEQUEUE_SAVE);
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ if (prio < oldprio)
++ try_preempt(p, rq);
++ }
++out_unlock:
++ __task_rq_unlock(rq);
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ return prio;
++}
++#endif
++
++/*
++ * Adjust the deadline for when the priority is to change, before it's
++ * changed.
++ */
++static inline void adjust_deadline(struct task_struct *p, int new_prio)
++{
++ p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p);
++}
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++ int new_static, old_static;
++ unsigned long flags;
++ struct rq *rq;
++
++ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++ return;
++ new_static = NICE_TO_PRIO(nice);
++ /*
++ * We have to be careful, if called from sys_setpriority(),
++ * the task might be in the middle of scheduling on another CPU.
++ */
++ rq = task_rq_lock(p, &flags);
++ update_rq_clock(rq);
++
++ /*
++ * The RT priorities are set via sched_setscheduler(), but we still
++ * allow the 'normal' nice value to be set - but as expected
++ * it won't have any effect on scheduling until the task returns
++ * to SCHED_NORMAL/SCHED_BATCH:
++ */
++ if (has_rt_policy(p)) {
++ p->static_prio = new_static;
++ goto out_unlock;
++ }
++
++ adjust_deadline(p, new_static);
++ old_static = p->static_prio;
++ p->static_prio = new_static;
++ p->prio = effective_prio(p);
++
++ if (task_queued(p)) {
++ dequeue_task(rq, p, DEQUEUE_SAVE);
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ if (new_static < old_static)
++ try_preempt(p, rq);
++ } else if (task_running(rq, p)) {
++ set_rq_task(rq, p);
++ if (old_static < new_static)
++ resched_task(p);
++ }
++out_unlock:
++ task_rq_unlock(rq, p, &flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++ /* Convert nice value [19,-20] to rlimit style value [1,40] */
++ int nice_rlim = nice_to_rlimit(nice);
++
++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
++ capable(CAP_SYS_NICE));
++}
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++ long nice, retval;
++
++ /*
++ * Setpriority might change our priority at the same moment.
++ * We don't have to worry. Conceptually one call occurs first
++ * and we have a single winner.
++ */
++
++ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++ nice = task_nice(current) + increment;
++
++ nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++ if (increment < 0 && !can_nice(current, nice))
++ return -EPERM;
++
++ retval = security_task_setnice(current, nice);
++ if (retval)
++ return retval;
++
++ set_user_nice(current, nice);
++ return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
++ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO).
++ */
++int task_prio(const struct task_struct *p)
++{
++ int delta, prio = p->prio - MAX_RT_PRIO;
++
++ /* rt tasks and iso tasks */
++ if (prio <= 0)
++ goto out;
++
++ /* Convert to ms to avoid overflows */
++ delta = NS_TO_MS(p->deadline - task_rq(p)->niffies);
++ if (unlikely(delta < 0))
++ delta = 0;
++ delta = delta * 40 / ms_longest_deadline_diff();
++ if (delta <= 80)
++ prio += delta;
++ if (idleprio_task(p))
++ prio += 40;
++out:
++ return prio;
++}
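++
++/*
++ * Worked example for the mapping above: a deadline exactly
++ * ms_longest_deadline_diff() in the future gives delta = 40, half that
++ * distance gives delta = 20, and anything beyond twice the longest
++ * deadline difference (delta > 80) leaves the priority untouched.
++ * SCHED_IDLEPRIO tasks are then offset a further 40 higher.
++ */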
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++ return cpu_curr(cpu) == cpu_rq(cpu)->idle;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the CPU @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++ return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++ return pid ? find_task_by_vpid(pid) : current;
++}
++
++/* Actually do priority change: must hold rq lock. */
++static void __setscheduler(struct task_struct *p, struct rq *rq, int policy,
++ int prio, bool keep_boost)
++{
++ int oldrtprio, oldprio;
++
++ p->policy = policy;
++ oldrtprio = p->rt_priority;
++ p->rt_priority = prio;
++ p->normal_prio = normal_prio(p);
++ oldprio = p->prio;
++ /*
++ * Keep a potential priority boosting if called from
++ * sched_setscheduler().
++ */
++ p->prio = normal_prio(p);
++ if (keep_boost)
++ p->prio = rt_effective_prio(p, p->prio);
++
++ if (task_running(rq, p)) {
++ set_rq_task(rq, p);
++ resched_task(p);
++ } else if (task_queued(p)) {
++ dequeue_task(rq, p, DEQUEUE_SAVE);
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ if (p->prio < oldprio || p->rt_priority > oldrtprio)
++ try_preempt(p, rq);
++ }
++}
++
++/*
++ * Check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++ const struct cred *cred = current_cred(), *pcred;
++ bool match;
++
++ rcu_read_lock();
++ pcred = __task_cred(p);
++ match = (uid_eq(cred->euid, pcred->euid) ||
++ uid_eq(cred->euid, pcred->uid));
++ rcu_read_unlock();
++ return match;
++}
++
++static int
++__sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param, bool user, bool pi)
++{
++ struct sched_param zero_param = { .sched_priority = 0 };
++ unsigned long flags, rlim_rtprio = 0;
++ int retval, oldpolicy = -1;
++ int reset_on_fork;
++ struct rq *rq;
++
++ /* The pi code expects interrupts enabled */
++ BUG_ON(pi && in_interrupt());
++
++ if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) {
++ unsigned long lflags;
++
++ if (!lock_task_sighand(p, &lflags))
++ return -ESRCH;
++ rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
++ unlock_task_sighand(p, &lflags);
++ if (rlim_rtprio)
++ goto recheck;
++ /*
++ * If the caller requested an RT policy without having the
++ * necessary rights, we downgrade the policy to SCHED_ISO.
++ * We also set the parameter to zero to pass the checks.
++ */
++ policy = SCHED_ISO;
++ param = &zero_param;
++ }
++recheck:
++ /* Double check policy once rq lock held */
++ if (policy < 0) {
++ reset_on_fork = p->sched_reset_on_fork;
++ policy = oldpolicy = p->policy;
++ } else {
++ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
++ policy &= ~SCHED_RESET_ON_FORK;
++
++ if (!SCHED_RANGE(policy))
++ return -EINVAL;
++ }
++
++ /*
++ * Valid priorities for SCHED_FIFO and SCHED_RR are
++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
++ * SCHED_BATCH is 0.
++ */
++ if (param->sched_priority < 0 ||
++ (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) ||
++ (!p->mm && param->sched_priority > MAX_RT_PRIO - 1))
++ return -EINVAL;
++ if (is_rt_policy(policy) != (param->sched_priority != 0))
++ return -EINVAL;
++
++ /*
++ * Allow unprivileged RT tasks to decrease priority:
++ */
++ if (user && !capable(CAP_SYS_NICE)) {
++ if (is_rt_policy(policy)) {
++ unsigned long rlim_rtprio =
++ task_rlimit(p, RLIMIT_RTPRIO);
++
++ /* Can't set/change the rt policy */
++ if (policy != p->policy && !rlim_rtprio)
++ return -EPERM;
++
++ /* Can't increase priority */
++ if (param->sched_priority > p->rt_priority &&
++ param->sched_priority > rlim_rtprio)
++ return -EPERM;
++ } else {
++ switch (p->policy) {
++ /*
++ * Can only downgrade policies but not back to
++ * SCHED_NORMAL
++ */
++ case SCHED_ISO:
++ if (policy == SCHED_ISO)
++ goto out;
++ if (policy != SCHED_NORMAL)
++ return -EPERM;
++ break;
++ case SCHED_BATCH:
++ if (policy == SCHED_BATCH)
++ goto out;
++ if (policy != SCHED_IDLEPRIO)
++ return -EPERM;
++ break;
++ case SCHED_IDLEPRIO:
++ if (policy == SCHED_IDLEPRIO)
++ goto out;
++ return -EPERM;
++ default:
++ break;
++ }
++ }
++
++ /* Can't change other user's priorities */
++ if (!check_same_owner(p))
++ return -EPERM;
++
++ /* Normal users shall not reset the sched_reset_on_fork flag: */
++ if (p->sched_reset_on_fork && !reset_on_fork)
++ return -EPERM;
++ }
++
++ if (user) {
++ retval = security_task_setscheduler(p);
++ if (retval)
++ return retval;
++ }
++
++ /*
++ * Make sure no PI-waiters arrive (or leave) while we are
++ * changing the priority of the task:
++ *
++ * To be able to change p->policy safely, the runqueue lock must be
++ * held.
++ */
++ rq = task_rq_lock(p, &flags);
++ update_rq_clock(rq);
++
++ /*
++ * Changing the policy of the stop threads is a very bad idea:
++ */
++ if (p == rq->stop) {
++ task_rq_unlock(rq, p, &flags);
++ return -EINVAL;
++ }
++
++ /*
++ * If not changing anything there's no need to proceed further:
++ */
++ if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
++ param->sched_priority == p->rt_priority))) {
++ task_rq_unlock(rq, p, &flags);
++ return 0;
++ }
++
++ /* Re-check policy now with rq lock held */
++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++ policy = oldpolicy = -1;
++ task_rq_unlock(rq, p, &flags);
++ goto recheck;
++ }
++ p->sched_reset_on_fork = reset_on_fork;
++
++ __setscheduler(p, rq, policy, param->sched_priority, pi);
++ task_rq_unlock(rq, p, &flags);
++
++ if (pi)
++ rt_mutex_adjust_pi(p);
++out:
++ return 0;
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may be already dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return __sched_setscheduler(p, policy, param, true, true);
++}
++
++EXPORT_SYMBOL_GPL(sched_setscheduler);
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++ const struct sched_param param = { .sched_priority = attr->sched_priority };
++ int policy = attr->sched_policy;
++
++ return __sched_setscheduler(p, policy, &param, true, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr);
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission. For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return __sched_setscheduler(p, policy, param, false, true);
++}
++EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++ struct sched_param lparam;
++ struct task_struct *p;
++ int retval;
++
++ if (!param || pid < 0)
++ return -EINVAL;
++ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++ return -EFAULT;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setscheduler(p, policy, &lparam);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr,
++ struct sched_attr *attr)
++{
++ u32 size;
++ int ret;
++
++ if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
++ return -EFAULT;
++
++ /* Zero the full structure, so that a short copy will be nice: */
++ memset(attr, 0, sizeof(*attr));
++
++ ret = get_user(size, &uattr->size);
++ if (ret)
++ return ret;
++
++ /* Bail out on silly large: */
++ if (size > PAGE_SIZE)
++ goto err_size;
++
++ /* ABI compatibility quirk: */
++ if (!size)
++ size = SCHED_ATTR_SIZE_VER0;
++
++ if (size < SCHED_ATTR_SIZE_VER0)
++ goto err_size;
++
++ /*
++ * If we're handed a bigger struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. new
++ * user-space does not rely on any kernel feature
++ * extensions we don't know about yet.
++ */
++ if (size > sizeof(*attr)) {
++ unsigned char __user *addr;
++ unsigned char __user *end;
++ unsigned char val;
++
++ addr = (void __user *)uattr + sizeof(*attr);
++ end = (void __user *)uattr + size;
++
++ for (; addr < end; addr++) {
++ ret = get_user(val, addr);
++ if (ret)
++ return ret;
++ if (val)
++ goto err_size;
++ }
++ size = sizeof(*attr);
++ }
++
++ ret = copy_from_user(attr, uattr, size);
++ if (ret)
++ return -EFAULT;
++
++ /*
++ * XXX: Do we want to be lenient like existing syscalls; or do we want
++ * to be strict and return an error on out-of-bounds values?
++ */
++ attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++ /* sched/core.c uses zero here but we already know ret is zero */
++ return 0;
++
++err_size:
++ put_user(sizeof(*attr), &uattr->size);
++ return -E2BIG;
++}
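++
++/*
++ * Net effect of the size handling above: a zero size is treated as the
++ * original SCHED_ATTR_SIZE_VER0 layout, and a struct larger than the
++ * kernel's sched_attr is accepted only if every byte beyond sizeof(*attr)
++ * is zero. Sizes below VER0 or above PAGE_SIZE, or non-zero trailing
++ * bytes, all land in err_size, which writes the kernel's size back to
++ * user-space and returns -E2BIG.
++ */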
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++ if (policy < 0)
++ return -EINVAL;
++
++ return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++ return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, flags)
++{
++ struct sched_attr attr;
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || flags)
++ return -EINVAL;
++
++ retval = sched_copy_attr(uattr, &attr);
++ if (retval)
++ return retval;
++
++ if ((int)attr.sched_policy < 0)
++ return -EINVAL;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setattr(p, &attr);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (pid < 0)
++ goto out_nounlock;
++
++ retval = -ESRCH;
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ if (p) {
++ retval = security_task_getscheduler(p);
++ if (!retval)
++ retval = p->policy;
++ }
++ rcu_read_unlock();
++
++out_nounlock:
++ return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++ struct sched_param lp = { .sched_priority = 0 };
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (!param || pid < 0)
++ goto out_nounlock;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ if (has_rt_policy(p))
++ lp.sched_priority = p->rt_priority;
++ rcu_read_unlock();
++
++ /*
++ * This one might sleep, we cannot do it with a spinlock held ...
++ */
++ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++static int sched_read_attr(struct sched_attr __user *uattr,
++ struct sched_attr *attr,
++ unsigned int usize)
++{
++ int ret;
++
++ if (!access_ok(VERIFY_WRITE, uattr, usize))
++ return -EFAULT;
++
++ /*
++ * If we're handed a smaller struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. old
++ * user-space does not get incomplete information.
++ */
++ if (usize < sizeof(*attr)) {
++ unsigned char *addr;
++ unsigned char *end;
++
++ addr = (void *)attr + usize;
++ end = (void *)attr + sizeof(*attr);
++
++ for (; addr < end; addr++) {
++ if (*addr)
++ return -EFBIG;
++ }
++
++ attr->size = usize;
++ }
++
++ ret = copy_to_user(uattr, attr, attr->size);
++ if (ret)
++ return -EFAULT;
++
++ /* sched/core.c uses zero here but we already know ret is zero */
++ return ret;
++}
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @size: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, size, unsigned int, flags)
++{
++ struct sched_attr attr = {
++ .size = sizeof(struct sched_attr),
++ };
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || size > PAGE_SIZE ||
++ size < SCHED_ATTR_SIZE_VER0 || flags)
++ return -EINVAL;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ attr.sched_policy = p->policy;
++ if (rt_task(p))
++ attr.sched_priority = p->rt_priority;
++ else
++ attr.sched_nice = task_nice(p);
++
++ rcu_read_unlock();
++
++ retval = sched_read_attr(uattr, &attr, size);
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++ cpumask_var_t cpus_allowed, new_mask;
++ struct task_struct *p;
++ int retval;
++
++ rcu_read_lock();
++
++ p = find_process_by_pid(pid);
++ if (!p) {
++ rcu_read_unlock();
++ return -ESRCH;
++ }
++
++ /* Prevent p going away */
++ get_task_struct(p);
++ rcu_read_unlock();
++
++ if (p->flags & PF_NO_SETAFFINITY) {
++ retval = -EINVAL;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_free_cpus_allowed;
++ }
++ retval = -EPERM;
++ if (!check_same_owner(p)) {
++ rcu_read_lock();
++ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++ rcu_read_unlock();
++ goto out_unlock;
++ }
++ rcu_read_unlock();
++ }
++
++ retval = security_task_setscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ cpuset_cpus_allowed(p, cpus_allowed);
++ cpumask_and(new_mask, in_mask, cpus_allowed);
++again:
++ retval = __set_cpus_allowed_ptr(p, new_mask, true);
++
++ if (!retval) {
++ cpuset_cpus_allowed(p, cpus_allowed);
++ if (!cpumask_subset(new_mask, cpus_allowed)) {
++ /*
++ * We must have raced with a concurrent cpuset
++ * update. Just reset the cpus_allowed to the
++ * cpuset's cpus_allowed
++ */
++ cpumask_copy(new_mask, cpus_allowed);
++ goto again;
++ }
++ }
++out_unlock:
++ free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++ free_cpumask_var(cpus_allowed);
++out_put_task:
++ put_task_struct(p);
++ return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++ cpumask_t *new_mask)
++{
++ if (len < cpumask_size())
++ cpumask_clear(new_mask);
++ else if (len > cpumask_size())
++ len = cpumask_size();
++
++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ cpumask_var_t new_mask;
++ int retval;
++
++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++ return -ENOMEM;
++
++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++ if (retval == 0)
++ retval = sched_setaffinity(pid, new_mask);
++ free_cpumask_var(new_mask);
++ return retval;
++}
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++ struct task_struct *p;
++ unsigned long flags;
++ int retval;
++
++ get_online_cpus();
++ rcu_read_lock();
++
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++out_unlock:
++ rcu_read_unlock();
++ put_online_cpus();
++
++ return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An error
++ * code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ int ret;
++ cpumask_var_t mask;
++
++ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++ return -EINVAL;
++ if (len & (sizeof(unsigned long)-1))
++ return -EINVAL;
++
++ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++ return -ENOMEM;
++
++ ret = sched_getaffinity(pid, mask);
++ if (ret == 0) {
++ size_t retlen = min_t(size_t, len, cpumask_size());
++
++ if (copy_to_user(user_mask_ptr, mask, retlen))
++ ret = -EFAULT;
++ else
++ ret = retlen;
++ }
++ free_cpumask_var(mask);
++
++ return ret;
++}
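++
++/*
++ * The positive return value above (the number of bytes copied) is what
++ * raw callers of the syscall see; the C library's sched_getaffinity()
++ * wrapper is expected to translate it into the usual 0-on-success
++ * convention.
++ */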
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. It does this by
++ * scheduling away the current task. If it still has the earliest deadline
++ * it will be scheduled again as the next task.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++ struct rq *rq;
++
++ if (!sched_yield_type)
++ goto out;
++
++ local_irq_disable();
++ rq = this_rq();
++ rq_lock(rq);
++
++ if (sched_yield_type > 1)
++ time_slice_expired(current, rq);
++ schedstat_inc(rq->yld_count);
++
++ /*
++ * Since we are going to call schedule() anyway, there's
++ * no need to preempt or enable interrupts:
++ */
++ preempt_disable();
++ rq_unlock(rq);
++ sched_preempt_enable_no_resched();
++
++ schedule();
++out:
++ return 0;
++}
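++
++/*
++ * Reading the branches above: sched_yield_type 0 turns sys_sched_yield()
++ * into a no-op, 1 merely reschedules the caller, and anything greater
++ * also expires the caller's time slice and deadline first so the yield
++ * genuinely deprioritises it.
++ */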
++
++#ifndef CONFIG_PREEMPT
++int __sched _cond_resched(void)
++{
++ if (should_resched(0)) {
++ preempt_schedule_common();
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(_cond_resched);
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++ int resched = should_resched(PREEMPT_LOCK_OFFSET);
++ int ret = 0;
++
++ lockdep_assert_held(lock);
++
++ if (spin_needbreak(lock) || resched) {
++ spin_unlock(lock);
++ if (resched)
++ preempt_schedule_common();
++ else
++ cpu_relax();
++ ret = 1;
++ spin_lock(lock);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
++
++int __sched __cond_resched_softirq(void)
++{
++ BUG_ON(!in_softirq());
++
++ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
++ local_bh_enable();
++ preempt_schedule_common();
++ local_bh_disable();
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(__cond_resched_softirq);
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run, if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++ set_current_state(TASK_RUNNING);
++ sys_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * Return:
++ * true (>0) if we indeed boosted the target task.
++ * false (0) if we failed to boost the target.
++ * -ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++ struct task_struct *rq_p;
++ struct rq *rq, *p_rq;
++ unsigned long flags;
++ int yielded = 0;
++
++ local_irq_save(flags);
++ rq = this_rq();
++
++again:
++ p_rq = task_rq(p);
++ /*
++ * If the target task is already running, or is not runnable at all,
++ * there's absolutely no point in yielding to it.
++ */
++ if (task_running(p_rq, p) || p->state) {
++ yielded = -ESRCH;
++ goto out_irq;
++ }
++
++ double_rq_lock(rq, p_rq);
++ if (unlikely(task_rq(p) != p_rq)) {
++ double_rq_unlock(rq, p_rq);
++ goto again;
++ }
++
++ yielded = 1;
++ schedstat_inc(rq->yld_count);
++ rq_p = rq->curr;
++ if (p->deadline > rq_p->deadline)
++ p->deadline = rq_p->deadline;
++ p->time_slice += rq_p->time_slice;
++ if (p->time_slice > timeslice())
++ p->time_slice = timeslice();
++ time_slice_expired(rq_p, rq);
++ if (preempt && rq != p_rq)
++ resched_task(p_rq->curr);
++ double_rq_unlock(rq, p_rq);
++out_irq:
++ local_irq_restore(flags);
++
++ if (yielded > 0)
++ schedule();
++ return yielded;
++}
++EXPORT_SYMBOL_GPL(yield_to);
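++
++/*
++ * A summary of what yield_to() does above: the target inherits the
++ * caller's deadline if that is earlier, absorbs the caller's remaining
++ * time slice (clamped to one full timeslice()), and the caller itself is
++ * handed a fresh slice with a correspondingly later deadline via
++ * time_slice_expired() before we reschedule.
++ */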
++
++int io_schedule_prepare(void)
++{
++ int old_iowait = current->in_iowait;
++
++ current->in_iowait = 1;
++ blk_schedule_flush_plug(current);
++
++ return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++ current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++ int token;
++ long ret;
++
++ token = io_schedule_prepare();
++ ret = schedule_timeout(timeout);
++ io_schedule_finish(token);
++
++ return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void io_schedule(void)
++{
++ int token;
++
++ token = io_schedule_prepare();
++ schedule();
++ io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = MAX_USER_RT_PRIO-1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_ISO:
++ case SCHED_IDLEPRIO:
++ ret = 0;
++ break;
++ }
++ return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = 1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_ISO:
++ case SCHED_IDLEPRIO:
++ ret = 0;
++ break;
++ }
++ return ret;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++ struct timespec __user *, interval)
++{
++ struct task_struct *p;
++ unsigned int time_slice;
++ unsigned long flags;
++ struct timespec t;
++ struct rq *rq;
++ int retval;
++
++ if (pid < 0)
++ return -EINVAL;
++
++ retval = -ESRCH;
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ rq = task_rq_lock(p, &flags);
++ time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(task_timeslice(p));
++ task_rq_unlock(rq, p, &flags);
++
++ rcu_read_unlock();
++ t = ns_to_timespec(time_slice);
++ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++void sched_show_task(struct task_struct *p)
++{
++ unsigned long free = 0;
++ int ppid;
++
++ if (!try_get_task_stack(p))
++ return;
++
++ printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p));
++
++ if (p->state == TASK_RUNNING)
++ printk(KERN_CONT " running task ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++ free = stack_not_used(p);
++#endif
++ ppid = 0;
++ rcu_read_lock();
++ if (pid_alive(p))
++ ppid = task_pid_nr(rcu_dereference(p->real_parent));
++ rcu_read_unlock();
++ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
++ task_pid_nr(p), ppid,
++ (unsigned long)task_thread_info(p)->flags);
++
++ print_worker_info(KERN_INFO, p);
++ show_stack(p, NULL);
++ put_task_stack(p);
++}
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++ /* no filter, everything matches */
++ if (!state_filter)
++ return true;
++
++ /* filter, but doesn't match */
++ if (!(p->state & state_filter))
++ return false;
++
++ /*
++ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++ * TASK_KILLABLE).
++ */
++ if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
++ return false;
++
++ return true;
++}
++
++void show_state_filter(unsigned long state_filter)
++{
++ struct task_struct *g, *p;
++
++#if BITS_PER_LONG == 32
++ printk(KERN_INFO
++ " task PC stack pid father\n");
++#else
++ printk(KERN_INFO
++ " task PC stack pid father\n");
++#endif
++ rcu_read_lock();
++ for_each_process_thread(g, p) {
++ /*
++ * reset the NMI-timeout, listing all files on a slow
++ * console might take a lot of time:
++ * Also, reset softlockup watchdogs on all CPUs, because
++ * another CPU might be blocked waiting for us to process
++ * an IPI.
++ */
++ touch_nmi_watchdog();
++ touch_all_softlockup_watchdogs();
++ if (state_filter_match(state_filter, p))
++ sched_show_task(p);
++ }
++
++ rcu_read_unlock();
++ /*
++ * Only show locks if all tasks are dumped:
++ */
++ if (!state_filter)
++ debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++ pr_info("Task dump for CPU %d:\n", cpu);
++ sched_show_task(cpu_curr(cpu));
++}
++
++#ifdef CONFIG_SMP
++void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
++{
++ cpumask_copy(&p->cpus_allowed, new_mask);
++ p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ struct rq *rq = task_rq(p);
++
++ lockdep_assert_held(&p->pi_lock);
++
++ cpumask_copy(&p->cpus_allowed, new_mask);
++
++ if (task_queued(p)) {
++ /*
++ * Because __kthread_bind() calls this on blocked tasks without
++ * holding rq->lock.
++ */
++ lockdep_assert_held(&rq->lock);
++ }
++}
++
++/*
++ * do_set_cpus_allowed(), called from outside the scheduler code, should not
++ * be called on a running or queued task. We should be holding pi_lock.
++ */
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ __do_set_cpus_allowed(p, new_mask);
++ if (needs_other_cpu(p, task_cpu(p))) {
++ struct rq *rq;
++
++ rq = __task_rq_lock(p);
++ set_task_cpu(p, valid_task_cpu(p));
++ resched_task(p);
++ __task_rq_unlock(rq);
++ }
++}
++#endif
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: cpu the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void init_idle(struct task_struct *idle, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&idle->pi_lock, flags);
++ raw_spin_lock(&rq->lock);
++ idle->last_ran = rq->niffies;
++ time_slice_expired(idle, rq);
++ idle->state = TASK_RUNNING;
++ /* Setting prio to illegal value shouldn't matter when never queued */
++ idle->prio = PRIO_LIMIT;
++
++ kasan_unpoison_task_stack(idle);
++
++#ifdef CONFIG_SMP
++ /*
++ * It's possible that init_idle() gets called multiple times on a task,
++ * in that case do_set_cpus_allowed() will not do the right thing.
++ *
++ * And since this is boot we can forgo the serialisation.
++ */
++ set_cpus_allowed_common(idle, cpumask_of(cpu));
++#ifdef CONFIG_SMT_NICE
++ idle->smt_bias = 0;
++#endif
++#endif
++ set_rq_task(rq, idle);
++
++ /* Silence PROVE_RCU */
++ rcu_read_lock();
++ set_task_cpu(idle, cpu);
++ rcu_read_unlock();
++
++ rq->curr = rq->idle = idle;
++ idle->on_rq = TASK_ON_RQ_QUEUED;
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++ /* Set the preempt count _outside_ the spinlocks! */
++ init_idle_preempt_count(idle, cpu);
++
++ ftrace_graph_init_idle_task(idle, cpu);
++ vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++ const struct cpumask __maybe_unused *trial)
++{
++ return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++ const struct cpumask *cs_cpus_allowed)
++{
++ int ret = 0;
++
++ /*
++ * Kthreads which disallow setaffinity shouldn't be moved
++ * to a new cpuset; we don't want to change their CPU
++ * affinity and isolating such threads by their set of
++ * allowed nodes is unnecessary. Thus, cpusets are not
++ * applicable for such threads. This prevents checking for
++ * success of set_cpus_allowed_ptr() on all attached tasks
++ * before cpus_allowed may be changed.
++ */
++ if (p->flags & PF_NO_SETAFFINITY)
++ ret = -EINVAL;
++
++ return ret;
++}
++
++void resched_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ rq_lock_irqsave(rq, &flags);
++ resched_task(cpu_curr(cpu));
++ rq_unlock_irqrestore(rq, &flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu)
++{
++}
++
++void select_nohz_load_balancer(int stop_tick)
++{
++}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU. This is good for power-savings.
++ *
++ * We don't do similar optimization for completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be uptodate wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++ int i, cpu = smp_processor_id();
++ struct sched_domain *sd;
++
++ if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
++ return cpu;
++
++ rcu_read_lock();
++ for_each_domain(cpu, sd) {
++ for_each_cpu(i, sched_domain_span(sd)) {
++ if (cpu == i)
++ continue;
++
++ if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
++ cpu = i;
++ goto unlock;
++ }
++ }
++ }
++
++ if (!is_housekeeping_cpu(cpu))
++ cpu = housekeeping_any_cpu();
++unlock:
++ rcu_read_unlock();
++ return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++void wake_up_idle_cpu(int cpu)
++{
++ if (cpu == smp_processor_id())
++ return;
++
++ if (set_nr_and_not_polling(cpu_rq(cpu)->idle))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static bool wake_up_full_nohz_cpu(int cpu)
++{
++ /*
++ * We just need the target to call irq_exit() and re-evaluate
++ * the next tick. The nohz full kick at least implies that.
++ * If needed we can still optimize that later with an
++ * empty IRQ.
++ */
++ if (cpu_is_offline(cpu))
++ return true; /* Don't try to wake offline CPUs. */
++ if (tick_nohz_full_cpu(cpu)) {
++ if (cpu != smp_processor_id() ||
++ tick_nohz_tick_stopped())
++ tick_nohz_full_kick_cpu(cpu);
++ return true;
++ }
++
++ return false;
++}
++
++/*
++ * Wake up the specified CPU. If the CPU is going offline, it is the
++ * caller's responsibility to deal with the lost wakeup, for example,
++ * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
++ */
++void wake_up_nohz_cpu(int cpu)
++{
++ if (!wake_up_full_nohz_cpu(cpu))
++ wake_up_idle_cpu(cpu);
++}
++#endif /* CONFIG_NO_HZ_COMMON */
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++ const struct cpumask *new_mask, bool check)
++{
++ const struct cpumask *cpu_valid_mask = cpu_active_mask;
++ bool queued = false, running_wrong = false, kthread;
++ struct cpumask old_mask;
++ unsigned long flags;
++ struct rq *rq;
++ int ret = 0;
++
++ rq = task_rq_lock(p, &flags);
++ update_rq_clock(rq);
++
++ kthread = !!(p->flags & PF_KTHREAD);
++ if (kthread) {
++ /*
++ * Kernel threads are allowed on online && !active CPUs
++ */
++ cpu_valid_mask = cpu_online_mask;
++ }
++
++ /*
++ * Must re-check here, to close a race against __kthread_bind(),
++ * sched_setaffinity() is not guaranteed to observe the flag.
++ */
++ if (check && (p->flags & PF_NO_SETAFFINITY)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ cpumask_copy(&old_mask, &p->cpus_allowed);
++ if (cpumask_equal(&old_mask, new_mask))
++ goto out;
++
++ if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ queued = task_queued(p);
++ __do_set_cpus_allowed(p, new_mask);
++
++ if (kthread) {
++ /*
++ * For kernel threads that do indeed end up on online &&
++ * !active we want to ensure they are strict per-CPU threads.
++ */
++ WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
++ !cpumask_intersects(new_mask, cpu_active_mask) &&
++ p->nr_cpus_allowed != 1);
++ }
++
++ /* Can the task run on the task's current CPU? If so, we're done */
++ if (cpumask_test_cpu(task_cpu(p), new_mask))
++ goto out;
++
++ if (task_running(rq, p)) {
++ /* Task is running on the wrong cpu now, reschedule it. */
++ if (rq == this_rq()) {
++ set_tsk_need_resched(p);
++ running_wrong = true;
++ } else
++ resched_task(p);
++ } else {
++ int cpu = cpumask_any_and(cpu_valid_mask, new_mask);
++
++ if (queued) {
++ /*
++ * Switch runqueue locks after dequeueing the task
++ * here while still holding the pi_lock, so that we
++ * hold the correct lock for enqueueing.
++ */
++ dequeue_task(rq, p, 0);
++ rq_unlock(rq);
++
++ rq = cpu_rq(cpu);
++ rq_lock(rq);
++ }
++ set_task_cpu(p, cpu);
++ if (queued)
++ enqueue_task(rq, p, 0);
++ }
++ if (queued)
++ try_preempt(p, rq);
++ if (running_wrong)
++ preempt_disable();
++out:
++ task_rq_unlock(rq, p, &flags);
++
++ if (running_wrong) {
++ __schedule(true);
++ preempt_enable();
++ }
++
++ return ret;
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++ return __set_cpus_allowed_ptr(p, new_mask, false);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Run through task list and find tasks affined to the dead cpu, then remove
++ * that cpu from the list, enable cpu0 and set the zerobound flag. Must hold
++ * cpu 0 and src_cpu's runqueue locks.
++ */
++static void bind_zero(int src_cpu)
++{
++ struct task_struct *p, *t;
++ struct rq *rq0;
++ int bound = 0;
++
++ if (src_cpu == 0)
++ return;
++
++ rq0 = cpu_rq(0);
++
++ do_each_thread(t, p) {
++ if (cpumask_test_cpu(src_cpu, &p->cpus_allowed)) {
++ bool local = (task_cpu(p) == src_cpu);
++ struct rq *rq = task_rq(p);
++
++ /* task_running is the cpu stopper thread */
++ if (local && task_running(rq, p))
++ continue;
++ atomic_clear_cpu(src_cpu, &p->cpus_allowed);
++ atomic_set_cpu(0, &p->cpus_allowed);
++ p->zerobound = true;
++ bound++;
++ if (local) {
++ bool queued = task_queued(p);
++
++ if (queued)
++ dequeue_task(rq, p, 0);
++ set_task_cpu(p, 0);
++ if (queued)
++ enqueue_task(rq0, p, 0);
++ }
++ }
++ } while_each_thread(t, p);
++
++ if (bound) {
++ printk(KERN_INFO "Removed affinity for %d processes to cpu %d\n",
++ bound, src_cpu);
++ }
++}
++
++/* Find processes with the zerobound flag and reenable their affinity for the
++ * CPU coming alive. */
++static void unbind_zero(int src_cpu)
++{
++ int unbound = 0, zerobound = 0;
++ struct task_struct *p, *t;
++
++ if (src_cpu == 0)
++ return;
++
++ do_each_thread(t, p) {
++ if (!p->mm)
++ p->zerobound = false;
++ if (p->zerobound) {
++ unbound++;
++ cpumask_set_cpu(src_cpu, &p->cpus_allowed);
++ /* Once every CPU affinity has been re-enabled, remove
++ * the zerobound flag */
++ if (cpumask_subset(cpu_possible_mask, &p->cpus_allowed)) {
++ p->zerobound = false;
++ zerobound++;
++ }
++ }
++ } while_each_thread(t, p);
++
++ if (unbound) {
++ printk(KERN_INFO "Added affinity for %d processes to cpu %d\n",
++ unbound, src_cpu);
++ }
++ if (zerobound) {
++ printk(KERN_INFO "Released forced binding to cpu0 for %d processes\n",
++ zerobound);
++ }
++}
++
++/*
++ * Ensure that the idle task is using init_mm right before its cpu goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++ struct mm_struct *mm = current->active_mm;
++
++ BUG_ON(cpu_online(smp_processor_id()));
++
++ if (mm != &init_mm) {
++ switch_mm(mm, &init_mm, current);
++ finish_arch_post_lock_switch();
++ }
++ mmdrop(mm);
++}
++#else /* CONFIG_HOTPLUG_CPU */
++static void unbind_zero(int src_cpu) {}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++ struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++ struct sched_param start_param = { .sched_priority = 0 };
++ struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++ if (stop) {
++ /*
++ * Make it appear like a SCHED_FIFO task, it's something
++ * userspace knows about and won't get confused about.
++ *
++ * Also, it will make PI more or less work without too
++ * much confusion -- but then, stop work should not
++ * rely on PI working anyway.
++ */
++ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++ }
++
++ cpu_rq(cpu)->stop = stop;
++
++ if (old_stop) {
++ /*
++ * Reset it back to a normal scheduling policy so that
++ * it can die in pieces.
++ */
++ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++ }
++}
++
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++
++static struct ctl_table sd_ctl_dir[] = {
++ {
++ .procname = "sched_domain",
++ .mode = 0555,
++ },
++ {}
++};
++
++static struct ctl_table sd_ctl_root[] = {
++ {
++ .procname = "kernel",
++ .mode = 0555,
++ .child = sd_ctl_dir,
++ },
++ {}
++};
++
++static struct ctl_table *sd_alloc_ctl_entry(int n)
++{
++ struct ctl_table *entry =
++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
++
++ return entry;
++}
++
++static void sd_free_ctl_entry(struct ctl_table **tablep)
++{
++ struct ctl_table *entry;
++
++ /*
++ * In the intermediate directories, both the child directory and
++ * procname are dynamically allocated and could fail but the mode
++ * will always be set. In the lowest directory the names are
++ * static strings and all have proc handlers.
++ */
++ for (entry = *tablep; entry->mode; entry++) {
++ if (entry->child)
++ sd_free_ctl_entry(&entry->child);
++ if (entry->proc_handler == NULL)
++ kfree(entry->procname);
++ }
++
++ kfree(*tablep);
++ *tablep = NULL;
++}
++
++#define CPU_LOAD_IDX_MAX 5
++static int min_load_idx = 0;
++static int max_load_idx = CPU_LOAD_IDX_MAX-1;
++
++static void
++set_table_entry(struct ctl_table *entry,
++ const char *procname, void *data, int maxlen,
++ umode_t mode, proc_handler *proc_handler,
++ bool load_idx)
++{
++ entry->procname = procname;
++ entry->data = data;
++ entry->maxlen = maxlen;
++ entry->mode = mode;
++ entry->proc_handler = proc_handler;
++
++ if (load_idx) {
++ entry->extra1 = &min_load_idx;
++ entry->extra2 = &max_load_idx;
++ }
++}
++
++static struct ctl_table *
++sd_alloc_ctl_domain_table(struct sched_domain *sd)
++{
++ struct ctl_table *table = sd_alloc_ctl_entry(14);
++
++ if (table == NULL)
++ return NULL;
++
++ set_table_entry(&table[0], "min_interval", &sd->min_interval,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[1], "max_interval", &sd->max_interval,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[9], "cache_nice_tries",
++ &sd->cache_nice_tries,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[10], "flags", &sd->flags,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[11], "max_newidle_lb_cost",
++ &sd->max_newidle_lb_cost,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[12], "name", sd->name,
++ CORENAME_MAX_SIZE, 0444, proc_dostring, false);
++ /* &table[13] is terminator */
++
++ return table;
++}
++
++static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
++{
++ struct ctl_table *entry, *table;
++ struct sched_domain *sd;
++ int domain_num = 0, i;
++ char buf[32];
++
++ for_each_domain(cpu, sd)
++ domain_num++;
++ entry = table = sd_alloc_ctl_entry(domain_num + 1);
++ if (table == NULL)
++ return NULL;
++
++ i = 0;
++ for_each_domain(cpu, sd) {
++ snprintf(buf, 32, "domain%d", i);
++ entry->procname = kstrdup(buf, GFP_KERNEL);
++ entry->mode = 0555;
++ entry->child = sd_alloc_ctl_domain_table(sd);
++ entry++;
++ i++;
++ }
++ return table;
++}
++
++static cpumask_var_t sd_sysctl_cpus;
++static struct ctl_table_header *sd_sysctl_header;
++
++void register_sched_domain_sysctl(void)
++{
++ static struct ctl_table *cpu_entries;
++ static struct ctl_table **cpu_idx;
++ char buf[32];
++ int i;
++
++ if (!cpu_entries) {
++ cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
++ if (!cpu_entries)
++ return;
++
++ WARN_ON(sd_ctl_dir[0].child);
++ sd_ctl_dir[0].child = cpu_entries;
++ }
++
++ if (!cpu_idx) {
++ struct ctl_table *e = cpu_entries;
++
++ cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
++ if (!cpu_idx)
++ return;
++
++ /* deal with sparse possible map */
++ for_each_possible_cpu(i) {
++ cpu_idx[i] = e;
++ e++;
++ }
++ }
++
++ if (!cpumask_available(sd_sysctl_cpus)) {
++ if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
++ return;
++
++ /* init to possible to not have holes in @cpu_entries */
++ cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
++ }
++
++ for_each_cpu(i, sd_sysctl_cpus) {
++ struct ctl_table *e = cpu_idx[i];
++
++ if (e->child)
++ sd_free_ctl_entry(&e->child);
++
++ if (!e->procname) {
++ snprintf(buf, 32, "cpu%d", i);
++ e->procname = kstrdup(buf, GFP_KERNEL);
++ }
++ e->mode = 0555;
++ e->child = sd_alloc_ctl_cpu_table(i);
++
++ __cpumask_clear_cpu(i, sd_sysctl_cpus);
++ }
++
++ WARN_ON(sd_sysctl_header);
++ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
++}
++
++void dirty_sched_domain_sysctl(int cpu)
++{
++ if (cpumask_available(sd_sysctl_cpus))
++ __cpumask_set_cpu(cpu, sd_sysctl_cpus);
++}
++
++/* may be called multiple times per register */
++void unregister_sched_domain_sysctl(void)
++{
++ unregister_sysctl_table(sd_sysctl_header);
++ sd_sysctl_header = NULL;
++}
++#endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */
++
++void set_rq_online(struct rq *rq)
++{
++ if (!rq->online) {
++ cpumask_set_cpu(cpu_of(rq), rq->rd->online);
++ rq->online = true;
++ }
++}
++
++void set_rq_offline(struct rq *rq)
++{
++ if (rq->online) {
++ int cpu = cpu_of(rq);
++
++ cpumask_clear_cpu(cpu, rq->rd->online);
++ rq->online = false;
++ clear_cpuidle_map(cpu);
++ }
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask. If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++ if (cpuhp_tasks_frozen) {
++ /*
++ * num_cpus_frozen tracks how many CPUs are involved in suspend
++ * resume sequence. As long as this is not the last online
++ * operation in the resume sequence, just build a single sched
++ * domain, ignoring cpusets.
++ */
++ partition_sched_domains(1, NULL, NULL);
++ if (--num_cpus_frozen)
++ return;
++ /*
++ * This is the last CPU online operation. So fall through and
++ * restore the original sched domains by considering the
++ * cpuset configurations.
++ */
++ cpuset_force_rebuild();
++ }
++
++ cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++ if (!cpuhp_tasks_frozen) {
++ cpuset_update_active_cpus();
++ } else {
++ num_cpus_frozen++;
++ partition_sched_domains(1, NULL, NULL);
++ }
++ return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ set_cpu_active(cpu, true);
++
++ if (sched_smp_initialized) {
++ sched_domains_numa_masks_set(cpu);
++ cpuset_cpu_active();
++ }
++
++ /*
++ * Put the rq online, if not already. This happens:
++ *
++ * 1) In the early boot process, because we build the real domains
++ * after all CPUs have been brought up.
++ *
++ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++ * domains.
++ */
++ rq_lock_irqsave(rq, &flags);
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++ set_rq_online(rq);
++ }
++ unbind_zero(cpu);
++ rq_unlock_irqrestore(rq, &flags);
++
++ return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++ int ret;
++
++ set_cpu_active(cpu, false);
++ /*
++ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++ * users of this state to go away such that all new such users will
++ * observe it.
++ *
++ * Do sync before parking smpboot threads to take care of the RCU boost case.
++ */
++ synchronize_rcu_mult(call_rcu, call_rcu_sched);
++
++ if (!sched_smp_initialized)
++ return 0;
++
++ ret = cpuset_cpu_inactive(cpu);
++ if (ret) {
++ set_cpu_active(cpu, true);
++ return ret;
++ }
++ sched_domains_numa_masks_clear(cpu);
++ return 0;
++}
++
++int sched_cpu_starting(unsigned int __maybe_unused cpu)
++{
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++int sched_cpu_dying(unsigned int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ local_irq_save(flags);
++ double_rq_lock(rq, cpu_rq(0));
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++ set_rq_offline(rq);
++ }
++ bind_zero(cpu);
++ double_rq_unlock(rq, cpu_rq(0));
++ sched_start_tick(rq, cpu);
++ hrexpiry_clear(rq);
++ local_irq_restore(flags);
++
++ return 0;
++}
++#endif
++
++#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
++/*
++ * Cheaper version of the below functions in case support for SMT and MC is
++ * compiled in but CPUs have no siblings.
++ */
++static bool sole_cpu_idle(struct rq *rq)
++{
++ return rq_idle(rq);
++}
++#endif
++#ifdef CONFIG_SCHED_SMT
++static const cpumask_t *thread_cpumask(int cpu)
++{
++ return topology_sibling_cpumask(cpu);
++}
++/* All this CPU's SMT siblings are idle */
++static bool siblings_cpu_idle(struct rq *rq)
++{
++ return cpumask_subset(&rq->thread_mask, &cpu_idle_map);
++}
++#endif
++#ifdef CONFIG_SCHED_MC
++static const cpumask_t *core_cpumask(int cpu)
++{
++ return topology_core_cpumask(cpu);
++}
++/* All this CPU's shared cache siblings are idle */
++static bool cache_cpu_idle(struct rq *rq)
++{
++ return cpumask_subset(&rq->core_mask, &cpu_idle_map);
++}
++#endif
++
++enum sched_domain_level {
++ SD_LV_NONE = 0,
++ SD_LV_SIBLING,
++ SD_LV_MC,
++ SD_LV_BOOK,
++ SD_LV_CPU,
++ SD_LV_NODE,
++ SD_LV_ALLNODES,
++ SD_LV_MAX
++};
++
++void __init sched_init_smp(void)
++{
++ struct sched_domain *sd;
++ int cpu, other_cpu;
++#ifdef CONFIG_SCHED_SMT
++ bool smt_threads = false;
++#endif
++ cpumask_var_t non_isolated_cpus;
++ struct rq *rq;
++
++ alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
++
++ sched_init_numa();
++
++ /*
++ * There's no userspace yet to cause hotplug operations; hence all the
++ * cpu masks are stable and all blatant races in the below code cannot
++ * happen.
++ */
++ mutex_lock(&sched_domains_mutex);
++ sched_init_domains(cpu_active_mask);
++ cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
++ if (cpumask_empty(non_isolated_cpus))
++ cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
++ mutex_unlock(&sched_domains_mutex);
++
++ /* Move init over to a non-isolated CPU */
++ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
++ BUG();
++ free_cpumask_var(non_isolated_cpus);
++
++ mutex_lock(&sched_domains_mutex);
++ local_irq_disable();
++ lock_all_rqs();
++ /*
++ * Set up the relative cache distance of each online cpu from each
++ * other in a simple array for quick lookup. Locality is determined
++ * by the closest sched_domain that CPUs are separated by. CPUs with
++ * shared cache in SMT and MC are treated as local. Separate CPUs
++ * (within the same package or physically) within the same node are
++ * treated as not local. CPUs not even in the same domain (different
++ * nodes) are treated as very distant.
++ */
++ for_each_online_cpu(cpu) {
++ rq = cpu_rq(cpu);
++
++ /* First check if this cpu is in the same node */
++ for_each_domain(cpu, sd) {
++ if (sd->level > SD_LV_MC)
++ continue;
++ /* Set locality to local node if not already found lower */
++ for_each_cpu(other_cpu, sched_domain_span(sd)) {
++ if (rq->cpu_locality[other_cpu] > 3)
++ rq->cpu_locality[other_cpu] = 3;
++ }
++ }
++
++ /*
++ * Each runqueue has its own function in case it doesn't have
++ * siblings of its own allowing mixed topologies.
++ */
++#ifdef CONFIG_SCHED_MC
++ for_each_cpu(other_cpu, core_cpumask(cpu)) {
++ if (rq->cpu_locality[other_cpu] > 2)
++ rq->cpu_locality[other_cpu] = 2;
++ }
++ if (cpumask_weight(core_cpumask(cpu)) > 1) {
++ cpumask_copy(&rq->core_mask, core_cpumask(cpu));
++ cpumask_clear_cpu(cpu, &rq->core_mask);
++ rq->cache_idle = cache_cpu_idle;
++ }
++#endif
++#ifdef CONFIG_SCHED_SMT
++ if (cpumask_weight(thread_cpumask(cpu)) > 1) {
++ cpumask_copy(&rq->thread_mask, thread_cpumask(cpu));
++ cpumask_clear_cpu(cpu, &rq->thread_mask);
++ for_each_cpu(other_cpu, thread_cpumask(cpu))
++ rq->cpu_locality[other_cpu] = 1;
++ rq->siblings_idle = siblings_cpu_idle;
++ smt_threads = true;
++ }
++#endif
++ }
++ for_each_possible_cpu(cpu) {
++ int total_cpus = 1, locality;
++
++ rq = cpu_rq(cpu);
++ for (locality = 1; locality <= 4; locality++) {
++ for_each_possible_cpu(other_cpu) {
++ if (rq->cpu_locality[other_cpu] == locality)
++ rq->rq_order[total_cpus++] = cpu_rq(other_cpu);
++ }
++ }
++ }
++#ifdef CONFIG_SMT_NICE
++ if (smt_threads) {
++ check_siblings = &check_smt_siblings;
++ wake_siblings = &wake_smt_siblings;
++ smt_schedule = &smt_should_schedule;
++ }
++#endif
++ unlock_all_rqs();
++ local_irq_enable();
++ mutex_unlock(&sched_domains_mutex);
++
++ for_each_online_cpu(cpu) {
++ rq = cpu_rq(cpu);
++
++ for_each_online_cpu(other_cpu) {
++ if (other_cpu <= cpu)
++ continue;
++ printk(KERN_DEBUG "MuQSS locality CPU %d to %d: %d\n", cpu, other_cpu, rq->cpu_locality[other_cpu]);
++ }
++ }
++
++ sched_smp_initialized = true;
++}
++#else
++void __init sched_init_smp(void)
++{
++ sched_smp_initialized = true;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++ return in_lock_functions(addr) ||
++ (addr >= (unsigned long)__sched_text_start
++ && addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++ struct cgroup_subsys_state css;
++
++ struct rcu_head rcu;
++ struct list_head list;
++
++ struct task_group *parent;
++ struct list_head siblings;
++ struct list_head children;
++};
++
++/*
++ * Default task group.
++ * Every task in the system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __read_mostly;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++#ifdef CONFIG_SMP
++ int cpu_ids;
++#endif
++ int i;
++ struct rq *rq;
++
++ sched_clock_init();
++
++ wait_bit_init();
++
++ prio_ratios[0] = 128;
++ for (i = 1 ; i < NICE_WIDTH ; i++)
++ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
++
++ skiplist_node_init(&init_task.node);
++
++#ifdef CONFIG_SMP
++ init_defrootdomain();
++ cpumask_clear(&cpu_idle_map);
++#else
++ uprq = &per_cpu(runqueues, 0);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++ task_group_cache = KMEM_CACHE(task_group, 0);
++
++ list_add(&root_task_group.list, &task_groups);
++ INIT_LIST_HEAD(&root_task_group.children);
++ INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++ for_each_possible_cpu(i) {
++ rq = cpu_rq(i);
++ skiplist_init(&rq->node);
++ rq->sl = new_skiplist(&rq->node);
++ raw_spin_lock_init(&rq->lock);
++ rq->nr_running = 0;
++ rq->nr_uninterruptible = 0;
++ rq->nr_switches = 0;
++ rq->clock = rq->old_clock = rq->last_niffy = rq->niffies = 0;
++ rq->last_jiffy = jiffies;
++ rq->user_ns = rq->nice_ns = rq->softirq_ns = rq->system_ns =
++ rq->iowait_ns = rq->idle_ns = 0;
++ rq->dither = 0;
++ set_rq_task(rq, &init_task);
++ rq->iso_ticks = 0;
++ rq->iso_refractory = false;
++#ifdef CONFIG_SMP
++ rq->sd = NULL;
++ rq->rd = NULL;
++ rq->online = false;
++ rq->cpu = i;
++ rq_attach_root(rq, &def_root_domain);
++#endif
++ init_rq_hrexpiry(rq);
++ atomic_set(&rq->nr_iowait, 0);
++ }
++
++#ifdef CONFIG_SMP
++ cpu_ids = i;
++ /*
++ * Set the base locality for cpu cache distance calculation to
++ * "distant" (3). Make sure the distance from a CPU to itself is 0.
++ */
++ for_each_possible_cpu(i) {
++ int j;
++
++ rq = cpu_rq(i);
++#ifdef CONFIG_SCHED_SMT
++ rq->siblings_idle = sole_cpu_idle;
++#endif
++#ifdef CONFIG_SCHED_MC
++ rq->cache_idle = sole_cpu_idle;
++#endif
++ rq->cpu_locality = kmalloc(cpu_ids * sizeof(int *), GFP_ATOMIC);
++ for_each_possible_cpu(j) {
++ if (i == j)
++ rq->cpu_locality[j] = 0;
++ else
++ rq->cpu_locality[j] = 4;
++ }
++ rq->rq_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC);
++ rq->rq_order[0] = rq;
++ for (j = 1; j < cpu_ids; j++)
++ rq->rq_order[j] = cpu_rq(j);
++ }
++#endif
++
++ /*
++ * The boot idle thread does lazy MMU switching as well:
++ */
++ mmgrab(&init_mm);
++ enter_lazy_tlb(&init_mm, current);
++
++ /*
++ * Make us the idle thread. Technically, schedule() should not be
++ * called from this thread, however somewhere below it might be,
++ * but because we are the idle thread, we just pick up running again
++ * when this runqueue becomes "idle".
++ */
++ init_idle(current, smp_processor_id());
++
++#ifdef CONFIG_SMP
++ /* May be allocated at isolcpus cmdline parse time */
++ if (cpu_isolated_map == NULL)
++ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
++ idle_thread_set_boot_cpu();
++#endif /* SMP */
++
++ init_schedstats();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++static inline int preempt_count_equals(int preempt_offset)
++{
++ int nested = preempt_count() + rcu_preempt_depth();
++
++ return (nested == preempt_offset);
++}
++
++void __might_sleep(const char *file, int line, int preempt_offset)
++{
++ /*
++ * Blocking primitives will set (and therefore destroy) current->state,
++ * since we will exit with TASK_RUNNING make sure we enter with it,
++ * otherwise we will destroy state.
++ */
++ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
++ "do not call blocking ops when !TASK_RUNNING; "
++ "state=%lx set at [<%p>] %pS\n",
++ current->state,
++ (void *)current->task_state_change,
++ (void *)current->task_state_change);
++
++ ___might_sleep(file, line, preempt_offset);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++void ___might_sleep(const char *file, int line, int preempt_offset)
++{
++ /* Ratelimiting timestamp: */
++ static unsigned long prev_jiffy;
++
++ unsigned long preempt_disable_ip;
++
++ /* WARN_ON_ONCE() by default, no rate limit required: */
++ rcu_sleep_check();
++
++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
++ !is_idle_task(current)) ||
++ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++ oops_in_progress)
++ return;
++
++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++ return;
++ prev_jiffy = jiffies;
++
++ /* Save this before calling printk(), since that will clobber it: */
++ preempt_disable_ip = get_preempt_disable_ip(current);
++
++ printk(KERN_ERR
++ "BUG: sleeping function called from invalid context at %s:%d\n",
++ file, line);
++ printk(KERN_ERR
++ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++ in_atomic(), irqs_disabled(),
++ current->pid, current->comm);
++
++ if (task_stack_end_corrupted(current))
++ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
++
++ debug_show_held_locks(current);
++ if (irqs_disabled())
++ print_irqtrace_events(current);
++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++ && !preempt_count_equals(preempt_offset)) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(preempt_disable_ip);
++ pr_cont("\n");
++ }
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(___might_sleep);
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static inline void normalise_rt_tasks(void)
++{
++ struct task_struct *g, *p;
++ unsigned long flags;
++ struct rq *rq;
++
++ read_lock(&tasklist_lock);
++ for_each_process_thread(g, p) {
++ /*
++ * Only normalize user tasks:
++ */
++ if (p->flags & PF_KTHREAD)
++ continue;
++
++ if (!rt_task(p) && !iso_task(p))
++ continue;
++
++ rq = task_rq_lock(p, &flags);
++ __setscheduler(p, rq, SCHED_NORMAL, 0, false);
++ task_rq_unlock(rq, p, &flags);
++ }
++ read_unlock(&tasklist_lock);
++}
++
++void normalize_rt_tasks(void)
++{
++ normalise_rt_tasks();
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++ return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner. This function
++ * must be called with all CPUs synchronised, and interrupts disabled; the
++ * caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++ cpu_curr(cpu) = p;
++}
++
++#endif
++
++void init_idle_bootup_task(struct task_struct *idle)
++{}
++
++#ifdef CONFIG_SCHED_DEBUG
++__read_mostly bool sched_debug_enabled;
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++ struct seq_file *m)
++{}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
++#endif
++
++#ifdef CONFIG_SMP
++#define SCHED_LOAD_SHIFT (10)
++#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
++
++unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
++{
++ return SCHED_LOAD_SCALE;
++}
++
++unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
++{
++ unsigned long weight = cpumask_weight(sched_domain_span(sd));
++ unsigned long smt_gain = sd->smt_gain;
++
++ smt_gain /= weight;
++
++ return smt_gain;
++}
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++ kmem_cache_free(task_group_cache, tg);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++ struct task_group *tg;
++
++ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++ if (!tg)
++ return ERR_PTR(-ENOMEM);
++
++ return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++ /* Now it should be safe to free those cfs_rqs */
++ sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++ /* Wait for possible concurrent references to cfs_rqs to complete */
++ call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++void sched_offline_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++ return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++ struct task_group *parent = css_tg(parent_css);
++ struct task_group *tg;
++
++ if (!parent) {
++ /* This is early initialization for the top cgroup */
++ return &root_task_group.css;
++ }
++
++ tg = sched_create_group(parent);
++ if (IS_ERR(tg))
++ return ERR_PTR(-ENOMEM);
++ return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++ struct task_group *parent = css_tg(css->parent);
++
++ if (parent)
++ sched_online_group(tg, parent);
++ return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ sched_offline_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ /*
++ * Relies on the RCU grace period between css_released() and this.
++ */
++ sched_free_group(tg);
++}
++
++static void cpu_cgroup_fork(struct task_struct *task)
++{
++}
++
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++ return 0;
++}
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++static struct cftype cpu_files[] = {
++ { } /* Terminate */
++};
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++ .css_alloc = cpu_cgroup_css_alloc,
++ .css_online = cpu_cgroup_css_online,
++ .css_released = cpu_cgroup_css_released,
++ .css_free = cpu_cgroup_css_free,
++ .fork = cpu_cgroup_fork,
++ .can_attach = cpu_cgroup_can_attach,
++ .attach = cpu_cgroup_attach,
++ .legacy_cftypes = cpu_files,
++ .early_init = true,
++};
++#endif /* CONFIG_CGROUP_SCHED */
+diff -Nur a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
+--- a/kernel/sched/MuQSS.h 1970-01-01 01:00:00.000000000 +0100
++++ b/kernel/sched/MuQSS.h 2018-11-03 16:06:32.715529032 +0000
+@@ -0,0 +1,725 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#include <linux/sched.h>
++#include <linux/cpuidle.h>
++#include <linux/freezer.h>
++#include <linux/interrupt.h>
++#include <linux/skip_list.h>
++#include <linux/stop_machine.h>
++#include <linux/sched/topology.h>
++#include <linux/u64_stats_sync.h>
++#include <linux/tsacct_kern.h>
++#include <linux/sched/clock.h>
++#include <linux/sched/wake_q.h>
++#include <linux/sched/signal.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/cpufreq.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/hotplug.h>
++#include <linux/sched/task.h>
++#include <linux/sched/task_stack.h>
++#include <linux/sched/cputime.h>
++#include <linux/sched/init.h>
++
++#include <linux/kernel_stat.h>
++#include <linux/tick.h>
++#include <linux/slab.h>
++
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#endif
++
++#include "cpuacct.h"
++
++#ifndef MUQSS_SCHED_H
++#define MUQSS_SCHED_H
++
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
++#else
++# define SCHED_WARN_ON(x) ((void)(x))
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED 1
++#define TASK_ON_RQ_MIGRATING 2
++
++struct rq;
++
++#ifdef CONFIG_SMP
++
++static inline bool sched_asym_prefer(int a, int b)
++{
++ return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
++}
++
++/*
++ * We add the notion of a root-domain which will be used to define per-domain
++ * variables. Each exclusive cpuset essentially defines an island domain by
++ * fully partitioning the member cpus from any other cpuset. Whenever a new
++ * exclusive cpuset is created, we also create and attach a new root-domain
++ * object.
++ *
++ */
++struct root_domain {
++ atomic_t refcount;
++ atomic_t rto_count;
++ struct rcu_head rcu;
++ cpumask_var_t span;
++ cpumask_var_t online;
++
++ /* Indicate more than one runnable task for any CPU */
++ bool overload;
++
++ /*
++ * The bit corresponding to a CPU gets set here if such CPU has more
++ * than one runnable -deadline task (as it is below for RT tasks).
++ */
++ cpumask_var_t dlo_mask;
++ atomic_t dlo_count;
++ /* Replace unused CFS structures with void */
++ //struct dl_bw dl_bw;
++ //struct cpudl cpudl;
++ void *dl_bw;
++ void *cpudl;
++
++ /*
++ * The "RT overload" flag: it gets set if a CPU has more than
++ * one runnable RT task.
++ */
++ cpumask_var_t rto_mask;
++ //struct cpupri cpupri;
++ void *cpupri;
++
++ unsigned long max_cpu_capacity;
++};
++
++extern struct root_domain def_root_domain;
++extern struct mutex sched_domains_mutex;
++
++extern void init_defrootdomain(void);
++extern int sched_init_domains(const struct cpumask *cpu_map);
++extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
++
++static inline void cpupri_cleanup(void __maybe_unused *cpupri)
++{
++}
++
++static inline void cpudl_cleanup(void __maybe_unused *cpudl)
++{
++}
++
++static inline void init_dl_bw(void __maybe_unused *dl_bw)
++{
++}
++
++static inline int cpudl_init(void __maybe_unused *dl_bw)
++{
++ return 0;
++}
++
++static inline int cpupri_init(void __maybe_unused *cpupri)
++{
++ return 0;
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++ raw_spinlock_t lock;
++
++ struct task_struct *curr, *idle, *stop;
++ struct mm_struct *prev_mm;
++
++ unsigned int nr_running;
++ /*
++ * This is part of a global counter where only the total sum
++ * over all CPUs matters. A task can increase this counter on
++ * one CPU and if it got migrated afterwards it may decrease
++ * it on another CPU. Always updated under the runqueue lock:
++ */
++ unsigned long nr_uninterruptible;
++ u64 nr_switches;
++
++ /* Stored data about rq->curr to work outside rq lock */
++ u64 rq_deadline;
++ int rq_prio;
++
++ /* Best queued id for use outside lock */
++ u64 best_key;
++
++ unsigned long last_scheduler_tick; /* Last jiffy this RQ ticked */
++ unsigned long last_jiffy; /* Last jiffy this RQ updated rq clock */
++ u64 niffies; /* Last time this RQ updated rq clock */
++ u64 last_niffy; /* Last niffies as updated by local clock */
++ u64 last_jiffy_niffies; /* Niffies @ last_jiffy */
++
++ u64 load_update; /* When we last updated load */
++ unsigned long load_avg; /* Rolling load average */
++#ifdef CONFIG_SMT_NICE
++ struct mm_struct *rq_mm;
++ int rq_smt_bias; /* Policy/nice level bias across smt siblings */
++#endif
++ /* Accurate timekeeping data */
++ unsigned long user_ns, nice_ns, irq_ns, softirq_ns, system_ns,
++ iowait_ns, idle_ns;
++ atomic_t nr_iowait;
++
++ skiplist_node node;
++ skiplist *sl;
++#ifdef CONFIG_SMP
++ struct task_struct *preempt; /* Preempt triggered on this task */
++ struct task_struct *preempting; /* Hint only, what task is preempting */
++
++ int cpu; /* cpu of this runqueue */
++ bool online;
++
++ struct root_domain *rd;
++ struct sched_domain *sd;
++
++ unsigned long cpu_capacity_orig;
++
++ int *cpu_locality; /* CPU relative cache distance */
++ struct rq **rq_order; /* RQs ordered by relative cache distance */
++
++#ifdef CONFIG_SCHED_SMT
++ cpumask_t thread_mask;
++ bool (*siblings_idle)(struct rq *rq);
++ /* See if all smt siblings are idle */
++#endif /* CONFIG_SCHED_SMT */
++#ifdef CONFIG_SCHED_MC
++ cpumask_t core_mask;
++ bool (*cache_idle)(struct rq *rq);
++ /* See if all cache siblings are idle */
++#endif /* CONFIG_SCHED_MC */
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++ u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++ u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++ u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++ u64 clock, old_clock, last_tick;
++ u64 clock_task;
++ int dither;
++
++ int iso_ticks;
++ bool iso_refractory;
++
++#ifdef CONFIG_HIGH_RES_TIMERS
++ struct hrtimer hrexpiry_timer;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++ /* latency stats */
++ struct sched_info rq_sched_info;
++ unsigned long long rq_cpu_time;
++ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++ /* sys_sched_yield() stats */
++ unsigned int yld_count;
++
++ /* schedule() stats */
++ unsigned int sched_switch;
++ unsigned int sched_count;
++ unsigned int sched_goidle;
++
++ /* try_to_wake_up() stats */
++ unsigned int ttwu_count;
++ unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_SMP
++ struct llist_head wake_list;
++#endif
++
++#ifdef CONFIG_CPU_IDLE
++ /* Must be inspected within a rcu lock section */
++ struct cpuidle_state *idle_state;
++#endif
++};
++
++#ifdef CONFIG_SMP
++struct rq *cpu_rq(int cpu);
++#endif
++
++#ifndef CONFIG_SMP
++extern struct rq *uprq;
++#define cpu_rq(cpu) (uprq)
++#define this_rq() (uprq)
++#define raw_rq() (uprq)
++#define task_rq(p) (uprq)
++#define cpu_curr(cpu) ((uprq)->curr)
++#else /* CONFIG_SMP */
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define this_rq() this_cpu_ptr(&runqueues)
++#define raw_rq() raw_cpu_ptr(&runqueues)
++#define task_rq(p) cpu_rq(task_cpu(p))
++#endif /* CONFIG_SMP */
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++ return rq->curr == p;
++}
++
++static inline int task_running(struct rq *rq, struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++ return p->on_cpu;
++#else
++ return task_current(rq, p);
++#endif
++}
++
++static inline void rq_lock(struct rq *rq)
++ __acquires(rq->lock)
++{
++ raw_spin_lock(&rq->lock);
++}
++
++static inline void rq_unlock(struct rq *rq)
++ __releases(rq->lock)
++{
++ raw_spin_unlock(&rq->lock);
++}
++
++static inline void rq_lock_irq(struct rq *rq)
++ __acquires(rq->lock)
++{
++ raw_spin_lock_irq(&rq->lock);
++}
++
++static inline void rq_unlock_irq(struct rq *rq)
++ __releases(rq->lock)
++{
++ raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline void rq_lock_irqsave(struct rq *rq, unsigned long *flags)
++ __acquires(rq->lock)
++{
++ raw_spin_lock_irqsave(&rq->lock, *flags);
++}
++
++static inline void rq_unlock_irqrestore(struct rq *rq, unsigned long *flags)
++ __releases(rq->lock)
++{
++ raw_spin_unlock_irqrestore(&rq->lock, *flags);
++}
++
++static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
++ __acquires(p->pi_lock)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ while (42) {
++ raw_spin_lock_irqsave(&p->pi_lock, *flags);
++ rq = task_rq(p);
++ raw_spin_lock(&rq->lock);
++ if (likely(rq == task_rq(p)))
++ break;
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++ }
++ return rq;
++}
++
++static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
++ __releases(rq->lock)
++ __releases(p->pi_lock)
++{
++ rq_unlock(rq);
++ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++}
++
++static inline struct rq *__task_rq_lock(struct task_struct *p)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ lockdep_assert_held(&p->pi_lock);
++
++ while (42) {
++ rq = task_rq(p);
++ raw_spin_lock(&rq->lock);
++ if (likely(rq == task_rq(p)))
++ break;
++ raw_spin_unlock(&rq->lock);
++ }
++ return rq;
++}
++
++static inline void __task_rq_unlock(struct rq *rq)
++{
++ rq_unlock(rq);
++}
++
++/*
++ * {de,en}queue flags: Most not used on MuQSS.
++ *
++ * DEQUEUE_SLEEP - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
++ * are in a known state which allows modification. Such pairs
++ * should preserve as much state as possible.
++ *
++ * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
++ * in the runqueue.
++ *
++ * ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
++ * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
++ * ENQUEUE_MIGRATED - the task was migrated during wakeup
++ *
++ */
++
++#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */
++
++#define ENQUEUE_RESTORE 0x02
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++ return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++ lockdep_assert_held(&rq->lock);
++
++ return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++ lockdep_assert_held(&rq->lock);
++
++ return rq->clock_task;
++}
++
++#ifdef CONFIG_NUMA
++enum numa_topology_type {
++ NUMA_DIRECT,
++ NUMA_GLUELESS_MESH,
++ NUMA_BACKPLANE,
++};
++extern enum numa_topology_type sched_numa_topology_type;
++extern int sched_max_numa_distance;
++extern bool find_numa_distance(int distance);
++
++extern void sched_init_numa(void);
++extern void sched_domains_numa_masks_set(unsigned int cpu);
++extern void sched_domains_numa_masks_clear(unsigned int cpu);
++#else
++static inline void sched_init_numa(void) { }
++static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
++static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
++#endif
++
++extern struct mutex sched_domains_mutex;
++extern struct static_key_false sched_schedstats;
++
++#define rcu_dereference_check_sched_domain(p) \
++ rcu_dereference_check((p), \
++ lockdep_is_held(&sched_domains_mutex))
++
++#ifdef CONFIG_SMP
++
++/*
++ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
++ * See detach_destroy_domains: synchronize_sched for details.
++ *
++ * The domain tree of any CPU may only be accessed from within
++ * preempt-disabled sections.
++ */
++#define for_each_domain(cpu, __sd) \
++ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
++ __sd; __sd = __sd->parent)
++
++#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
++
++/**
++ * highest_flag_domain - Return highest sched_domain containing flag.
++ * @cpu: The cpu whose highest level of sched domain is to
++ * be returned.
++ * @flag: The flag to check for the highest sched_domain
++ * for the given cpu.
++ *
++ * Returns the highest sched_domain of a cpu which contains the given flag.
++ */
++static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
++{
++ struct sched_domain *sd, *hsd = NULL;
++
++ for_each_domain(cpu, sd) {
++ if (!(sd->flags & flag))
++ break;
++ hsd = sd;
++ }
++
++ return hsd;
++}
++
++static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
++{
++ struct sched_domain *sd;
++
++ for_each_domain(cpu, sd) {
++ if (sd->flags & flag)
++ break;
++ }
++
++ return sd;
++}
++
++DECLARE_PER_CPU(struct sched_domain *, sd_llc);
++DECLARE_PER_CPU(int, sd_llc_size);
++DECLARE_PER_CPU(int, sd_llc_id);
++DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
++DECLARE_PER_CPU(struct sched_domain *, sd_numa);
++DECLARE_PER_CPU(struct sched_domain *, sd_asym);
++
++struct sched_group_capacity {
++ atomic_t ref;
++ /*
++ * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
++ * for a single CPU.
++ */
++ unsigned long capacity;
++ unsigned long min_capacity; /* Min per-CPU capacity in group */
++ unsigned long next_update;
++ int imbalance; /* XXX unrelated to capacity but shared group state */
++
++#ifdef CONFIG_SCHED_DEBUG
++ int id;
++#endif
++
++ unsigned long cpumask[0]; /* balance mask */
++};
++
++struct sched_group {
++ struct sched_group *next; /* Must be a circular list */
++ atomic_t ref;
++
++ unsigned int group_weight;
++ struct sched_group_capacity *sgc;
++ int asym_prefer_cpu; /* cpu of highest priority in group */
++
++ /*
++ * The CPUs this group covers.
++ *
++ * NOTE: this field is variable length. (Allocated dynamically
++ * by attaching extra space to the end of the structure,
++ * depending on how many CPUs the kernel has booted up with)
++ */
++ unsigned long cpumask[0];
++};
++
++static inline struct cpumask *sched_group_span(struct sched_group *sg)
++{
++ return to_cpumask(sg->cpumask);
++}
++
++/*
++ * See build_balance_mask().
++ */
++static inline struct cpumask *group_balance_mask(struct sched_group *sg)
++{
++ return to_cpumask(sg->sgc->cpumask);
++}
++
++/**
++ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
++ * @group: The group whose first cpu is to be returned.
++ */
++static inline unsigned int group_first_cpu(struct sched_group *group)
++{
++ return cpumask_first(sched_group_span(group));
++}
++
++
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void dirty_sched_domain_sysctl(int cpu);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void dirty_sched_domain_sysctl(int cpu)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern void sched_ttwu_pending(void);
++extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
++extern void set_rq_online (struct rq *rq);
++extern void set_rq_offline(struct rq *rq);
++extern bool sched_smp_initialized;
++
++static inline void update_group_capacity(struct sched_domain *sd, int cpu)
++{
++}
++
++static inline void trigger_load_balance(struct rq *rq)
++{
++}
++
++#define sched_feat(x) 0
++
++#else /* CONFIG_SMP */
++
++static inline void sched_ttwu_pending(void) { }
++
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++ struct cpuidle_state *idle_state)
++{
++ rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++ SCHED_WARN_ON(!rcu_read_lock_held());
++ return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++ struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++ return NULL;
++}
++#endif
++
++#ifdef CONFIG_SCHED_DEBUG
++extern bool sched_debug_enabled;
++#endif
++
++extern void schedule_idle(void);
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++ u64 total;
++ u64 tick_delta;
++ u64 irq_start_time;
++ struct u64_stats_sync sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted and would never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++ unsigned int seq;
++ u64 total;
++
++ do {
++ seq = __u64_stats_fetch_begin(&irqtime->sync);
++ total = irqtime->total;
++ } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++ return total;
++}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_SMP
++static inline int cpu_of(struct rq *rq)
++{
++ return rq->cpu;
++}
++#else /* CONFIG_SMP */
++static inline int cpu_of(struct rq *rq)
++{
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
++
++static inline void cpufreq_trigger(struct rq *rq, unsigned int flags)
++{
++ struct update_util_data *data;
++
++ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++ cpu_of(rq)));
++
++ if (data)
++ data->func(data, rq->niffies, flags);
++}
++#else
++static inline void cpufreq_trigger(struct rq *rq, unsigned int flag)
++{
++}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant() (true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant() (false)
++#endif
++
++/*
++ * This should only be called when current == rq->idle. Dodgy workaround for
++ * when softirqs are pending and we are in the idle loop. Setting current to
++ * resched will kick us out of the idle loop and the softirqs will be serviced
++ * on our next pass through schedule().
++ */
++static inline bool softirq_pending(int cpu)
++{
++ if (likely(!local_softirq_pending()))
++ return false;
++ set_tsk_need_resched(current);
++ return true;
++}
++
++#ifdef CONFIG_64BIT
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ return tsk_seruntime(t);
++}
++#else
++struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags);
++void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags);
++
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ unsigned long flags;
++ u64 ns;
++ struct rq *rq;
++
++ rq = task_rq_lock(t, &flags);
++ ns = tsk_seruntime(t);
++ task_rq_unlock(rq, t, &flags);
++
++ return ns;
++}
++#endif
++
++#endif /* MUQSS_SCHED_H */
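
A note on the helpers above: irq_time_read() and the 32-bit read_sum_exec_runtime() exist because a 64-bit counter cannot be read atomically on 32-bit machines, so readers either take the runqueue lock or retry under a u64_stats_sync sequence count (on 64-bit both collapse to a plain load). The standalone sketch below shows the same retry protocol with C11 atomics; all toy_* names are invented for illustration and nothing here is part of the patch:

#include <stdatomic.h>
#include <stdint.h>

/* Toy analogue of struct irqtime + u64_stats_sync; all names here are made up. */
struct toy_irqtime {
    atomic_uint      seq;    /* even = stable, odd = update in progress */
    _Atomic uint64_t total;  /* the payload irq_time_read() snapshots   */
};

/* Writer (the CPU owning the counter): bracket the update with two increments. */
static void toy_account(struct toy_irqtime *it, uint64_t delta)
{
    atomic_fetch_add(&it->seq, 1);
    atomic_store(&it->total, atomic_load(&it->total) + delta);
    atomic_fetch_add(&it->seq, 1);
}

/* Reader (any CPU): retry until an even, unchanged seq brackets the load,
 * which is the protocol irq_time_read() follows via __u64_stats_fetch_*(). */
static uint64_t toy_read(struct toy_irqtime *it)
{
    unsigned int seq;
    uint64_t val;

    do {
        seq = atomic_load(&it->seq);
        val = atomic_load(&it->total);
    } while ((seq & 1) || seq != atomic_load(&it->seq));

    return val;
}

int main(void)
{
    static struct toy_irqtime it;

    toy_account(&it, 1000);
    return toy_read(&it) == 1000 ? 0 : 1;
}
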
+diff -Nur a/kernel/sched/sched.h b/kernel/sched/sched.h
+--- a/kernel/sched/sched.h 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/sched/sched.h 2018-11-03 16:06:32.717529096 +0000
+@@ -1,5 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+
++#ifdef CONFIG_SCHED_MUQSS
++#include "MuQSS.h"
++#else /* CONFIG_SCHED_MUQSS */
+ #include <linux/sched.h>
+ #include <linux/sched/autogroup.h>
+ #include <linux/sched/sysctl.h>
+@@ -2103,3 +2106,29 @@
+ #else /* arch_scale_freq_capacity */
+ #define arch_scale_freq_invariant() (false)
+ #endif
++
++static inline bool softirq_pending(int cpu)
++{
++ return false;
++}
++
++#ifdef CONFIG_64BIT
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ return t->se.sum_exec_runtime;
++}
++#else
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ u64 ns;
++ struct rq_flags rf;
++ struct rq *rq;
++
++ rq = task_rq_lock(t, &rf);
++ ns = t->se.sum_exec_runtime;
++ task_rq_unlock(rq, t, &rf);
++
++ return ns;
++}
++#endif
++#endif /* CONFIG_SCHED_MUQSS */
+diff -Nur a/kernel/skip_list.c b/kernel/skip_list.c
+--- a/kernel/skip_list.c 1970-01-01 01:00:00.000000000 +0100
++++ b/kernel/skip_list.c 2018-11-03 16:06:32.717529096 +0000
+@@ -0,0 +1,148 @@
++/*
++ Copyright (C) 2011,2016 Con Kolivas.
++
++ Code based on example originally by William Pugh.
++
++Skip Lists are a probabilistic alternative to balanced trees, as
++described in the June 1990 issue of CACM and were invented by
++William Pugh in 1987.
++
++A couple of comments about this implementation:
++The routine randomLevel has been hard-coded to generate random
++levels using p=0.25. It can be easily changed.
++
++The insertion routine has been implemented so as to use the
++dirty hack described in the CACM paper: if a random level is
++generated that is more than the current maximum level, the
++current maximum level plus one is used instead.
++
++Levels start at zero and go up to MaxLevel (which is equal to
++MaxNumberOfLevels-1).
++
++The routines defined in this file are:
++
++init: defines slnode
++
++new_skiplist: returns a new, empty list
++
++randomLevel: Returns a random level based on a u64 random seed passed to it.
++In MuQSS, the "niffy" time is used for this purpose.
++
++insert(l, key, value): inserts the binding (key, value) into l. This operation
++occurs in O(log n) time.
++
++delnode(slnode, l, node): deletes any binding of key from l based on the
++actual node value. This operation occurs in O(k) time where k is the
++number of levels of the node in question (max 8). The original delete
++function occurred in O(log n) time and involved a search.
++
++MuQSS Notes: In this implementation of skiplists, there are bidirectional
++next/prev pointers and the insert function returns a pointer to the actual
++node where the value is stored. The key here is chosen by the scheduler so as to
++sort tasks according to the priority list requirements and is no longer used
++by the scheduler after insertion. The scheduler lookup, however, occurs in
++O(1) time because it is always the first item in the level 0 linked list.
++Since the task struct stores a copy of the node pointer upon skiplist_insert,
++it can also remove it much faster than the original implementation with the
++aid of prev<->next pointer manipulation and no searching.
++
++*/
++
++#include <linux/slab.h>
++#include <linux/skip_list.h>
++
++#define MaxNumberOfLevels 8
++#define MaxLevel (MaxNumberOfLevels - 1)
++
++void skiplist_init(skiplist_node *slnode)
++{
++ int i;
++
++ slnode->key = 0xFFFFFFFFFFFFFFFF;
++ slnode->level = 0;
++ slnode->value = NULL;
++ for (i = 0; i < MaxNumberOfLevels; i++)
++ slnode->next[i] = slnode->prev[i] = slnode;
++}
++
++skiplist *new_skiplist(skiplist_node *slnode)
++{
++ skiplist *l = kzalloc(sizeof(skiplist), GFP_ATOMIC);
++
++ BUG_ON(!l);
++ l->header = slnode;
++ return l;
++}
++
++void free_skiplist(skiplist *l)
++{
++ skiplist_node *p, *q;
++
++ p = l->header;
++ do {
++ q = p->next[0];
++ p->next[0]->prev[0] = q->prev[0];
++ skiplist_node_init(p);
++ p = q;
++ } while (p != l->header);
++ kfree(l);
++}
++
++void skiplist_node_init(skiplist_node *node)
++{
++ memset(node, 0, sizeof(skiplist_node));
++}
++
++static inline unsigned int randomLevel(const long unsigned int randseed)
++{
++ return find_first_bit(&randseed, MaxLevel) / 2;
++}
++
++void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed)
++{
++ skiplist_node *update[MaxNumberOfLevels];
++ skiplist_node *p, *q;
++ int k = l->level;
++
++ p = l->header;
++ do {
++ while (q = p->next[k], q->key <= key)
++ p = q;
++ update[k] = p;
++ } while (--k >= 0);
++
++ ++l->entries;
++ k = randomLevel(randseed);
++ if (k > l->level) {
++ k = ++l->level;
++ update[k] = l->header;
++ }
++
++ node->level = k;
++ node->key = key;
++ node->value = value;
++ do {
++ p = update[k];
++ node->next[k] = p->next[k];
++ p->next[k] = node;
++ node->prev[k] = p;
++ node->next[k]->prev[k] = node;
++ } while (--k >= 0);
++}
++
++void skiplist_delete(skiplist *l, skiplist_node *node)
++{
++ int k, m = node->level;
++
++ for (k = 0; k <= m; k++) {
++ node->prev[k]->next[k] = node->next[k];
++ node->next[k]->prev[k] = node->prev[k];
++ }
++ skiplist_node_init(node);
++ if (m == l->level) {
++ while (l->header->next[m] == l->header && l->header->prev[m] == l->header && m > 0)
++ m--;
++ l->level = m;
++ }
++ l->entries--;
++}
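
To make the API above concrete, here is a minimal usage sketch in the style the scheduler is expected to follow: one embedded header node per list, entries inserted under a priority-derived key, an O(1) peek at the head of level 0, and O(k) removal through the stored node pointer. It is illustrative only; the example_* names are invented, keyType/valueType are assumed to be u64 and void * as declared in include/linux/skip_list.h (not shown in this hunk), and all locking is omitted:

#include <linux/skip_list.h>
#include <linux/types.h>

static skiplist_node example_header;   /* embedded header/sentinel node */
static skiplist *example_list;

static void example_setup(void)
{
    skiplist_init(&example_header);               /* key = all-ones, links to self */
    example_list = new_skiplist(&example_header);
}

/* Insert an object sorted by a priority-derived key; 'seed' picks the level. */
static void example_insert(skiplist_node *node, u64 key, void *obj, unsigned int seed)
{
    skiplist_insert(example_list, node, key, obj, seed);
}

/* The lowest key is always first on level 0, so the scheduler's peek is O(1). */
static void *example_peek(void)
{
    skiplist_node *first = example_header.next[0];

    return first == &example_header ? NULL : first->value;
}

/* Removal needs no search: the node unlinks itself on each of its levels. */
static void example_remove(skiplist_node *node)
{
    skiplist_delete(example_list, node);
}
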
+diff -Nur a/kernel/sysctl.c b/kernel/sysctl.c
+--- a/kernel/sysctl.c 2018-11-03 16:00:51.933620936 +0000
++++ b/kernel/sysctl.c 2018-11-03 16:12:48.444570622 +0000
+@@ -133,8 +133,14 @@
+ static int __maybe_unused two __read_only = 2;
+ static int __maybe_unused four __read_only = 4;
+ static unsigned long one_ul __read_only = 1;
+-static int one_hundred __read_only = 100;
+-static int one_thousand __read_only = 1000;
++static int one_hundred __read_only = 100;
++static int one_thousand __read_only = 1000;
++#ifdef CONFIG_SCHED_MUQSS
++extern int rr_interval;
++extern int sched_interactive;
++extern int sched_iso_cpu;
++extern int sched_yield_type;
++#endif
+ #ifdef CONFIG_PRINTK
+ static int ten_thousand __read_only = 10000;
+ #endif
+@@ -296,7 +302,7 @@
+ { }
+ };
+
+-#ifdef CONFIG_SCHED_DEBUG
++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_MUQSS)
+ static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
+ static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
+ static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
+@@ -313,6 +319,7 @@
+ #endif
+
+ static struct ctl_table kern_table[] = {
++#ifndef CONFIG_SCHED_MUQSS
+ {
+ .procname = "sched_child_runs_first",
+ .data = &sysctl_sched_child_runs_first,
+@@ -475,6 +482,7 @@
+ .extra1 = &one,
+ },
+ #endif
++#endif /* !CONFIG_SCHED_MUQSS */
+ #ifdef CONFIG_PROVE_LOCKING
+ {
+ .procname = "prove_locking",
+@@ -1073,6 +1081,44 @@
+ .proc_handler = proc_dointvec,
+ },
+ #endif
++#ifdef CONFIG_SCHED_MUQSS
++ {
++ .procname = "rr_interval",
++ .data = &rr_interval,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &one_thousand,
++ },
++ {
++ .procname = "interactive",
++ .data = &sched_interactive,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++ {
++ .procname = "iso_cpu",
++ .data = &sched_iso_cpu,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one_hundred,
++ },
++ {
++ .procname = "yield_type",
++ .data = &sched_yield_type,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &two,
++ },
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ {
+ .procname = "spin_retry",
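
Once registered, the MuQSS knobs above appear as ordinary files under /proc/sys/kernel/, with the bounds given by the extra1/extra2 fields (for example rr_interval is clamped to 1..1000). As a hedged illustration only, a small userspace reader against those .procname entries; the program and its helper names are invented:

#include <stdio.h>

/* Read a single integer sysctl from /proc/sys/kernel/<name>; returns 0 on success. */
static int read_kernel_sysctl(const char *name, int *val)
{
    char path[128];
    FILE *f;
    int ok;

    snprintf(path, sizeof(path), "/proc/sys/kernel/%s", name);
    f = fopen(path, "r");
    if (!f)
        return -1;
    ok = (fscanf(f, "%d", val) == 1) ? 0 : -1;
    fclose(f);
    return ok;
}

int main(void)
{
    const char *knobs[] = { "rr_interval", "interactive", "iso_cpu", "yield_type" };
    int i, v;

    for (i = 0; i < 4; i++) {
        if (read_kernel_sysctl(knobs[i], &v) == 0)
            printf("%s = %d\n", knobs[i], v);
        else
            printf("%s not present (kernel built without CONFIG_SCHED_MUQSS?)\n", knobs[i]);
    }
    return 0;
}
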
+diff -Nur a/kernel/time/clockevents.c b/kernel/time/clockevents.c
+--- a/kernel/time/clockevents.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/time/clockevents.c 2018-11-03 16:06:32.719529160 +0000
+@@ -198,8 +198,13 @@
+
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
+
++#ifdef CONFIG_SCHED_MUQSS
++/* Limit min_delta to 100us */
++#define MIN_DELTA_LIMIT (NSEC_PER_SEC / 10000)
++#else
+ /* Limit min_delta to a jiffie */
+ #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
++#endif
+
+ /**
+ * clockevents_increase_min_delta - raise minimum delta of a clock event device
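
The effect of this hunk is easiest to see numerically: one jiffy at the HZ=100 that MuQSS kernels are expected to run is 10 ms, while the MuQSS branch clamps min_delta to 100 us, two orders of magnitude finer. A throwaway calculation, for illustration only:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 100                         /* the HZ value targeted by this kernel */

int main(void)
{
    printf("stock limit (1 jiffy @ HZ=100): %llu ns\n", NSEC_PER_SEC / HZ);    /* 10000000 */
    printf("MuQSS limit (100us):            %llu ns\n", NSEC_PER_SEC / 10000); /* 100000   */
    return 0;
}
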
+diff -Nur a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+--- a/kernel/time/posix-cpu-timers.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/time/posix-cpu-timers.c 2018-11-03 16:06:32.719529160 +0000
+@@ -818,7 +818,7 @@
+ tsk_expires->virt_exp = expires;
+
+ tsk_expires->sched_exp = check_timers_list(++timers, firing,
+- tsk->se.sum_exec_runtime);
++ tsk_seruntime(tsk));
+
+ /*
+ * Check for the special case thread timers.
+@@ -828,7 +828,7 @@
+ unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+
+ if (hard != RLIM_INFINITY &&
+- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
++ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+ /*
+ * At the hard limit, we just die.
+ * No need to calculate anything else now.
+@@ -840,7 +840,7 @@
+ __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+ return;
+ }
+- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
++ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
+ /*
+ * At the soft limit, send a SIGXCPU every second.
+ */
+@@ -1081,7 +1081,7 @@
+ struct task_cputime task_sample;
+
+ task_cputime(tsk, &task_sample.utime, &task_sample.stime);
+- task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
++ task_sample.sum_exec_runtime = tsk_seruntime(tsk);
+ if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+ return 1;
+ }
+diff -Nur a/kernel/time/timer.c b/kernel/time/timer.c
+--- a/kernel/time/timer.c 2018-11-03 16:00:51.934620967 +0000
++++ b/kernel/time/timer.c 2018-11-03 16:06:32.720529192 +0000
+@@ -1434,7 +1434,7 @@
+ * Check, if the next hrtimer event is before the next timer wheel
+ * event:
+ */
+-static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
++static u64 cmp_next_hrtimer_event(struct timer_base *base, u64 basem, u64 expires)
+ {
+ u64 nextevt = hrtimer_get_next_event();
+
+@@ -1452,6 +1452,9 @@
+ if (nextevt <= basem)
+ return basem;
+
++ if (nextevt < expires && nextevt - basem <= TICK_NSEC)
++ base->is_idle = false;
++
+ /*
+ * Round up to the next jiffie. High resolution timers are
+ * off, so the hrtimers are expired in the tick and we need to
+@@ -1521,7 +1524,7 @@
+ }
+ raw_spin_unlock(&base->lock);
+
+- return cmp_next_hrtimer_event(basem, expires);
++ return cmp_next_hrtimer_event(base, basem, expires);
+ }
+
+ /**
+diff -Nur a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+--- a/kernel/trace/trace_selftest.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/trace/trace_selftest.c 2018-11-03 16:06:32.720529192 +0000
+@@ -1041,10 +1041,15 @@
+ {
+ /* Make this a -deadline thread */
+ static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_MUQSS
++ /* No deadline on MuQSS, use RR */
++ .sched_policy = SCHED_RR,
++#else
+ .sched_policy = SCHED_DEADLINE,
+ .sched_runtime = 100000ULL,
+ .sched_deadline = 10000000ULL,
+ .sched_period = 10000000ULL
++#endif
+ };
+ struct wakeup_test_data *x = data;
+
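
For context, the sched_attr initialised above is the same structure userspace passes to sched_setattr(2); the selftest falls back to SCHED_RR because MuQSS does not implement SCHED_DEADLINE. A hedged userspace sketch of the equivalent call follows; the struct is mirrored locally (glibc of this era ships no wrapper) and all names prefixed with sketch_ are invented:

#define _GNU_SOURCE
#include <sched.h>              /* SCHED_RR */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Local mirror of the uapi sched_attr layout (as in the sched_setattr(2) example). */
struct sketch_sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t  sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
};

int main(void)
{
    struct sketch_sched_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.sched_policy = SCHED_RR;      /* MuQSS has no SCHED_DEADLINE, so use RR */
    attr.sched_priority = 10;

    if (syscall(SYS_sched_setattr, 0, &attr, 0))    /* pid 0 = calling thread */
        perror("sched_setattr");
    return 0;
}
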
diff --git a/sys-kernel/linux-image-redcore-lts/files/0002-BFQ-v8r12-20180404.patch b/sys-kernel/linux-image-redcore-lts/files/0002-BFQ-v8r12-20180404.patch
new file mode 100644
index 00000000..104325d6
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0002-BFQ-v8r12-20180404.patch
@@ -0,0 +1,4611 @@
+From 7bd365a925748767d7ed807e5498f90bae0ebc25 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 14 Nov 2017 08:28:45 +0100
+Subject: [PATCH 01/23] block, bfq-mq: turn BUG_ON on request-size into WARN_ON
+
+BFQ has many checks of internal and external consistency. One of them
+checks that an I/O request has still sectors to serve, if it happens
+to be retired without being served. If the request has no sector to
+serve, a BUG_ON signals the failure and causes the kernel to
+terminate. Yet, from a crash report by a user [1], this condition may
+happen to hold, in apparently correct functioning, for I/O with a
+CD/DVD.
+
+To address this issue, this commit turns the above BUG_ON into a
+WARN_ON. This commit also adds a companion WARN_ON on request
+insertion into the scheduler.
+
+[1] https://groups.google.com/d/msg/bfq-iosched/DDOTJBroBa4/VyU1zUFtCgAJ
+
+Reported-by: Alexandre Frade <admfrade@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 0c09609a6099..0fc757ae7a42 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1540,6 +1540,8 @@ static void bfq_add_request(struct request *rq)
+
+ BUG_ON(!RQ_BFQQ(rq));
+ BUG_ON(RQ_BFQQ(rq) != bfqq);
++ WARN_ON(blk_rq_sectors(rq) == 0);
++
+ elv_rb_add(&bfqq->sort_list, rq);
+
+ /*
+@@ -4962,7 +4964,7 @@ static void bfq_finish_request(struct request *rq)
+ rq_io_start_time_ns(rq),
+ rq->cmd_flags);
+
+- BUG_ON(blk_rq_sectors(rq) == 0 && !(rq->rq_flags & RQF_STARTED));
++ WARN_ON(blk_rq_sectors(rq) == 0 && !(rq->rq_flags & RQF_STARTED));
+
+ if (likely(rq->rq_flags & RQF_STARTED)) {
+ unsigned long flags;
+
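
The point of the change above is the failure mode: BUG_ON() halts the kernel on a violated invariant, while WARN_ON() evaluates its condition, prints a backtrace and lets execution continue, so a retired zero-sector request no longer takes the machine down. A rough userspace analogue, for illustration only (the real macros live in the kernel's bug.h headers):

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-ins: BUG_ON aborts, WARN_ON logs and reports the condition. */
#define BUG_ON(cond)  do { if (cond) { fprintf(stderr, "BUG: %s\n", #cond); abort(); } } while (0)
#define WARN_ON(cond) warn_on_impl(!!(cond), #cond)

static int warn_on_impl(int cond, const char *what)
{
    if (cond)
        fprintf(stderr, "WARNING: %s\n", what);
    return cond;
}

int main(void)
{
    int rq_sectors = 0;   /* a request retired with no sectors left to serve */

    /* Old behaviour: BUG_ON(rq_sectors == 0) would abort right here. */

    /* New behaviour: note the anomaly and keep serving I/O. */
    if (WARN_ON(rq_sectors == 0))
        fprintf(stderr, "continuing despite empty request\n");
    return 0;
}
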
+From 1097d368a20456c88acd75b3184c68df38e8f7b8 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Sun, 12 Nov 2017 22:43:46 +0100
+Subject: [PATCH 02/23] block, bfq-sq, bfq-mq: consider also past I/O in soft
+ real-time detection
+
+BFQ privileges the I/O of soft real-time applications, such as video
+players, to guarantee to these application a high bandwidth and a low
+latency. In this respect, it is not easy to correctly detect when an
+application is soft real-time. A particularly nasty false positive is
+that of an I/O-bound application that occasionally happens to meet all
+requirements to be deemed as soft real-time. After being detected as
+soft real-time, such an application monopolizes the device. Fortunately,
+BFQ will realize soon that the application is actually not soft
+real-time and suspend every privilege. Yet, the application may happen
+again to be wrongly detected as soft real-time, and so on.
+
+As highlighted by our tests, this problem causes BFQ to occasionally
+fail to guarantee a high responsiveness, in the presence of heavy
+background I/O workloads. The reason is that the background workload
+happens to be detected as soft real-time, more or less frequently,
+during the execution of the interactive task under test. To give an
+idea, because of this problem, Libreoffice Writer occasionally takes 8
+seconds, instead of 3, to start up, if there are sequential reads and
+writes in the background, on a Kingston SSDNow V300.
+
+This commit addresses this issue by leveraging the following facts.
+
+The reason why some applications are detected as soft real-time despite
+all BFQ checks to avoid false positives, is simply that, during high
+CPU or storage-device load, I/O-bound applications may happen to do
+I/O slowly enough to meet all soft real-time requirements, and pass
+all BFQ extra checks. Yet, this happens only for limited time periods:
+slow-speed time intervals are usually interspersed between other time
+intervals during which these applications do I/O at a very high speed.
+To exploit these facts, this commit introduces a little change, in the
+detection of soft real-time behavior, to systematically consider also
+the recent past: the higher the speed was in the recent past, the
+later next I/O should arrive for the application to be considered as
+soft real-time. At the beginning of a slow-speed interval, the minimum
+arrival time allowed for the next I/O usually happens to still be so
+high, to fall *after* the end of the slow-speed period itself. As a
+consequence, the application does not risk being deemed as soft
+real-time during the slow-speed interval. Then, during the next
+high-speed interval, the application cannot, evidently, be deemed as
+soft real-time (exactly because of its speed), and so on.
+
+This extra filtering proved to be rather effective: in the above test,
+the frequency of false positives became so low that the start-up time
+was 3 seconds in all iterations (apart from occasional outliers,
+caused by page-cache-management issues, which are out of the scope of
+this commit, and cannot be solved by an I/O scheduler).
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+---
+ block/bfq-mq-iosched.c | 115 ++++++++++++++++++++++++++++++++++---------------
+ block/bfq-sq-iosched.c | 115 ++++++++++++++++++++++++++++++++++---------------
+ 2 files changed, 162 insertions(+), 68 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 0fc757ae7a42..4d06d900f45e 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -3201,37 +3201,78 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * whereas soft_rt_next_start is set to infinity for applications that do
+ * not.
+ *
+- * Unfortunately, even a greedy application may happen to behave in an
+- * isochronous way if the CPU load is high. In fact, the application may
+- * stop issuing requests while the CPUs are busy serving other processes,
+- * then restart, then stop again for a while, and so on. In addition, if
+- * the disk achieves a low enough throughput with the request pattern
+- * issued by the application (e.g., because the request pattern is random
+- * and/or the device is slow), then the application may meet the above
+- * bandwidth requirement too. To prevent such a greedy application to be
+- * deemed as soft real-time, a further rule is used in the computation of
+- * soft_rt_next_start: soft_rt_next_start must be higher than the current
+- * time plus the maximum time for which the arrival of a request is waited
+- * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
+- * This filters out greedy applications, as the latter issue instead their
+- * next request as soon as possible after the last one has been completed
+- * (in contrast, when a batch of requests is completed, a soft real-time
+- * application spends some time processing data).
++ * Unfortunately, even a greedy (i.e., I/O-bound) application may
++ * happen to meet, occasionally or systematically, both the above
++ * bandwidth and isochrony requirements. This may happen at least in
++ * the following circumstances. First, if the CPU load is high. The
++ * application may stop issuing requests while the CPUs are busy
++ * serving other processes, then restart, then stop again for a while,
++ * and so on. The other circumstances are related to the storage
++ * device: the storage device is highly loaded or reaches a low-enough
++ * throughput with the I/O of the application (e.g., because the I/O
++ * is random and/or the device is slow). In all these cases, the
++ * I/O of the application may be simply slowed down enough to meet
++ * the bandwidth and isochrony requirements. To reduce the probability
++ * that greedy applications are deemed as soft real-time in these
++ * corner cases, a further rule is used in the computation of
++ * soft_rt_next_start: the return value of this function is forced to
++ * be higher than the maximum between the following two quantities.
+ *
+- * Unfortunately, the last filter may easily generate false positives if
+- * only bfqd->bfq_slice_idle is used as a reference time interval and one
+- * or both the following cases occur:
+- * 1) HZ is so low that the duration of a jiffy is comparable to or higher
+- * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
+- * HZ=100.
++ * (a) Current time plus: (1) the maximum time for which the arrival
++ * of a request is waited for when a sync queue becomes idle,
++ * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
++ * postpone for a moment the reason for adding a few extra
++ * jiffies; we get back to it after next item (b). Lower-bounding
++ * the return value of this function with the current time plus
++ * bfqd->bfq_slice_idle tends to filter out greedy applications,
++ * because the latter issue their next request as soon as possible
++ * after the last one has been completed. In contrast, a soft
++ * real-time application spends some time processing data, after a
++ * batch of its requests has been completed.
++ *
++ * (b) Current value of bfqq->soft_rt_next_start. As pointed out
++ * above, greedy applications may happen to meet both the
++ * bandwidth and isochrony requirements under heavy CPU or
++ * storage-device load. In more detail, in these scenarios, these
++ * applications happen, only for limited time periods, to do I/O
++ * slowly enough to meet all the requirements described so far,
++ * including the filtering in above item (a). These slow-speed
++ * time intervals are usually interspersed between other time
++ * intervals during which these applications do I/O at a very high
++ * speed. Fortunately, exactly because of the high speed of the
++ * I/O in the high-speed intervals, the values returned by this
++ * function happen to be so high, near the end of any such
++ * high-speed interval, to be likely to fall *after* the end of
++ * the low-speed time interval that follows. These high values are
++ * stored in bfqq->soft_rt_next_start after each invocation of
++ * this function. As a consequence, if the last value of
++ * bfqq->soft_rt_next_start is constantly used to lower-bound the
++ * next value that this function may return, then, from the very
++ * beginning of a low-speed interval, bfqq->soft_rt_next_start is
++ * likely to be constantly kept so high that any I/O request
++ * issued during the low-speed interval is considered as arriving
++ * too soon for the application to be deemed as soft
++ * real-time. Then, in the high-speed interval that follows, the
++ * application will not be deemed as soft real-time, just because
++ * it will do I/O at a high speed. And so on.
++ *
++ * Getting back to the filtering in item (a), in the following two
++ * cases this filtering might be easily passed by a greedy
++ * application, if the reference quantity was just
++ * bfqd->bfq_slice_idle:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or
++ * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
++ * devices with HZ=100. The time granularity may be so coarse
++ * that the approximation, in jiffies, of bfqd->bfq_slice_idle
++ * is rather lower than the exact value.
+ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
+ * for a while, then suddenly 'jump' by several units to recover the lost
+ * increments. This seems to happen, e.g., inside virtual machines.
+- * To address this issue, we do not use as a reference time interval just
+- * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
+- * particular we add the minimum number of jiffies for which the filter
+- * seems to be quite precise also in embedded systems and KVM/QEMU virtual
+- * machines.
++ * To address this issue, in the filtering in (a) we do not use as a
++ * reference time interval just bfqd->bfq_slice_idle, but
++ * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
++ * minimum number of jiffies for which the filter seems to be quite
++ * precise also in embedded systems and KVM/QEMU virtual machines.
+ */
+ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+@@ -3243,10 +3284,11 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
+ bfqd->bfq_wr_max_softrt_rate));
+
+- return max(bfqq->last_idle_bklogged +
+- HZ * bfqq->service_from_backlogged /
+- bfqd->bfq_wr_max_softrt_rate,
+- jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
++ return max3(bfqq->soft_rt_next_start,
++ bfqq->last_idle_bklogged +
++ HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
+ }
+
+ /**
+@@ -4395,10 +4437,15 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfqq->split_time = bfq_smallest_from_now();
+
+ /*
+- * Set to the value for which bfqq will not be deemed as
+- * soft rt when it becomes backlogged.
++ * To not forget the possibly high bandwidth consumed by a
++ * process/queue in the recent past,
++ * bfq_bfqq_softrt_next_start() returns a value at least equal
++ * to the current value of bfqq->soft_rt_next_start (see
++ * comments on bfq_bfqq_softrt_next_start). Set
++ * soft_rt_next_start to now, to mean that bfqq has consumed
++ * no bandwidth so far.
+ */
+- bfqq->soft_rt_next_start = bfq_greatest_from_now();
++ bfqq->soft_rt_next_start = jiffies;
+
+ /* first request is almost certainly seeky */
+ bfqq->seek_history = 1;
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 4bbd7f4c0154..987dc255c82c 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -3089,37 +3089,78 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * whereas soft_rt_next_start is set to infinity for applications that do
+ * not.
+ *
+- * Unfortunately, even a greedy application may happen to behave in an
+- * isochronous way if the CPU load is high. In fact, the application may
+- * stop issuing requests while the CPUs are busy serving other processes,
+- * then restart, then stop again for a while, and so on. In addition, if
+- * the disk achieves a low enough throughput with the request pattern
+- * issued by the application (e.g., because the request pattern is random
+- * and/or the device is slow), then the application may meet the above
+- * bandwidth requirement too. To prevent such a greedy application to be
+- * deemed as soft real-time, a further rule is used in the computation of
+- * soft_rt_next_start: soft_rt_next_start must be higher than the current
+- * time plus the maximum time for which the arrival of a request is waited
+- * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
+- * This filters out greedy applications, as the latter issue instead their
+- * next request as soon as possible after the last one has been completed
+- * (in contrast, when a batch of requests is completed, a soft real-time
+- * application spends some time processing data).
++ * Unfortunately, even a greedy (i.e., I/O-bound) application may
++ * happen to meet, occasionally or systematically, both the above
++ * bandwidth and isochrony requirements. This may happen at least in
++ * the following circumstances. First, if the CPU load is high. The
++ * application may stop issuing requests while the CPUs are busy
++ * serving other processes, then restart, then stop again for a while,
++ * and so on. The other circumstances are related to the storage
++ * device: the storage device is highly loaded or reaches a low-enough
++ * throughput with the I/O of the application (e.g., because the I/O
++ * is random and/or the device is slow). In all these cases, the
++ * I/O of the application may be simply slowed down enough to meet
++ * the bandwidth and isochrony requirements. To reduce the probability
++ * that greedy applications are deemed as soft real-time in these
++ * corner cases, a further rule is used in the computation of
++ * soft_rt_next_start: the return value of this function is forced to
++ * be higher than the maximum between the following two quantities.
+ *
+- * Unfortunately, the last filter may easily generate false positives if
+- * only bfqd->bfq_slice_idle is used as a reference time interval and one
+- * or both the following cases occur:
+- * 1) HZ is so low that the duration of a jiffy is comparable to or higher
+- * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
+- * HZ=100.
++ * (a) Current time plus: (1) the maximum time for which the arrival
++ * of a request is waited for when a sync queue becomes idle,
++ * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
++ * postpone for a moment the reason for adding a few extra
++ * jiffies; we get back to it after next item (b). Lower-bounding
++ * the return value of this function with the current time plus
++ * bfqd->bfq_slice_idle tends to filter out greedy applications,
++ * because the latter issue their next request as soon as possible
++ * after the last one has been completed. In contrast, a soft
++ * real-time application spends some time processing data, after a
++ * batch of its requests has been completed.
++ *
++ * (b) Current value of bfqq->soft_rt_next_start. As pointed out
++ * above, greedy applications may happen to meet both the
++ * bandwidth and isochrony requirements under heavy CPU or
++ * storage-device load. In more detail, in these scenarios, these
++ * applications happen, only for limited time periods, to do I/O
++ * slowly enough to meet all the requirements described so far,
++ * including the filtering in above item (a). These slow-speed
++ * time intervals are usually interspersed between other time
++ * intervals during which these applications do I/O at a very high
++ * speed. Fortunately, exactly because of the high speed of the
++ * I/O in the high-speed intervals, the values returned by this
++ * function happen to be so high, near the end of any such
++ * high-speed interval, to be likely to fall *after* the end of
++ * the low-speed time interval that follows. These high values are
++ * stored in bfqq->soft_rt_next_start after each invocation of
++ * this function. As a consequence, if the last value of
++ * bfqq->soft_rt_next_start is constantly used to lower-bound the
++ * next value that this function may return, then, from the very
++ * beginning of a low-speed interval, bfqq->soft_rt_next_start is
++ * likely to be constantly kept so high that any I/O request
++ * issued during the low-speed interval is considered as arriving
++ * too soon for the application to be deemed as soft
++ * real-time. Then, in the high-speed interval that follows, the
++ * application will not be deemed as soft real-time, just because
++ * it will do I/O at a high speed. And so on.
++ *
++ * Getting back to the filtering in item (a), in the following two
++ * cases this filtering might be easily passed by a greedy
++ * application, if the reference quantity was just
++ * bfqd->bfq_slice_idle:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or
++ * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
++ * devices with HZ=100. The time granularity may be so coarse
++ * that the approximation, in jiffies, of bfqd->bfq_slice_idle
++ * is rather lower than the exact value.
+ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
+ * for a while, then suddenly 'jump' by several units to recover the lost
+ * increments. This seems to happen, e.g., inside virtual machines.
+- * To address this issue, we do not use as a reference time interval just
+- * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
+- * particular we add the minimum number of jiffies for which the filter
+- * seems to be quite precise also in embedded systems and KVM/QEMU virtual
+- * machines.
++ * To address this issue, in the filtering in (a) we do not use as a
++ * reference time interval just bfqd->bfq_slice_idle, but
++ * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
++ * minimum number of jiffies for which the filter seems to be quite
++ * precise also in embedded systems and KVM/QEMU virtual machines.
+ */
+ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+@@ -3131,10 +3172,11 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
+ bfqd->bfq_wr_max_softrt_rate));
+
+- return max(bfqq->last_idle_bklogged +
+- HZ * bfqq->service_from_backlogged /
+- bfqd->bfq_wr_max_softrt_rate,
+- jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
++ return max3(bfqq->soft_rt_next_start,
++ bfqq->last_idle_bklogged +
++ HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
+ }
+
+ /**
+@@ -4167,10 +4209,15 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfqq->split_time = bfq_smallest_from_now();
+
+ /*
+- * Set to the value for which bfqq will not be deemed as
+- * soft rt when it becomes backlogged.
++ * To not forget the possibly high bandwidth consumed by a
++ * process/queue in the recent past,
++ * bfq_bfqq_softrt_next_start() returns a value at least equal
++ * to the current value of bfqq->soft_rt_next_start (see
++ * comments on bfq_bfqq_softrt_next_start). Set
++ * soft_rt_next_start to now, to mean that bfqq has consumed
++ * no bandwidth so far.
+ */
+- bfqq->soft_rt_next_start = bfq_greatest_from_now();
++ bfqq->soft_rt_next_start = jiffies;
+
+ /* first request is almost certainly seeky */
+ bfqq->seek_history = 1;
+
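
Stripped of the BFQ data structures, the commit above adds a third term to the lower bound on the next instant at which a queue may qualify as soft real-time. A simplified restatement with everything passed in as plain jiffies values; this is an illustration, not the real helper, and it assumes wr_max_softrt_rate is non-zero:

/* max of three unsigned longs, like the kernel's max3() */
static unsigned long max3ul(unsigned long a, unsigned long b, unsigned long c)
{
    unsigned long m = a > b ? a : b;

    return m > c ? m : c;
}

/*
 * Earliest instant (in jiffies) at which the queue may again be deemed soft
 * real-time. Term (b) is the new addition made by this commit.
 */
static unsigned long next_softrt(unsigned long prev_soft_rt_next_start,
                                 unsigned long last_idle_bklogged,
                                 unsigned long service_from_backlogged,
                                 unsigned long wr_max_softrt_rate,
                                 unsigned long hz,
                                 unsigned long now,
                                 unsigned long slice_idle_jiffies)
{
    return max3ul(prev_soft_rt_next_start,              /* (b) remember recent high speed */
                  last_idle_bklogged +
                  hz * service_from_backlogged / wr_max_softrt_rate, /* bandwidth bound   */
                  now + slice_idle_jiffies + 4);        /* (a) idle wait plus a few jiffies */
}
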
+From 2a09b505660c81dbb80a5d68c9bc558c326d041f Mon Sep 17 00:00:00 2001
+From: Chiara Bruschi <bruschi.chiara@outlook.it>
+Date: Thu, 7 Dec 2017 09:57:19 +0100
+Subject: [PATCH 03/23] block, bfq-mq: fix occurrences of request
+ prepare/finish methods' old names
+
+Commits 'b01f1fa3bb19' (Port of "blk-mq-sched: unify request prepare
+methods") and 'cc10d2d7d2c1' (Port of "blk-mq-sched: unify request
+finished methods") changed the old names of current bfq_prepare_request
+and bfq_finish_request methods, but left them unchanged elsewhere in
+the code (related comments, part of function name bfq_put_rq_priv_body).
+
+This commit fixes every occurrence of the old names of these methods
+by changing them into the current names.
+
+Fixes: b01f1fa3bb19 (Port of "blk-mq-sched: unify request prepare methods")
+Fixes: cc10d2d7d2c1 (Port of "blk-mq-sched: unify request finished methods")
+Reviewed-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Federico Motta <federico@willer.it>
+Signed-off-by: Chiara Bruschi <bruschi.chiara@outlook.it>
+---
+ block/bfq-mq-iosched.c | 38 +++++++++++++++++++-------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 4d06d900f45e..8f8d5eccb016 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4018,20 +4018,20 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ /*
+ * TESTING: reset DISP_LIST flag, because: 1)
+ * this rq this request has passed through
+- * get_rq_private, 2) then it will have
+- * put_rq_private invoked on it, and 3) in
+- * put_rq_private we use this flag to check
+- * that put_rq_private is not invoked on
+- * requests for which get_rq_private has been
+- * invoked.
++ * bfq_prepare_request, 2) then it will have
++ * bfq_finish_request invoked on it, and 3) in
++ * bfq_finish_request we use this flag to check
++ * that bfq_finish_request is not invoked on
++ * requests for which bfq_prepare_request has
++ * been invoked.
+ */
+ rq->rq_flags &= ~RQF_DISP_LIST;
+ goto inc_in_driver_start_rq;
+ }
+
+ /*
+- * We exploit the put_rq_private hook to decrement
+- * rq_in_driver, but put_rq_private will not be
++ * We exploit the bfq_finish_request hook to decrement
++ * rq_in_driver, but bfq_finish_request will not be
+ * invoked on this request. So, to avoid unbalance,
+ * just start this request, without incrementing
+ * rq_in_driver. As a negative consequence,
+@@ -4040,14 +4040,14 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ * bfq_schedule_dispatch to be invoked uselessly.
+ *
+ * As for implementing an exact solution, the
+- * put_request hook, if defined, is probably invoked
+- * also on this request. So, by exploiting this hook,
+- * we could 1) increment rq_in_driver here, and 2)
+- * decrement it in put_request. Such a solution would
+- * let the value of the counter be always accurate,
+- * but it would entail using an extra interface
+- * function. This cost seems higher than the benefit,
+- * being the frequency of non-elevator-private
++ * bfq_finish_request hook, if defined, is probably
++ * invoked also on this request. So, by exploiting
++ * this hook, we could 1) increment rq_in_driver here,
++ * and 2) decrement it in bfq_finish_request. Such a
++ * solution would let the value of the counter be
++ * always accurate, but it would entail using an extra
++ * interface function. This cost seems higher than the
++ * benefit, being the frequency of non-elevator-private
+ * requests very low.
+ */
+ goto start_rq;
+@@ -4963,7 +4963,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ }
+ }
+
+-static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
++static void bfq_finish_request_body(struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "put_request_body: allocated %d", bfqq->allocated);
+@@ -5019,7 +5019,7 @@ static void bfq_finish_request(struct request *rq)
+ spin_lock_irqsave(&bfqd->lock, flags);
+
+ bfq_completed_request(bfqq, bfqd);
+- bfq_put_rq_priv_body(bfqq);
++ bfq_finish_request_body(bfqq);
+
+ spin_unlock_irqrestore(&bfqd->lock, flags);
+ } else {
+@@ -5042,7 +5042,7 @@ static void bfq_finish_request(struct request *rq)
+ bfqg_stats_update_io_remove(bfqq_group(bfqq),
+ rq->cmd_flags);
+ }
+- bfq_put_rq_priv_body(bfqq);
++ bfq_finish_request_body(bfqq);
+ }
+
+ rq->elv.priv[0] = NULL;
+
+From 4df19943c3a767df453abea3d2ac3433c3326ce0 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 16 Nov 2017 18:38:13 +0100
+Subject: [PATCH 04/23] block, bfq-sq, bfq-mq: add missing rq_pos_tree update
+ on rq removal
+
+If two processes do I/O close to each other, then BFQ merges the
+bfq_queues associated with these processes, to get a more sequential
+I/O, and thus a higher throughput. In this respect, to detect whether
+two processes are doing I/O close to each other, BFQ keeps a list of
+the head-of-line I/O requests of all active bfq_queues. The list is
+ordered by initial sectors, and implemented through a red-black tree
+(rq_pos_tree).
+
+Unfortunately, the update of the rq_pos_tree was incomplete, because
+the tree was not updated on the removal of the head-of-line I/O
+request of a bfq_queue, in case the queue did not remain empty. This
+commit adds the missing update.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+---
+ block/bfq-mq-iosched.c | 3 +++
+ block/bfq-sq-iosched.c | 3 +++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 8f8d5eccb016..603191c9008f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1729,6 +1729,9 @@ static void bfq_remove_request(struct request_queue *q,
+ rb_erase(&bfqq->pos_node, bfqq->pos_root);
+ bfqq->pos_root = NULL;
+ }
++ } else {
++ BUG_ON(!bfqq->next_rq);
++ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
+
+ if (rq->cmd_flags & REQ_META) {
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 987dc255c82c..ea90ace79e49 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -1669,6 +1669,9 @@ static void bfq_remove_request(struct request *rq)
+ rb_erase(&bfqq->pos_node, bfqq->pos_root);
+ bfqq->pos_root = NULL;
+ }
++ } else {
++ BUG_ON(!bfqq->next_rq);
++ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
+
+ if (rq->cmd_flags & REQ_META) {
+
+From b844e345140aaea957d84a21d2aa67588b020cd5 Mon Sep 17 00:00:00 2001
+From: Angelo Ruocco <angeloruocco90@gmail.com>
+Date: Mon, 18 Dec 2017 08:28:08 +0100
+Subject: [PATCH 05/23] block, bfq-sq, bfq-mq: check low_latency flag in
+ bfq_bfqq_save_state()
+
+A just-created bfq_queue will certainly be deemed as interactive on
+the arrival of its first I/O request, if the low_latency flag is
+set. Yet, if the queue is merged with another queue on the arrival of
+its first I/O request, it will not have the chance to be flagged as
+interactive. Nevertheless, if the queue is then split soon enough, it
+has to be flagged as interactive after the split.
+
+To handle this early-merge scenario correctly, BFQ saves the state of
+the queue, on the merge, as if the latter had already been deemed
+interactive. So, if the queue is split soon, it will get
+weight-raised, because the previous state of the queue is resumed on
+the split.
+
+Unfortunately, in the act of saving the state of the newly-created
+queue, BFQ doesn't check whether the low_latency flag is set, and this
+causes early-merged queues to be then weight-raised, on queue splits,
+even if low_latency is off. This commit addresses this problem by
+adding the missing check.
+
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 3 ++-
+ block/bfq-sq-iosched.c | 3 ++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 603191c9008f..ff9776c8836a 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -2231,7 +2231,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
+ if (unlikely(bfq_bfqq_just_created(bfqq) &&
+- !bfq_bfqq_in_large_burst(bfqq))) {
++ !bfq_bfqq_in_large_burst(bfqq) &&
++ bfqq->bfqd->low_latency)) {
+ /*
+ * bfqq being merged ritgh after being created: bfqq
+ * would have deserved interactive weight raising, but
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index ea90ace79e49..3a2d764e760c 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -2109,7 +2109,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
+ if (unlikely(bfq_bfqq_just_created(bfqq) &&
+- !bfq_bfqq_in_large_burst(bfqq))) {
++ !bfq_bfqq_in_large_burst(bfqq) &&
++ bfqq->bfqd->low_latency)) {
+ /*
+ * bfqq being merged ritgh after being created: bfqq
+ * would have deserved interactive weight raising, but
+
+From 4cc6896fe1de2e0b4de151a6e70658f10b9ec2fa Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 27 Oct 2017 11:12:14 +0200
+Subject: [PATCH 06/23] block, bfq-sq, bfq-mq: let a queue be merged only
+ shortly after starting I/O
+
+In BFQ and CFQ, two processes are said to be cooperating if they do
+I/O in such a way that the union of their I/O requests yields a
+sequential I/O pattern. To get such a sequential I/O pattern out of
+the non-sequential pattern of each cooperating process, BFQ and CFQ
+merge the queues associated with these processes. In more detail,
+cooperating processes, and thus their associated queues, usually
+start, or restart, to do I/O shortly after each other. This is the
+case, e.g., for the I/O threads of KVM/QEMU and of the dump
+utility. Basing on this assumption, this commit allows a bfq_queue to
+be merged only during a short time interval (100ms) after it starts,
+or re-starts, to do I/O. This filtering provides two important
+benefits.
+
+First, it greatly reduces the probability that two non-cooperating
+processes have their queues merged by mistake, if they just happen to
+do I/O close to each other for a short time interval. These spurious
+merges cause loss of service guarantees. A low-weight bfq_queue may
+unjustly get more than its expected share of the throughput: if such a
+low-weight queue is merged with a high-weight queue, then the I/O for
+the low-weight queue is served as if the queue had a high weight. This
+may damage other high-weight queues unexpectedly. For instance,
+because of this issue, lxterminal occasionally took 7.5 seconds to
+start, instead of 6.5 seconds, when some sequential readers and
+writers did I/O in the background on a FUJITSU MHX2300BT HDD. The
+reason is that the bfq_queues associated with some of the readers or
+the writers were merged with the high-weight queues of some processes
+that had to do some urgent but little I/O. The readers then exploited
+the inherited high weight for all or most of their I/O, during the
+start-up of terminal. The filtering introduced by this commit
+eliminated any outlier caused by spurious queue merges in our start-up
+time tests.
+
+This filtering also provides a little boost of the throughput
+sustainable by BFQ: 3-4%, depending on the CPU. The reason is that,
+once a bfq_queue cannot be merged any longer, this commit makes BFQ
+stop updating the data needed to handle merging for the queue.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+---
+ block/bfq-mq-iosched.c | 64 +++++++++++++++++++++++++++++++++++++++++---------
+ block/bfq-mq.h | 1 +
+ block/bfq-sched.c | 4 ++++
+ block/bfq-sq-iosched.c | 64 +++++++++++++++++++++++++++++++++++++++++---------
+ block/bfq.h | 2 ++
+ 5 files changed, 113 insertions(+), 22 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index ff9776c8836a..8b17b25a3c30 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -119,6 +119,20 @@ static const int bfq_async_charge_factor = 10;
+ /* Default timeout values, in jiffies, approximating CFQ defaults. */
+ static const int bfq_timeout = (HZ / 8);
+
++/*
++ * Time limit for merging (see comments in bfq_setup_cooperator). Set
++ * to the slowest value that, in our tests, proved to be effective in
++ * removing false positives, while not causing true positives to miss
++ * queue merging.
++ *
++ * As can be deduced from the low time limit below, queue merging, if
++ * successful, happens at the very beginning of the I/O of the involved
++ * cooperating processes, as a consequence of the arrival of the very
++ * first requests from each cooperator. After that, there is very
++ * little chance to find cooperators.
++ */
++static const unsigned long bfq_merge_time_limit = HZ/10;
++
+ static struct kmem_cache *bfq_pool;
+
+ /* Below this threshold (in ns), we consider thinktime immediate. */
+@@ -389,6 +403,13 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
+ return bfqq;
+ }
+
++static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
++{
++ return bfqq->service_from_backlogged > 0 &&
++ time_is_before_jiffies(bfqq->first_IO_time +
++ bfq_merge_time_limit);
++}
++
+ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ {
+ struct rb_node **p, *parent;
+@@ -399,6 +420,14 @@ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfqq->pos_root = NULL;
+ }
+
++ /*
++ * bfqq cannot be merged any longer (see comments in
++ * bfq_setup_cooperator): no point in adding bfqq into the
++ * position tree.
++ */
++ if (bfq_too_late_for_merging(bfqq))
++ return;
++
+ if (bfq_class_idle(bfqq))
+ return;
+ if (!bfqq->next_rq)
+@@ -2081,6 +2110,13 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ struct bfq_queue *new_bfqq)
+ {
++ if (bfq_too_late_for_merging(new_bfqq)) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "[%s] too late for bfq%d to be merged",
++ __func__, new_bfqq->pid);
++ return false;
++ }
++
+ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
+ (bfqq->ioprio_class != new_bfqq->ioprio_class))
+ return false;
+@@ -2149,6 +2185,23 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ {
+ struct bfq_queue *in_service_bfqq, *new_bfqq;
+
++ /*
++ * Prevent bfqq from being merged if it has been created too
++ * long ago. The idea is that true cooperating processes, and
++ * thus their associated bfq_queues, are supposed to be
++ * created shortly after each other. This is the case, e.g.,
++ * for KVM/QEMU and dump I/O threads. Basing on this
++ * assumption, the following filtering greatly reduces the
++ * probability that two non-cooperating processes, which just
++ * happen to do close I/O for some short time interval, have
++ * their queues merged by mistake.
++ */
++ if (bfq_too_late_for_merging(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have looked for coop, but too late");
++ return NULL;
++ }
++
+ if (bfqq->new_bfqq)
+ return bfqq->new_bfqq;
+
+@@ -3338,17 +3391,6 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ */
+ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
+
+- /*
+- * Increase service_from_backlogged before next statement,
+- * because the possible next invocation of
+- * bfq_bfqq_charge_time would likely inflate
+- * entity->service. In contrast, service_from_backlogged must
+- * contain real service, to enable the soft real-time
+- * heuristic to correctly compute the bandwidth consumed by
+- * bfqq.
+- */
+- bfqq->service_from_backlogged += entity->service;
+-
+ /*
+ * As above explained, charge slow (typically seeky) and
+ * timed-out queues with the time and not the service
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 1cb05bb853d2..a5947b203ef2 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -337,6 +337,7 @@ struct bfq_queue {
+ unsigned long wr_start_at_switch_to_srt;
+
+ unsigned long split_time; /* time of last split */
++ unsigned long first_IO_time; /* time of first I/O for this queue */
+ };
+
+ /**
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 616c0692335a..9d261dd428e4 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -939,6 +939,10 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
+ struct bfq_entity *entity = &bfqq->entity;
+ struct bfq_service_tree *st;
+
++ if (!bfqq->service_from_backlogged)
++ bfqq->first_IO_time = jiffies;
++
++ bfqq->service_from_backlogged += served;
+ for_each_entity(entity) {
+ st = bfq_entity_service_tree(entity);
+
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 3a2d764e760c..cd00a41ca35d 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -113,6 +113,20 @@ static const int bfq_async_charge_factor = 10;
+ /* Default timeout values, in jiffies, approximating CFQ defaults. */
+ static const int bfq_timeout = (HZ / 8);
+
++/*
++ * Time limit for merging (see comments in bfq_setup_cooperator). Set
++ * to the slowest value that, in our tests, proved to be effective in
++ * removing false positives, while not causing true positives to miss
++ * queue merging.
++ *
++ * As can be deduced from the low time limit below, queue merging, if
++ * successful, happens at the very beginning of the I/O of the involved
++ * cooperating processes, as a consequence of the arrival of the very
++ * first requests from each cooperator. After that, there is very
++ * little chance to find cooperators.
++ */
++static const unsigned long bfq_merge_time_limit = HZ/10;
++
+ static struct kmem_cache *bfq_pool;
+
+ /* Below this threshold (in ns), we consider thinktime immediate. */
+@@ -351,6 +365,13 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
+ return bfqq;
+ }
+
++static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
++{
++ return bfqq->service_from_backlogged > 0 &&
++ time_is_before_jiffies(bfqq->first_IO_time +
++ bfq_merge_time_limit);
++}
++
+ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ {
+ struct rb_node **p, *parent;
+@@ -361,6 +382,14 @@ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfqq->pos_root = NULL;
+ }
+
++ /*
++ * bfqq cannot be merged any longer (see comments in
++ * bfq_setup_cooperator): no point in adding bfqq into the
++ * position tree.
++ */
++ if (bfq_too_late_for_merging(bfqq))
++ return;
++
+ if (bfq_class_idle(bfqq))
+ return;
+ if (!bfqq->next_rq)
+@@ -1960,6 +1989,13 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ struct bfq_queue *new_bfqq)
+ {
++ if (bfq_too_late_for_merging(new_bfqq)) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "[%s] too late for bfq%d to be merged",
++ __func__, new_bfqq->pid);
++ return false;
++ }
++
+ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
+ (bfqq->ioprio_class != new_bfqq->ioprio_class))
+ return false;
+@@ -2028,6 +2064,23 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ {
+ struct bfq_queue *in_service_bfqq, *new_bfqq;
+
++ /*
++ * Prevent bfqq from being merged if it has been created too
++ * long ago. The idea is that true cooperating processes, and
++ * thus their associated bfq_queues, are supposed to be
++ * created shortly after each other. This is the case, e.g.,
++ * for KVM/QEMU and dump I/O threads. Basing on this
++ * assumption, the following filtering greatly reduces the
++ * probability that two non-cooperating processes, which just
++ * happen to do close I/O for some short time interval, have
++ * their queues merged by mistake.
++ */
++ if (bfq_too_late_for_merging(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have looked for coop, but too late");
++ return NULL;
++ }
++
+ if (bfqq->new_bfqq)
+ return bfqq->new_bfqq;
+
+@@ -3226,17 +3279,6 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ */
+ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
+
+- /*
+- * Increase service_from_backlogged before next statement,
+- * because the possible next invocation of
+- * bfq_bfqq_charge_time would likely inflate
+- * entity->service. In contrast, service_from_backlogged must
+- * contain real service, to enable the soft real-time
+- * heuristic to correctly compute the bandwidth consumed by
+- * bfqq.
+- */
+- bfqq->service_from_backlogged += entity->service;
+-
+ /*
+ * As above explained, charge slow (typically seeky) and
+ * timed-out queues with the time and not the service
+diff --git a/block/bfq.h b/block/bfq.h
+index 47cd4d5a8c32..59539adc00a5 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -329,6 +329,8 @@ struct bfq_queue {
+ unsigned long wr_start_at_switch_to_srt;
+
+ unsigned long split_time; /* time of last split */
++
++ unsigned long first_IO_time; /* time of first I/O for this queue */
+ };
+
+ /**
+
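+A minimal stand-alone sketch of the merge-window test introduced above: a
+queue becomes "too late for merging" once it has received some service and
+more than bfq_merge_time_limit has passed since its first I/O. This is a
+user-space model (HZ, the jiffies values and the toy_queue type are
+assumptions for illustration), not the kernel code itself.
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#define HZ                100UL        /* assumed tick rate */
+#define MERGE_TIME_LIMIT  (HZ / 10)    /* mirrors bfq_merge_time_limit */
+
+struct toy_queue {
+	unsigned long service_from_backlogged; /* service received so far */
+	unsigned long first_io_time;           /* "jiffies" of first I/O */
+};
+
+static bool too_late_for_merging(const struct toy_queue *q, unsigned long now)
+{
+	return q->service_from_backlogged > 0 &&
+	       now > q->first_io_time + MERGE_TIME_LIMIT;
+}
+
+int main(void)
+{
+	struct toy_queue q = { .service_from_backlogged = 8, .first_io_time = 1000 };
+
+	printf("t=1005: %s\n", too_late_for_merging(&q, 1005) ? "too late" : "mergeable");
+	printf("t=1020: %s\n", too_late_for_merging(&q, 1020) ? "too late" : "mergeable");
+	return 0;
+}
+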
+From 157f39c43ab182280634cd4f6335d0187b3741a0 Mon Sep 17 00:00:00 2001
+From: Angelo Ruocco <angeloruocco90@gmail.com>
+Date: Mon, 11 Dec 2017 14:19:54 +0100
+Subject: [PATCH 07/23] block, bfq-sq, bfq-mq: remove superfluous check in
+ queue-merging setup
+
+When two or more processes do I/O in such a way that their requests are
+sequential with respect to one another, BFQ merges the bfq_queues associated
+with the processes. This way the overall I/O pattern becomes sequential,
+and thus there is a boost in throughput.
+These cooperating processes usually start or restart to do I/O shortly
+after each other. So, in order to avoid merging non-cooperating processes,
+BFQ ensures that none of these queues has been in weight raising for too
+long.
+
+In this respect, from commit "block, bfq-sq, bfq-mq: let a queue be merged
+only shortly after being created", BFQ checks whether any queue (and not
+only weight-raised ones) has been doing I/O continuously for too long to be
+merged.
+
+This new additional check makes the first one useless: a queue doing
+I/O for long enough, if weight-raised, is also a queue in
+weight raising for too long to be merged. Accordingly, this commit
+removes the first check.
+
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.com>
+---
+ block/bfq-mq-iosched.c | 53 ++++----------------------------------------------
+ block/bfq-sq-iosched.c | 53 ++++----------------------------------------------
+ 2 files changed, 8 insertions(+), 98 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 8b17b25a3c30..f5db8613a70f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -2140,20 +2140,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ return true;
+ }
+
+-/*
+- * If this function returns true, then bfqq cannot be merged. The idea
+- * is that true cooperation happens very early after processes start
+- * to do I/O. Usually, late cooperations are just accidental false
+- * positives. In case bfqq is weight-raised, such false positives
+- * would evidently degrade latency guarantees for bfqq.
+- */
+-static bool wr_from_too_long(struct bfq_queue *bfqq)
+-{
+- return bfqq->wr_coeff > 1 &&
+- time_is_before_jiffies(bfqq->last_wr_start_finish +
+- msecs_to_jiffies(100));
+-}
+-
+ /*
+ * Attempt to schedule a merge of bfqq with the currently in-service
+ * queue or with a close queue among the scheduled queues. Return
+@@ -2167,11 +2153,6 @@ static bool wr_from_too_long(struct bfq_queue *bfqq)
+ * to maintain. Besides, in such a critical condition as an out of memory,
+ * the benefits of queue merging may be little relevant, or even negligible.
+ *
+- * Weight-raised queues can be merged only if their weight-raising
+- * period has just started. In fact cooperating processes are usually
+- * started together. Thus, with this filter we avoid false positives
+- * that would jeopardize low-latency guarantees.
+- *
+ * WARNING: queue merging may impair fairness among non-weight raised
+ * queues, for at least two reasons: 1) the original weight of a
+ * merged queue may change during the merged state, 2) even being the
+@@ -2205,15 +2186,7 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (bfqq->new_bfqq)
+ return bfqq->new_bfqq;
+
+- if (io_struct && wr_from_too_long(bfqq) &&
+- likely(bfqq != &bfqd->oom_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have looked for coop, but bfq%d wr",
+- bfqq->pid);
+-
+- if (!io_struct ||
+- wr_from_too_long(bfqq) ||
+- unlikely(bfqq == &bfqd->oom_bfqq))
++ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
+ return NULL;
+
+ /* If there is only one backlogged queue, don't search. */
+@@ -2223,17 +2196,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ in_service_bfqq = bfqd->in_service_queue;
+
+ if (in_service_bfqq && in_service_bfqq != bfqq &&
+- wr_from_too_long(in_service_bfqq)
+- && likely(in_service_bfqq == &bfqd->oom_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have tried merge with in-service-queue, but wr");
+-
+- if (!in_service_bfqq || in_service_bfqq == bfqq
+- || wr_from_too_long(in_service_bfqq) ||
+- unlikely(in_service_bfqq == &bfqd->oom_bfqq))
+- goto check_scheduled;
+-
+- if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ likely(in_service_bfqq != &bfqd->oom_bfqq) &&
++ bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
+ bfqq->entity.parent == in_service_bfqq->entity.parent &&
+ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
+ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
+@@ -2245,21 +2209,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * queues. The only thing we need is that the bio/request is not
+ * NULL, as we need it to establish whether a cooperator exists.
+ */
+-check_scheduled:
+ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
+ bfq_io_struct_pos(io_struct, request));
+
+ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent);
+
+- if (new_bfqq && wr_from_too_long(new_bfqq) &&
+- likely(new_bfqq != &bfqd->oom_bfqq) &&
+- bfq_may_be_close_cooperator(bfqq, new_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have merged with bfq%d, but wr",
+- new_bfqq->pid);
+-
+- if (new_bfqq && !wr_from_too_long(new_bfqq) &&
+- likely(new_bfqq != &bfqd->oom_bfqq) &&
++ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
+ bfq_may_be_close_cooperator(bfqq, new_bfqq))
+ return bfq_setup_merge(bfqq, new_bfqq);
+
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index cd00a41ca35d..d8a358e5e284 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -2019,20 +2019,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ return true;
+ }
+
+-/*
+- * If this function returns true, then bfqq cannot be merged. The idea
+- * is that true cooperation happens very early after processes start
+- * to do I/O. Usually, late cooperations are just accidental false
+- * positives. In case bfqq is weight-raised, such false positives
+- * would evidently degrade latency guarantees for bfqq.
+- */
+-static bool wr_from_too_long(struct bfq_queue *bfqq)
+-{
+- return bfqq->wr_coeff > 1 &&
+- time_is_before_jiffies(bfqq->last_wr_start_finish +
+- msecs_to_jiffies(100));
+-}
+-
+ /*
+ * Attempt to schedule a merge of bfqq with the currently in-service
+ * queue or with a close queue among the scheduled queues. Return
+@@ -2046,11 +2032,6 @@ static bool wr_from_too_long(struct bfq_queue *bfqq)
+ * to maintain. Besides, in such a critical condition as an out of memory,
+ * the benefits of queue merging may be little relevant, or even negligible.
+ *
+- * Weight-raised queues can be merged only if their weight-raising
+- * period has just started. In fact cooperating processes are usually
+- * started together. Thus, with this filter we avoid false positives
+- * that would jeopardize low-latency guarantees.
+- *
+ * WARNING: queue merging may impair fairness among non-weight raised
+ * queues, for at least two reasons: 1) the original weight of a
+ * merged queue may change during the merged state, 2) even being the
+@@ -2084,15 +2065,7 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (bfqq->new_bfqq)
+ return bfqq->new_bfqq;
+
+- if (io_struct && wr_from_too_long(bfqq) &&
+- likely(bfqq != &bfqd->oom_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have looked for coop, but bfq%d wr",
+- bfqq->pid);
+-
+- if (!io_struct ||
+- wr_from_too_long(bfqq) ||
+- unlikely(bfqq == &bfqd->oom_bfqq))
++ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
+ return NULL;
+
+ /* If there is only one backlogged queue, don't search. */
+@@ -2102,17 +2075,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ in_service_bfqq = bfqd->in_service_queue;
+
+ if (in_service_bfqq && in_service_bfqq != bfqq &&
+- bfqd->in_service_bic && wr_from_too_long(in_service_bfqq)
+- && likely(in_service_bfqq == &bfqd->oom_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have tried merge with in-service-queue, but wr");
+-
+- if (!in_service_bfqq || in_service_bfqq == bfqq ||
+- !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) ||
+- unlikely(in_service_bfqq == &bfqd->oom_bfqq))
+- goto check_scheduled;
+-
+- if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ likely(in_service_bfqq != &bfqd->oom_bfqq) &&
++ bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
+ bfqq->entity.parent == in_service_bfqq->entity.parent &&
+ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
+ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
+@@ -2124,21 +2088,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * queues. The only thing we need is that the bio/request is not
+ * NULL, as we need it to establish whether a cooperator exists.
+ */
+-check_scheduled:
+ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
+ bfq_io_struct_pos(io_struct, request));
+
+ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent);
+
+- if (new_bfqq && wr_from_too_long(new_bfqq) &&
+- likely(new_bfqq != &bfqd->oom_bfqq) &&
+- bfq_may_be_close_cooperator(bfqq, new_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have merged with bfq%d, but wr",
+- new_bfqq->pid);
+-
+- if (new_bfqq && !wr_from_too_long(new_bfqq) &&
+- likely(new_bfqq != &bfqd->oom_bfqq) &&
++ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
+ bfq_may_be_close_cooperator(bfqq, new_bfqq))
+ return bfq_setup_merge(bfqq, new_bfqq);
+
+
+From b82eb91d87f172aba7eb5eb98e8d5e2a621adf51 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 30 Nov 2017 17:48:28 +0100
+Subject: [PATCH 08/23] block, bfq-sq, bfq-mq: increase threshold to deem I/O
+ as random
+
+If two processes do I/O close to each other, i.e., are cooperating
+processes in BFQ (and CFQ's) nomenclature, then BFQ merges their
+associated bfq_queues, so as to get sequential I/O from the union of
+the I/O requests of the processes, and thus reach a higher
+throughput. A merged queue is then split if its I/O stops being
+sequential. In this respect, BFQ deems the I/O of a bfq_queue as
+(mostly) sequential only if less than 4 I/O requests are random, out
+of the last 32 requests inserted into the queue.
+
+Unfortunately, extensive testing (with the interleaved_io benchmark of
+the S suite [1], and with real applications spawning cooperating
+processes) has clearly shown that, with such a low threshold, only a
+rather low I/O throughput may be reached when several cooperating
+processes do I/O. In particular, the outcome of each test run was
+bimodal: if queue merging occurred and was stable during the test,
+then the throughput was close to the peak rate of the storage device,
+otherwise the throughput was arbitrarily low (usually around 1/10 of
+the peak rate with a rotational device). The probability to get the
+unlucky outcomes grew with the number of cooperating processes: it was
+already significant with 5 processes, and close to one with 7 or more
+processes.
+
+The cause of the low throughput in the unlucky runs was that the
+merged queues containing the I/O of these cooperating processes were
+soon split, because they contained more random I/O requests than those
+tolerated by the 4/32 threshold, but
+- that I/O would have however allowed the storage device to reach
+ peak throughput or almost peak throughput;
+- in contrast, the I/O of these processes, if served individually
+ (from separate queues) yielded a rather low throughput.
+
+So we repeated our tests with increasing values of the threshold,
+until we found the minimum value (19) for which we obtained maximum
+throughput, reliably, with at least up to 9 cooperating
+processes. Then we checked that the use of that higher threshold value
+did not cause any regression for any other benchmark in the suite [1].
+This commit raises the threshold to such a higher value.
+
+[1] https://github.com/Algodev-github/S
+
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ block/bfq-sq-iosched.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index f5db8613a70f..cb5f49ddecb6 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -145,7 +145,7 @@ static struct kmem_cache *bfq_pool;
+ #define BFQQ_SEEK_THR (sector_t)(8 * 100)
+ #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
+ #define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
+-#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
++#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
+
+ /* Min number of samples required to perform peak-rate update */
+ #define BFQ_RATE_MIN_SAMPLES 32
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index d8a358e5e284..e1c6dc651be1 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -139,7 +139,7 @@ static struct kmem_cache *bfq_pool;
+ #define BFQQ_SEEK_THR (sector_t)(8 * 100)
+ #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
+ #define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
+-#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
++#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
+
+ /* Min number of samples required to perform peak-rate update */
+ #define BFQ_RATE_MIN_SAMPLES 32
+
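+One way to picture the BFQQ_SEEKY change above: seek_history is a 32-bit
+window with one bit per recent request (1 = seeky/random), and the queue is
+declared seeky when the population count of that window exceeds the
+threshold, now 19 instead of 4. A rough stand-alone C illustration
+(__builtin_popcount stands in for the kernel's hweight32; the sample
+history value is made up):
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+static bool queue_is_seeky(uint32_t seek_history, int threshold)
+{
+	return __builtin_popcount(seek_history) > threshold;
+}
+
+int main(void)
+{
+	uint32_t history = 0x00ff00ffu; /* 16 of the last 32 requests were random */
+
+	printf("old 4/32 rule:  %s\n", queue_is_seeky(history, 4) ? "seeky" : "sequential");
+	printf("new 19/32 rule: %s\n", queue_is_seeky(history, 19) ? "seeky" : "sequential");
+	return 0;
+}
+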
+From b739dda4e4b3a1cbbc905f86f9fbb0860b068ce7 Mon Sep 17 00:00:00 2001
+From: Chiara Bruschi <bruschi.chiara@outlook.it>
+Date: Mon, 11 Dec 2017 18:55:26 +0100
+Subject: [PATCH 09/23] block, bfq-sq, bfq-mq: specify usage condition of
+ delta_us in bfq_log_bfqq call
+
+Inside the function bfq_completed_request, the value of a variable
+called delta_us is computed as the current request completion time.
+delta_us is used inside a call to the function bfq_log_bfqq as the divisor
+in a division operation to compute a rate value, but no check makes
+sure that delta_us has a non-zero value. A divisor with value 0 leads
+to a division error that could result in a kernel oops (and therefore an
+unstable/unreliable system state) and consequently cause a kernel panic
+if resources are unavailable after the system fault.
+
+This commit fixes this call to bfq_log_bfqq specifying the condition
+that allows delta_us to be safely used as divisor.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Chiara Bruschi <bruschi.chiara@outlook.it>
+---
+ block/bfq-mq-iosched.c | 5 ++++-
+ block/bfq-sq-iosched.c | 5 ++++-
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index cb5f49ddecb6..6ce2c0789046 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4904,9 +4904,12 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ bfq_log_bfqq(bfqd, bfqq,
+ "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
++ delta_us > 0 ?
+ (USEC_PER_SEC*
+ (u64)((bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us))
+- >>BFQ_RATE_SHIFT,
++ >>BFQ_RATE_SHIFT :
++ (USEC_PER_SEC*
++ (u64)(bfqd->last_rq_max_size<<BFQ_RATE_SHIFT))>>BFQ_RATE_SHIFT,
+ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT);
+
+ /*
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index e1c6dc651be1..eff4c4edf5a0 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -4565,9 +4565,12 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+
+ bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
++ delta_us > 0 ?
+ (USEC_PER_SEC*
+ (u64)((bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us))
+- >>BFQ_RATE_SHIFT,
++ >>BFQ_RATE_SHIFT :
++ (USEC_PER_SEC*
++ (u64)(bfqd->last_rq_max_size<<BFQ_RATE_SHIFT))>>BFQ_RATE_SHIFT,
+ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT);
+
+ /*
+
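+The essence of the fix above is a guarded division for the logged rate:
+divide by delta_us only when it is strictly positive, otherwise fall back
+to the undivided value. A simplified stand-alone model (RATE_SHIFT and the
+sample numbers are illustrative, not the kernel's exact constants):
+
+#include <stdint.h>
+#include <stdio.h>
+
+#define RATE_SHIFT   16
+#define USEC_PER_SEC 1000000ULL
+
+static uint64_t logged_rate(uint32_t last_rq_max_size, uint32_t delta_us)
+{
+	uint64_t scaled = (uint64_t)last_rq_max_size << RATE_SHIFT;
+
+	/* only divide when delta_us is non-zero, as in the patched log call */
+	return delta_us > 0 ? (USEC_PER_SEC * (scaled / delta_us)) >> RATE_SHIFT
+			    : (USEC_PER_SEC * scaled) >> RATE_SHIFT;
+}
+
+int main(void)
+{
+	printf("delta 50us: %llu\n", (unsigned long long)logged_rate(4096, 50));
+	printf("delta 0us:  %llu\n", (unsigned long long)logged_rate(4096, 0));
+	return 0;
+}
+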
+From ae4310c13eca762644734d53074d8456c85e2dec Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 19 Dec 2017 12:07:12 +0100
+Subject: [PATCH 10/23] block, bfq-mq: limit tags for writes and async I/O
+
+Asynchronous I/O can easily starve synchronous I/O (both sync reads
+and sync writes), by consuming all request tags. Similarly, storms of
+synchronous writes, such as those that sync(2) may trigger, can starve
+synchronous reads. In turn, these two problems may also cause
+BFQ to lose control of latency for interactive and soft real-time
+applications. For example, on a PLEXTOR PX-256M5S SSD, LibreOffice
+Writer takes 0.6 seconds to start if the device is idle, but it takes
+more than 45 seconds (!) if there are sequential writes in the
+background.
+
+This commit addresses this issue by limiting the maximum percentage of
+tags that asynchronous I/O requests and synchronous write requests can
+consume. In particular, this commit grants a higher threshold to
+synchronous writes, to prevent the latter from being starved by
+asynchronous I/O.
+
+According to the above test, LibreOffice Writer now starts in about
+1.2 seconds on average, regardless of the background workload, and
+apart from some rare outlier. To check this improvement, run, e.g.,
+sudo ./comm_startup_lat.sh bfq-mq 5 5 seq 10 "lowriter --terminate_after_init"
+for the comm_startup_lat benchmark in the S suite [1].
+
+[1] https://github.com/Algodev-github/S
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ block/bfq-mq.h | 12 ++++++++
+ 2 files changed, 89 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 6ce2c0789046..f384f5566672 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -362,6 +362,82 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
+ }
+ }
+
++/*
++ * See the comments on bfq_limit_depth for the purpose of
++ * the depths set in the function.
++ */
++static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
++{
++ bfqd->sb_shift = bt->sb.shift;
++
++ /*
++ * In-word depths if no bfq_queue is being weight-raised:
++ * leaving 25% of tags only for sync reads.
++ *
++ * In next formulas, right-shift the value
++ * (1U<<bfqd->sb_shift), instead of computing directly
++ * (1U<<(bfqd->sb_shift - something)), to be robust against
++ * any possible value of bfqd->sb_shift, without having to
++ * limit 'something'.
++ */
++ /* no more than 50% of tags for async I/O */
++ bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
++ /*
++ * no more than 75% of tags for sync writes (25% extra tags
++ * w.r.t. async I/O, to prevent async I/O from starving sync
++ * writes)
++ */
++ bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
++
++ /*
++ * In-word depths in case some bfq_queue is being weight-
++ * raised: leaving ~63% of tags for sync reads. This is the
++ * highest percentage for which, in our tests, application
++ * start-up times didn't suffer from any regression due to tag
++ * shortage.
++ */
++ /* no more than ~18% of tags for async I/O */
++ bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
++ /* no more than ~37% of tags for sync writes (~20% extra tags) */
++ bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
++}
++
++/*
++ * Async I/O can easily starve sync I/O (both sync reads and sync
++ * writes), by consuming all tags. Similarly, storms of sync writes,
++ * such as those that sync(2) may trigger, can starve sync reads.
++ * Limit depths of async I/O and sync writes so as to counter both
++ * problems.
++ */
++static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
++{
++ struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
++ struct bfq_data *bfqd = data->q->elevator->elevator_data;
++ struct sbitmap_queue *bt;
++
++ if (op_is_sync(op) && !op_is_write(op))
++ return;
++
++ if (data->flags & BLK_MQ_REQ_RESERVED) {
++ if (unlikely(!tags->nr_reserved_tags)) {
++ WARN_ON_ONCE(1);
++ return;
++ }
++ bt = &tags->breserved_tags;
++ } else
++ bt = &tags->bitmap_tags;
++
++ if (unlikely(bfqd->sb_shift != bt->sb.shift))
++ bfq_update_depths(bfqd, bt);
++
++ data->shallow_depth =
++ bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
++
++ bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
++ __func__, bfqd->wr_busy_queues, op_is_sync(op),
++ data->shallow_depth);
++}
++
+ static struct bfq_queue *
+ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
+ sector_t sector, struct rb_node **ret_parent,
+@@ -5812,6 +5888,7 @@ static struct elv_fs_entry bfq_attrs[] = {
+
+ static struct elevator_type iosched_bfq_mq = {
+ .ops.mq = {
++ .limit_depth = bfq_limit_depth,
+ .prepare_request = bfq_prepare_request,
+ .finish_request = bfq_finish_request,
+ .exit_icq = bfq_exit_icq,
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index a5947b203ef2..458099ee0308 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -619,6 +619,18 @@ struct bfq_data {
+ struct bfq_queue *bio_bfqq;
+ /* Extra flag used only for TESTING */
+ bool bio_bfqq_set;
++
++ /*
++ * Cached sbitmap shift, used to compute depth limits in
++ * bfq_update_depths.
++ */
++ unsigned int sb_shift;
++
++ /*
++ * Depth limits used in bfq_limit_depth (see comments on the
++ * function)
++ */
++ unsigned int word_depths[2][2];
+ };
+
+ enum bfqq_state_flags {
+
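+The depth limits above boil down to four fractions of an sbitmap word,
+indexed by [any queue weight-raised?][request is sync?]. A stand-alone
+sketch of that computation (the 64-tag word and the helper names are
+assumptions; only the fractions come from the patch):
+
+#include <stdio.h>
+
+static unsigned int at_least_one(unsigned int v)
+{
+	return v > 0 ? v : 1U;
+}
+
+static void compute_word_depths(unsigned int sb_shift, unsigned int d[2][2])
+{
+	unsigned int word = 1U << sb_shift;
+
+	/* no weight-raised queue: leave 25% of tags for sync reads */
+	d[0][0] = at_least_one(word >> 1);       /* async I/O: at most 50% */
+	d[0][1] = at_least_one((word * 3) >> 2); /* sync writes: at most 75% */
+
+	/* some queue weight-raised: leave ~63% of tags for sync reads */
+	d[1][0] = at_least_one((word * 3) >> 4); /* async I/O: ~18% */
+	d[1][1] = at_least_one((word * 6) >> 4); /* sync writes: ~37% */
+}
+
+int main(void)
+{
+	unsigned int depths[2][2];
+
+	compute_word_depths(6, depths); /* e.g. 64 tags per sbitmap word */
+	printf("no wr:   async %u, sync writes %u\n", depths[0][0], depths[0][1]);
+	printf("with wr: async %u, sync writes %u\n", depths[1][0], depths[1][1]);
+	return 0;
+}
+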
+From 402e5f6b59662d290ab2b3c10b0016207a63ad21 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 21 Dec 2017 15:51:39 +0100
+Subject: [PATCH 11/23] bfq-sq, bfq-mq: limit sectors served with interactive
+ weight raising
+
+To maximise responsiveness, BFQ raises the weight, and performs device
+idling, for bfq_queues associated with processes deemed as
+interactive. In particular, weight raising has a maximum duration,
+equal to the time needed to start a large application. If a
+weight-raised process goes on doing I/O beyond this maximum duration,
+it loses weight-raising.
+
+This mechanism is evidently vulnerable to the following false
+positives: I/O-bound applications that will go on doing I/O for much
+longer than the duration of weight-raising. These applications have
+basically no benefit from being weight-raised at the beginning of
+their I/O. On the opposite end, while being weight-raised, these
+applications
+a) unjustly steal throughput from applications that may truly need
+low latency;
+b) make BFQ uselessly perform device idling; device idling results
+in loss of device throughput with most flash-based storage, and may
+increase latencies when used purposelessly.
+
+This commit adds a countermeasure to reduce both the above
+problems. To introduce this countermeasure, we provide the following
+extra piece of information (full details in the comments added by this
+commit). During the start-up of the large application used as a
+reference to set the duration of weight-raising, involved processes
+transfer at most ~110K sectors each. Accordingly, a process initially
+deemed as interactive has no right to be weight-raised any longer,
+once transferred 110K sectors or more.
+
+Based on this consideration, this commit ends weight-raising early
+for a bfq_queue if the latter happens to have received an amount of
+service at least equal to 110K sectors (actually, a little bit more,
+to keep a safety margin). I/O-bound applications that reach a high
+throughput, such as file copy, get to this threshold much before the
+allowed weight-raising period finishes. Thus this early ending of
+weight-raising reduces the amount of time during which these
+applications cause the problems described above.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 84 ++++++++++++++++++++++++++++++++++++++++++++------
+ block/bfq-mq.h | 5 +++
+ block/bfq-sched.c | 3 ++
+ block/bfq-sq-iosched.c | 84 ++++++++++++++++++++++++++++++++++++++++++++------
+ block/bfq.h | 5 +++
+ 5 files changed, 163 insertions(+), 18 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index f384f5566672..63fdd16dec3c 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -162,15 +162,17 @@ static struct kmem_cache *bfq_pool;
+ * interactive applications automatically, using the following formula:
+ * duration = (R / r) * T, where r is the peak rate of the device, and
+ * R and T are two reference parameters.
+- * In particular, R is the peak rate of the reference device (see below),
+- * and T is a reference time: given the systems that are likely to be
+- * installed on the reference device according to its speed class, T is
+- * about the maximum time needed, under BFQ and while reading two files in
+- * parallel, to load typical large applications on these systems.
+- * In practice, the slower/faster the device at hand is, the more/less it
+- * takes to load applications with respect to the reference device.
+- * Accordingly, the longer/shorter BFQ grants weight raising to interactive
+- * applications.
++ * In particular, R is the peak rate of the reference device (see
++ * below), and T is a reference time: given the systems that are
++ * likely to be installed on the reference device according to its
++ * speed class, T is about the maximum time needed, under BFQ and
++ * while reading two files in parallel, to load typical large
++ * applications on these systems (see the comments on
++ * max_service_from_wr below, for more details on how T is obtained).
++ * In practice, the slower/faster the device at hand is, the more/less
++ * it takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to
++ * interactive applications.
+ *
+ * BFQ uses four different reference pairs (R, T), depending on:
+ * . whether the device is rotational or non-rotational;
+@@ -207,6 +209,60 @@ static int T_slow[2];
+ static int T_fast[2];
+ static int device_speed_thresh[2];
+
++/*
++ * BFQ uses the above-detailed, time-based weight-raising mechanism to
++ * privilege interactive tasks. This mechanism is vulnerable to the
++ * following false positives: I/O-bound applications that will go on
++ * doing I/O for much longer than the duration of weight
++ * raising. These applications have basically no benefit from being
++ * weight-raised at the beginning of their I/O. On the opposite end,
++ * while being weight-raised, these applications
++ * a) unjustly steal throughput to applications that may actually need
++ * low latency;
++ * b) make BFQ uselessly perform device idling; device idling results
++ * in loss of device throughput with most flash-based storage, and may
++ * increase latencies when used purposelessly.
++ *
++ * BFQ tries to reduce these problems, by adopting the following
++ * countermeasure. To introduce this countermeasure, we need first to
++ * finish explaining how the duration of weight-raising for
++ * interactive tasks is computed.
++ *
++ * For a bfq_queue deemed as interactive, the duration of weight
++ * raising is dynamically adjusted, as a function of the estimated
++ * peak rate of the device, so as to be equal to the time needed to
++ * execute the 'largest' interactive task we benchmarked so far. By
++ * largest task, we mean the task for which each involved process has
++ * to do more I/O than for any of the other tasks we benchmarked. This
++ * reference interactive task is the start-up of LibreOffice Writer,
++ * and in this task each process/bfq_queue needs to have at most ~110K
++ * sectors transferred.
++ *
++ * This last piece of information enables BFQ to reduce the actual
++ * duration of weight-raising for at least one class of I/O-bound
++ * applications: those doing sequential or quasi-sequential I/O. An
++ * example is file copy. In fact, once started, the main I/O-bound
++ * processes of these applications usually consume the above 110K
++ * sectors in much less time than the processes of an application that
++ * is starting, because these I/O-bound processes will greedily devote
++ * almost all their CPU cycles only to their target,
++ * throughput-friendly I/O operations. This is even more true if BFQ
++ * happens to be underestimating the device peak rate, and thus
++ * overestimating the duration of weight raising. But, according to
++ * our measurements, once transferred 110K sectors, these processes
++ * have no right to be weight-raised any longer.
++ *
++ * Basing on the last consideration, BFQ ends weight-raising for a
++ * bfq_queue if the latter happens to have received an amount of
++ * service at least equal to the following constant. The constant is
++ * set to slightly more than 110K, to have a minimum safety margin.
++ *
++ * This early ending of weight-raising reduces the amount of time
++ * during which interactive false positives cause the two problems
++ * described at the beginning of these comments.
++ */
++static const unsigned long max_service_from_wr = 120000;
++
+ #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
+
+@@ -1361,6 +1417,7 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ if (old_wr_coeff == 1 && wr_or_deserves_wr) {
+ /* start a weight-raising period */
+ if (interactive) {
++ bfqq->service_from_wr = 0;
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+ } else {
+@@ -3980,6 +4037,15 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ "back to interactive wr");
+ }
+ }
++ if (bfqq->wr_coeff > 1 &&
++ bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
++ bfqq->service_from_wr > max_service_from_wr) {
++ /* see comments on max_service_from_wr */
++ bfq_bfqq_end_wr(bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "[%s] too much service",
++ __func__);
++ }
+ }
+ /*
+ * To improve latency (for this or other queues), immediately
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 458099ee0308..9a5ce1168ff5 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -331,6 +331,11 @@ struct bfq_queue {
+ * last transition from idle to backlogged.
+ */
+ unsigned long service_from_backlogged;
++ /*
++ * Cumulative service received from the @bfq_queue since its
++ * last transition to weight-raised state.
++ */
++ unsigned long service_from_wr;
+ /*
+ * Value of wr start time when switching to soft rt
+ */
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 9d261dd428e4..4e6c5232e2fb 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -942,6 +942,9 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
+ if (!bfqq->service_from_backlogged)
+ bfqq->first_IO_time = jiffies;
+
++ if (bfqq->wr_coeff > 1)
++ bfqq->service_from_wr += served;
++
+ bfqq->service_from_backlogged += served;
+ for_each_entity(entity) {
+ st = bfq_entity_service_tree(entity);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index eff4c4edf5a0..486493aafaf8 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -156,15 +156,17 @@ static struct kmem_cache *bfq_pool;
+ * interactive applications automatically, using the following formula:
+ * duration = (R / r) * T, where r is the peak rate of the device, and
+ * R and T are two reference parameters.
+- * In particular, R is the peak rate of the reference device (see below),
+- * and T is a reference time: given the systems that are likely to be
+- * installed on the reference device according to its speed class, T is
+- * about the maximum time needed, under BFQ and while reading two files in
+- * parallel, to load typical large applications on these systems.
+- * In practice, the slower/faster the device at hand is, the more/less it
+- * takes to load applications with respect to the reference device.
+- * Accordingly, the longer/shorter BFQ grants weight raising to interactive
+- * applications.
++ * In particular, R is the peak rate of the reference device (see
++ * below), and T is a reference time: given the systems that are
++ * likely to be installed on the reference device according to its
++ * speed class, T is about the maximum time needed, under BFQ and
++ * while reading two files in parallel, to load typical large
++ * applications on these systems (see the comments on
++ * max_service_from_wr below, for more details on how T is obtained).
++ * In practice, the slower/faster the device at hand is, the more/less
++ * it takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to
++ * interactive applications.
+ *
+ * BFQ uses four different reference pairs (R, T), depending on:
+ * . whether the device is rotational or non-rotational;
+@@ -201,6 +203,60 @@ static int T_slow[2];
+ static int T_fast[2];
+ static int device_speed_thresh[2];
+
++/*
++ * BFQ uses the above-detailed, time-based weight-raising mechanism to
++ * privilege interactive tasks. This mechanism is vulnerable to the
++ * following false positives: I/O-bound applications that will go on
++ * doing I/O for much longer than the duration of weight
++ * raising. These applications have basically no benefit from being
++ * weight-raised at the beginning of their I/O. On the opposite end,
++ * while being weight-raised, these applications
++ * a) unjustly steal throughput to applications that may actually need
++ * low latency;
++ * b) make BFQ uselessly perform device idling; device idling results
++ * in loss of device throughput with most flash-based storage, and may
++ * increase latencies when used purposelessly.
++ *
++ * BFQ tries to reduce these problems, by adopting the following
++ * countermeasure. To introduce this countermeasure, we need first to
++ * finish explaining how the duration of weight-raising for
++ * interactive tasks is computed.
++ *
++ * For a bfq_queue deemed as interactive, the duration of weight
++ * raising is dynamically adjusted, as a function of the estimated
++ * peak rate of the device, so as to be equal to the time needed to
++ * execute the 'largest' interactive task we benchmarked so far. By
++ * largest task, we mean the task for which each involved process has
++ * to do more I/O than for any of the other tasks we benchmarked. This
++ * reference interactive task is the start-up of LibreOffice Writer,
++ * and in this task each process/bfq_queue needs to have at most ~110K
++ * sectors transferred.
++ *
++ * This last piece of information enables BFQ to reduce the actual
++ * duration of weight-raising for at least one class of I/O-bound
++ * applications: those doing sequential or quasi-sequential I/O. An
++ * example is file copy. In fact, once started, the main I/O-bound
++ * processes of these applications usually consume the above 110K
++ * sectors in much less time than the processes of an application that
++ * is starting, because these I/O-bound processes will greedily devote
++ * almost all their CPU cycles only to their target,
++ * throughput-friendly I/O operations. This is even more true if BFQ
++ * happens to be underestimating the device peak rate, and thus
++ * overestimating the duration of weight raising. But, according to
++ * our measurements, once transferred 110K sectors, these processes
++ * have no right to be weight-raised any longer.
++ *
++ * Basing on the last consideration, BFQ ends weight-raising for a
++ * bfq_queue if the latter happens to have received an amount of
++ * service at least equal to the following constant. The constant is
++ * set to slightly more than 110K, to have a minimum safety margin.
++ *
++ * This early ending of weight-raising reduces the amount of time
++ * during which interactive false positives cause the two problems
++ * described at the beginning of these comments.
++ */
++static const unsigned long max_service_from_wr = 120000;
++
+ #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
+
+@@ -1246,6 +1302,7 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ if (old_wr_coeff == 1 && wr_or_deserves_wr) {
+ /* start a weight-raising period */
+ if (interactive) {
++ bfqq->service_from_wr = 0;
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+ } else {
+@@ -3794,6 +3851,15 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ "back to interactive wr");
+ }
+ }
++ if (bfqq->wr_coeff > 1 &&
++ bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
++ bfqq->service_from_wr > max_service_from_wr) {
++ /* see comments on max_service_from_wr */
++ bfq_bfqq_end_wr(bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "[%s] too much service",
++ __func__);
++ }
+ }
+ /*
+ * To improve latency (for this or other queues), immediately
+diff --git a/block/bfq.h b/block/bfq.h
+index 59539adc00a5..0cd7a3f251a7 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -323,6 +323,11 @@ struct bfq_queue {
+ * last transition from idle to backlogged.
+ */
+ unsigned long service_from_backlogged;
++ /*
++ * Cumulative service received from the @bfq_queue since its
++ * last transition to weight-raised state.
++ */
++ unsigned long service_from_wr;
+ /*
+ * Value of wr start time when switching to soft rt
+ */
+
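+Conceptually, the countermeasure above is an accumulator plus a cap:
+service received while interactively weight-raised is summed, and the boost
+is dropped as soon as the sum exceeds max_service_from_wr. A stand-alone
+model (toy_queue, the 30x coefficient and the 4096-sector steps are made-up
+illustration values):
+
+#include <stdio.h>
+
+#define MAX_SERVICE_FROM_WR 120000UL /* sectors, as in the patch */
+
+struct toy_queue {
+	unsigned int wr_coeff;         /* > 1 while weight-raised */
+	unsigned long service_from_wr; /* service received while raised */
+};
+
+static void account_service(struct toy_queue *q, unsigned long served)
+{
+	if (q->wr_coeff > 1)
+		q->service_from_wr += served;
+
+	if (q->wr_coeff > 1 && q->service_from_wr > MAX_SERVICE_FROM_WR) {
+		q->wr_coeff = 1; /* end interactive weight-raising early */
+		printf("weight-raising ended after %lu sectors\n",
+		       q->service_from_wr);
+	}
+}
+
+int main(void)
+{
+	struct toy_queue q = { .wr_coeff = 30, .service_from_wr = 0 };
+
+	while (q.wr_coeff > 1)
+		account_service(&q, 4096); /* e.g. a fast sequential copier */
+	return 0;
+}
+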
+From 59efebb94b2f9bac653faf62dadb45b83bd27fa7 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 4 Jan 2018 16:29:58 +0100
+Subject: [PATCH 12/23] bfq-sq, bfq-mq: put async queues for root bfq groups
+ too
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+For each pair [device for which bfq is selected as I/O scheduler,
+group in blkio/io], bfq maintains a corresponding bfq group. Each such
+bfq group contains a set of async queues, with each async queue
+created on demand, i.e., when some I/O request arrives for it. On
+creation, an async queue gets an extra reference, to make sure that
+the queue is not freed as long as its bfq group exists. Accordingly,
+to allow the queue to be freed after the group has exited, this extra
+reference must be released on group exit.
+
+The above holds also for a bfq root group, i.e., for the bfq group
+corresponding to the root blkio/io group for a given device. Yet, by
+mistake, the references to the existing async queues of a root group
+are not released when the latter exits. This causes a memory leak when
+the instance of bfq for a given device exits. In a similar vein,
+bfqg_stats_xfer_dead is not executed for a root group.
+
+This commit fixes bfq_pd_offline so that the latter executes the above
+missing operations for a root group too.
+
+Reported-by: Holger Hoffstätte <holger@applied-asynchrony.com>
+Reported-by: Guoqing Jiang <gqjiang@suse.com>
+Signed-off-by: Davide Ferrari <davideferrari8@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 562b0ce581a7..45fefb2e2d57 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -885,13 +885,13 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+
+ entity = bfqg->my_entity;
+
+- if (!entity) /* root group */
+- return;
+-
+ #ifdef BFQ_MQ
+ spin_lock_irqsave(&bfqd->lock, flags);
+ #endif
+
++ if (!entity) /* root group */
++ goto put_async_queues;
++
+ /*
+ * Empty all service_trees belonging to this group before
+ * deactivating the group itself.
+@@ -926,6 +926,8 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ BUG_ON(bfqg->sched_data.in_service_entity);
+
+ __bfq_deactivate_entity(entity, false);
++
++put_async_queues:
+ bfq_put_async_queues(bfqd, bfqg);
+
+ #ifdef BFQ_MQ
+
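+The leak fixed above is a plain reference-count imbalance: each async queue
+pins its group with one extra reference, and on group offline those
+references must be dropped for the root group as well. A toy model of that
+invariant (the types and counts are invented for illustration):
+
+#include <stdio.h>
+
+struct toy_queue { int ref; };
+
+struct toy_group {
+	struct toy_queue *async_queues[4];
+	int nr_async;
+};
+
+static void put_queue(struct toy_queue *q)
+{
+	if (--q->ref == 0)
+		printf("queue %p freed\n", (void *)q);
+}
+
+static void group_offline(struct toy_group *g)
+{
+	/* before the fix, the root group returned early and skipped this loop */
+	for (int i = 0; i < g->nr_async; i++)
+		put_queue(g->async_queues[i]);
+	g->nr_async = 0;
+}
+
+int main(void)
+{
+	struct toy_queue q0 = { .ref = 1 }, q1 = { .ref = 1 };
+	struct toy_group root = { .async_queues = { &q0, &q1 }, .nr_async = 2 };
+
+	group_offline(&root); /* both queues are now released */
+	return 0;
+}
+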
+From 2dfbaaaf95054e2da3ededc0deb1ba5a4f589e53 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 8 Jan 2018 19:38:45 +0100
+Subject: [PATCH 13/23] bfq-sq, bfq-mq: release oom-queue ref to root group on
+ exit
+
+On scheduler init, a reference to the root group, and a reference to
+its corresponding blkg are taken for the oom queue. Yet these
+references are not released on scheduler exit, which prevents these
+objects from being freed. This commit adds the missing reference
+releases.
+
+Reported-by: Davide Ferrari <davideferrari8@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 3 +++
+ block/bfq-sq-iosched.c | 3 +++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 63fdd16dec3c..b82c52fabf91 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -5507,6 +5507,9 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
++ /* release oom-queue reference to root group */
++ bfqg_and_blkg_put(bfqd->root_group);
++
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
+ #else
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 486493aafaf8..851af055664d 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -5052,6 +5052,9 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
++ /* release oom-queue reference to root group */
++ bfqg_put(bfqd->root_group);
++
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
+ #else
+
+From 13efe00c8292d78d223e1090a7f36426e360eb38 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 8 Jan 2018 19:40:38 +0100
+Subject: [PATCH 14/23] block, bfq-sq, bfq-mq: trace get and put of bfq groups
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 15 +++++++++++++++
+ block/bfq-mq-iosched.c | 3 ++-
+ 2 files changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 45fefb2e2d57..f94743fb2e7d 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -267,6 +267,8 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
+
+ static void bfqg_get(struct bfq_group *bfqg)
+ {
++ trace_printk("bfqg %p\n", bfqg);
++
+ #ifdef BFQ_MQ
+ bfqg->ref++;
+ #else
+@@ -280,6 +282,9 @@ static void bfqg_put(struct bfq_group *bfqg)
+ bfqg->ref--;
+
+ BUG_ON(bfqg->ref < 0);
++ trace_printk("putting bfqg %p %s\n", bfqg,
++ bfqg->ref == 0 ? "and freeing it" : "");
++
+ if (bfqg->ref == 0)
+ kfree(bfqg);
+ #else
+@@ -293,6 +298,7 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+ bfqg_get(bfqg);
+
++ trace_printk("getting blkg for bfqg %p\n", bfqg);
+ blkg_get(bfqg_to_blkg(bfqg));
+ }
+
+@@ -300,6 +306,7 @@ static void bfqg_and_blkg_put(struct bfq_group *bfqg)
+ {
+ bfqg_put(bfqg);
+
++ trace_printk("putting blkg for bfqg %p\n", bfqg);
+ blkg_put(bfqg_to_blkg(bfqg));
+ }
+ #endif
+@@ -382,6 +389,8 @@ static void bfq_init_entity(struct bfq_entity *entity,
+ * Make sure that bfqg and its associated blkg do not
+ * disappear before entity.
+ */
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting bfqg %p and blkg\n", __func__, bfqg);
++
+ bfqg_and_blkg_get(bfqg);
+ #else
+ bfqg_get(bfqg);
+@@ -475,6 +484,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
+ kfree(bfqg);
+ return NULL;
+ }
++ trace_printk("bfqg %p\n", bfqg);
+
+ #ifdef BFQ_MQ
+ /* see comments in bfq_bic_update_cgroup for why refcounting */
+@@ -513,6 +523,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
+ static void bfq_pd_free(struct blkg_policy_data *pd)
+ {
+ struct bfq_group *bfqg = pd_to_bfqg(pd);
++ trace_printk("bfqg %p\n", bfqg);
+
+ bfqg_stats_exit(&bfqg->stats);
+ #ifdef BFQ_MQ
+@@ -650,6 +661,8 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
+ }
+ #ifdef BFQ_MQ
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg);
++
+ bfqg_and_blkg_put(bfqq_group(bfqq));
+ #else
+ bfqg_put(bfqq_group(bfqq));
+@@ -658,6 +671,8 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ entity->parent = bfqg->my_entity;
+ entity->sched_data = &bfqg->sched_data;
+ #ifdef BFQ_MQ
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting blkg and bfqg %p\n", __func__, bfqg);
++
+ /* pin down bfqg and its associated blkg */
+ bfqg_and_blkg_get(bfqg);
+ #else
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index b82c52fabf91..d5b7a6b985d7 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4385,10 +4385,11 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ if (bfqq->bfqd)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+
+- kmem_cache_free(bfq_pool, bfqq);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg);
+ bfqg_and_blkg_put(bfqg);
+ #endif
++ kmem_cache_free(bfq_pool, bfqq);
+ }
+
+ static void bfq_put_cooperator(struct bfq_queue *bfqq)
+
+From 816b77fba966171974eb5ee25d81bc4e19eaf1b4 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 10 Jan 2018 09:08:22 +0100
+Subject: [PATCH 15/23] bfq-sq, bfq-mq: compile group put for oom queue only if
+ BFQ_GROUP_IOSCHED is set
+
+Commit ("bfq-sq, bfq-mq: release oom-queue ref to root group on exit")
+added a missing put of the root bfq group for the oom queue. That put
+has to be, and can be, performed only if CONFIG_BFQ_GROUP_IOSCHED is
+defined: the function doing the put is not even defined at all if
+CONFIG_BFQ_GROUP_IOSCHED is not defined. But that commit made the
+put be invoked regardless of whether CONFIG_BFQ_GROUP_IOSCHED is
+defined. This commit fixes this mistake by compiling that invocation
+only when CONFIG_BFQ_GROUP_IOSCHED is actually defined.
+
+Fixes ("block, bfq: release oom-queue ref to root group on exit")
+Reported-by: Jan Alexander Steffens <jan.steffens@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ block/bfq-sq-iosched.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index d5b7a6b985d7..2581fe0f6f2f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -5508,10 +5508,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ /* release oom-queue reference to root group */
+ bfqg_and_blkg_put(bfqd->root_group);
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
+ #else
+ spin_lock_irq(&bfqd->lock);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 851af055664d..c4df156b1fb4 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -5052,10 +5052,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ /* release oom-queue reference to root group */
+ bfqg_put(bfqd->root_group);
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
+ #else
+ bfq_put_async_queues(bfqd, bfqd->root_group);
+
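+The fix above is purely about preprocessor scoping: the put must sit inside
+the same conditional block that defines the function performing it. A
+minimal illustration of the pattern (GROUP_ENABLED and the function names
+are placeholders, not the kernel symbols):
+
+#include <stdio.h>
+
+#define GROUP_ENABLED 1 /* stands in for BFQ_GROUP_IOSCHED_ENABLED */
+
+#if GROUP_ENABLED
+static void group_put(const char *who)
+{
+	printf("dropping group reference for %s\n", who);
+}
+#endif
+
+static void exit_queue(void)
+{
+#if GROUP_ENABLED
+	/* only compiled when the group code (and group_put) exists */
+	group_put("oom queue");
+#endif
+	printf("scheduler torn down\n");
+}
+
+int main(void)
+{
+	exit_queue();
+	return 0;
+}
+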
+From 643a89c659172b2c9ae16adfe03af4e3e88e1326 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Sat, 13 Jan 2018 18:48:41 +0100
+Subject: [PATCH 16/23] block, bfq-sq, bfq-mq: remove trace_printks
+
+Commit ("block, bfq-sq, bfq-mq: trace get and put of bfq groups")
+unwisely added some invocations of the function trace_printk, which
+is inappropriate in production kernels. This commit removes those
+invocations.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index f94743fb2e7d..a4f8a03edfc9 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -267,8 +267,6 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
+
+ static void bfqg_get(struct bfq_group *bfqg)
+ {
+- trace_printk("bfqg %p\n", bfqg);
+-
+ #ifdef BFQ_MQ
+ bfqg->ref++;
+ #else
+@@ -282,9 +280,6 @@ static void bfqg_put(struct bfq_group *bfqg)
+ bfqg->ref--;
+
+ BUG_ON(bfqg->ref < 0);
+- trace_printk("putting bfqg %p %s\n", bfqg,
+- bfqg->ref == 0 ? "and freeing it" : "");
+-
+ if (bfqg->ref == 0)
+ kfree(bfqg);
+ #else
+@@ -298,7 +293,6 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+ bfqg_get(bfqg);
+
+- trace_printk("getting blkg for bfqg %p\n", bfqg);
+ blkg_get(bfqg_to_blkg(bfqg));
+ }
+
+@@ -306,7 +300,6 @@ static void bfqg_and_blkg_put(struct bfq_group *bfqg)
+ {
+ bfqg_put(bfqg);
+
+- trace_printk("putting blkg for bfqg %p\n", bfqg);
+ blkg_put(bfqg_to_blkg(bfqg));
+ }
+ #endif
+@@ -484,8 +477,6 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
+ kfree(bfqg);
+ return NULL;
+ }
+- trace_printk("bfqg %p\n", bfqg);
+-
+ #ifdef BFQ_MQ
+ /* see comments in bfq_bic_update_cgroup for why refcounting */
+ bfqg_get(bfqg);
+@@ -523,7 +514,6 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
+ static void bfq_pd_free(struct blkg_policy_data *pd)
+ {
+ struct bfq_group *bfqg = pd_to_bfqg(pd);
+- trace_printk("bfqg %p\n", bfqg);
+
+ bfqg_stats_exit(&bfqg->stats);
+ #ifdef BFQ_MQ
+
+From ce050275e24fecec800f346c09d9494563e9fc8a Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 15 Jan 2018 15:07:05 +0100
+Subject: [PATCH 17/23] block, bfq-mq: add requeue-request hook
+
+Commit 'a6a252e64914 ("blk-mq-sched: decide how to handle flush rq via
+RQF_FLUSH_SEQ")' makes all non-flush re-prepared requests for a device
+be re-inserted into the active I/O scheduler for that device. As a
+consequence, I/O schedulers may get the same request inserted again,
+even several times, without a finish_request invoked on that request
+before each re-insertion.
+
+This fact is the cause of the failure reported in [1]. For an I/O
+scheduler, every re-insertion of the same re-prepared request is
+equivalent to the insertion of a new request. For schedulers like
+mq-deadline or kyber, this fact causes no harm. In contrast, it
+confuses a stateful scheduler like BFQ, which keeps state for an I/O
+request, until the finish_request hook is invoked on the request. In
+particular, BFQ may get stuck, waiting forever for the number of
+dispatches of the same request to be balanced by an equal
+number of completions (while there will be only one completion for
+that request). In this state, BFQ may refuse to serve I/O requests
+from other bfq_queues. The hang reported in [1] then follows.
+
+However, the above re-prepared requests undergo a requeue, thus the
+requeue_request hook of the active elevator is invoked for these
+requests, if set. This commit then addresses the above issue by
+properly implementing the hook requeue_request in BFQ.
+
+[1] https://marc.info/?l=linux-block&m=151211117608676
+
+Reported-by: Ivan Kozik <ivan@ludios.org>
+Reported-by: Alban Browaeys <alban.browaeys@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Serena Ziviani <ziviani.serena@gmail.com>
+---
+ block/bfq-mq-iosched.c | 90 ++++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 73 insertions(+), 17 deletions(-)
+
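+The core idea of the hook above is idempotent teardown: requeue and finish
+may be invoked spuriously, so the handler first checks that the request is
+still known to the scheduler, drops its references once, and then clears
+the private fields so a repeated call does nothing. A stand-alone model of
+that pattern (toy_request and the counter are invented for illustration):
+
+#include <stdio.h>
+
+struct toy_request {
+	void *sched_data; /* non-NULL only while owned by the scheduler */
+};
+
+static int allocated = 1; /* scheduler-side bookkeeping to balance */
+
+static void finish_or_requeue(struct toy_request *rq)
+{
+	if (!rq->sched_data) /* spurious second invocation: nothing to do */
+		return;
+
+	allocated--;           /* drop the reference taken at prepare time */
+	rq->sched_data = NULL; /* make any further invocation a no-op */
+	printf("released, allocated now %d\n", allocated);
+}
+
+int main(void)
+{
+	int data = 42;
+	struct toy_request rq = { .sched_data = &data };
+
+	finish_or_requeue(&rq); /* real completion or requeue */
+	finish_or_requeue(&rq); /* spurious repeat: harmless */
+	return 0;
+}
+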
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 2581fe0f6f2f..bb7ccc2f1165 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4162,9 +4162,9 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ * TESTING: reset DISP_LIST flag, because: 1)
+ * this rq this request has passed through
+ * bfq_prepare_request, 2) then it will have
+- * bfq_finish_request invoked on it, and 3) in
+- * bfq_finish_request we use this flag to check
+- * that bfq_finish_request is not invoked on
++ * bfq_finish_requeue_request invoked on it, and 3) in
++ * bfq_finish_requeue_request we use this flag to check
++ * that bfq_finish_requeue_request is not invoked on
+ * requests for which bfq_prepare_request has
+ * been invoked.
+ */
+@@ -4173,8 +4173,8 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ }
+
+ /*
+- * We exploit the bfq_finish_request hook to decrement
+- * rq_in_driver, but bfq_finish_request will not be
++ * We exploit the bfq_finish_requeue_request hook to decrement
++ * rq_in_driver, but bfq_finish_requeue_request will not be
+ * invoked on this request. So, to avoid unbalance,
+ * just start this request, without incrementing
+ * rq_in_driver. As a negative consequence,
+@@ -4183,10 +4183,10 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ * bfq_schedule_dispatch to be invoked uselessly.
+ *
+ * As for implementing an exact solution, the
+- * bfq_finish_request hook, if defined, is probably
++ * bfq_finish_requeue_request hook, if defined, is probably
+ * invoked also on this request. So, by exploiting
+ * this hook, we could 1) increment rq_in_driver here,
+- * and 2) decrement it in bfq_finish_request. Such a
++ * and 2) decrement it in bfq_finish_requeue_request. Such a
+ * solution would let the value of the counter be
+ * always accurate, but it would entail using an extra
+ * interface function. This cost seems higher than the
+@@ -4878,6 +4878,8 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ return idle_timer_disabled;
+ }
+
++static void bfq_prepare_request(struct request *rq, struct bio *bio);
++
+ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ bool at_head)
+ {
+@@ -4919,6 +4921,20 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ BUG_ON(!(rq->rq_flags & RQF_GOT));
+ rq->rq_flags &= ~RQF_GOT;
+
++ if (!bfqq) {
++ /*
++ * This should never happen. Most likely rq is
++ * a requeued regular request, being
++ * re-inserted without being first
++ * re-prepared. Do a prepare, to avoid
++ * failure.
++ */
++ pr_warn("Regular request associated with no queue");
++ WARN_ON(1);
++ bfq_prepare_request(rq, rq->bio);
++ bfqq = RQ_BFQQ(rq);
++ }
++
+ #if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ idle_timer_disabled = __bfq_insert_request(bfqd, rq);
+ /*
+@@ -5110,7 +5126,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ }
+ }
+
+-static void bfq_finish_request_body(struct bfq_queue *bfqq)
++static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "put_request_body: allocated %d", bfqq->allocated);
+@@ -5120,7 +5136,13 @@ static void bfq_finish_request_body(struct bfq_queue *bfqq)
+ bfq_put_queue(bfqq);
+ }
+
+-static void bfq_finish_request(struct request *rq)
++/*
++ * Handle either a requeue or a finish for rq. The things to do are
++ * the same in both cases: all references to rq are to be dropped. In
++ * particular, rq is considered completed from the point of view of
++ * the scheduler.
++ */
++static void bfq_finish_requeue_request(struct request *rq)
+ {
+ struct bfq_queue *bfqq;
+ struct bfq_data *bfqd;
+@@ -5128,11 +5150,27 @@ static void bfq_finish_request(struct request *rq)
+
+ BUG_ON(!rq);
+
+- if (!rq->elv.icq)
++ bfqq = RQ_BFQQ(rq);
++
++ /*
++ * Requeue and finish hooks are invoked in blk-mq without
++ * checking whether the involved request is actually still
++ * referenced in the scheduler. To handle this fact, the
++ * following two checks make this function exit in case of
++ * spurious invocations, for which there is nothing to do.
++ *
++ * First, check whether rq has nothing to do with an elevator.
++ */
++ if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
+ return;
+
+- bfqq = RQ_BFQQ(rq);
+- BUG_ON(!bfqq);
++ /*
++ * rq either is not associated with any icq, or is an already
++ * requeued request that has not (yet) been re-inserted into
++ * a bfq_queue.
++ */
++ if (!rq->elv.icq || !bfqq)
++ return;
+
+ bic = RQ_BIC(rq);
+ BUG_ON(!bic);
+@@ -5145,7 +5183,6 @@ static void bfq_finish_request(struct request *rq)
+ BUG();
+ }
+ BUG_ON(rq->rq_flags & RQF_QUEUED);
+- BUG_ON(!(rq->rq_flags & RQF_ELVPRIV));
+
+ bfq_log_bfqq(bfqd, bfqq,
+ "putting rq %p with %u sects left, STARTED %d",
+@@ -5166,13 +5203,14 @@ static void bfq_finish_request(struct request *rq)
+ spin_lock_irqsave(&bfqd->lock, flags);
+
+ bfq_completed_request(bfqq, bfqd);
+- bfq_finish_request_body(bfqq);
++ bfq_finish_requeue_request_body(bfqq);
+
+ spin_unlock_irqrestore(&bfqd->lock, flags);
+ } else {
+ /*
+ * Request rq may be still/already in the scheduler,
+- * in which case we need to remove it. And we cannot
++ * in which case we need to remove it (this should
++ * never happen in case of requeue). And we cannot
+ * defer such a check and removal, to avoid
+ * inconsistencies in the time interval from the end
+ * of this function to the start of the deferred work.
+@@ -5189,9 +5227,26 @@ static void bfq_finish_request(struct request *rq)
+ bfqg_stats_update_io_remove(bfqq_group(bfqq),
+ rq->cmd_flags);
+ }
+- bfq_finish_request_body(bfqq);
++ bfq_finish_requeue_request_body(bfqq);
+ }
+
++ /*
++ * Reset private fields. In case of a requeue, this allows
++ * this function to correctly do nothing if it is spuriously
++ * invoked again on this same request (see the check at the
++ * beginning of the function). Probably, a better general
++ * design would be to prevent blk-mq from invoking the requeue
++ * or finish hooks of an elevator, for a request that is not
++ * referred by that elevator.
++ *
++ * Resetting the following fields would break the
++ * request-insertion logic if rq is re-inserted into a bfq
++ * internal queue, without a re-preparation. Here we assume
++ * that re-insertions of requeued requests, without
++ * re-preparation, can happen only for pass_through or at_head
++ * requests (which are not re-inserted into bfq internal
++ * queues).
++ */
+ rq->elv.priv[0] = NULL;
+ rq->elv.priv[1] = NULL;
+ }
+@@ -5960,7 +6015,8 @@ static struct elevator_type iosched_bfq_mq = {
+ .ops.mq = {
+ .limit_depth = bfq_limit_depth,
+ .prepare_request = bfq_prepare_request,
+- .finish_request = bfq_finish_request,
++ .requeue_request = bfq_finish_requeue_request,
++ .finish_request = bfq_finish_requeue_request,
+ .exit_icq = bfq_exit_icq,
+ .insert_requests = bfq_insert_requests,
+ .dispatch_request = bfq_dispatch_request,
+
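
The comments in the hunks above describe a defensive pattern: the same handler is wired to both the requeue and the finish hook, and clearing rq->elv.priv[] on the first call turns any spurious later call into a no-op. Below is a minimal, self-contained C sketch of that idempotence pattern only; demo_request and demo_teardown are invented names, and printf() stands in for the real reference-dropping work — nothing here is taken from the patch itself.

#include <stdio.h>

struct demo_request {
	void *sched_private;		/* stands in for rq->elv.priv[] */
};

/* One handler serves both the requeue and the finish case. */
static void demo_teardown(struct demo_request *rq)
{
	if (!rq->sched_private)		/* spurious repeat call: nothing to do */
		return;

	printf("dropping scheduler state %p\n", rq->sched_private);
	rq->sched_private = NULL;	/* later calls become harmless no-ops */
}

int main(void)
{
	struct demo_request rq = { .sched_private = &rq };

	demo_teardown(&rq);	/* genuine finish/requeue: drops the state */
	demo_teardown(&rq);	/* spurious re-invocation: silently ignored */
	return 0;
}

The guard-then-clear shape mirrors the checks at the top of bfq_finish_requeue_request() and the NULL-ing of rq->elv.priv[] at its end.
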
+From 3e4f292191cc62b3844316b9741534c3f1b36f0a Mon Sep 17 00:00:00 2001
+From: Davide Paganelli <paga.david@gmail.com>
+Date: Thu, 8 Feb 2018 12:19:24 +0100
+Subject: [PATCH 18/23] block, bfq-mq, bfq-sq: make log functions print names
+ of calling functions
+
+Add the macro __func__ as a parameter to the invocations of the functions
+pr_crit, blk_add_trace_msg and blk_add_cgroup_trace_msg in bfq_log*
+functions, so that the names of the calling functions are included
+automatically in the log messages and no longer have to be passed by
+hand at each call site.
+
+Signed-off-by: Davide Paganelli <paga.david@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 9 +--
+ block/bfq-mq-iosched.c | 167 ++++++++++++++++++++++----------------------
+ block/bfq-mq.h | 33 ++++-----
+ block/bfq-sched.c | 54 +++++++-------
+ block/bfq-sq-iosched.c | 134 +++++++++++++++++------------------
+ block/bfq.h | 33 ++++-----
+ 6 files changed, 214 insertions(+), 216 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index a4f8a03edfc9..613f154e9da5 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -382,7 +382,8 @@ static void bfq_init_entity(struct bfq_entity *entity,
+ * Make sure that bfqg and its associated blkg do not
+ * disappear before entity.
+ */
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting bfqg %p and blkg\n", __func__, bfqg);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "getting bfqg %p and blkg\n",
++ bfqg);
+
+ bfqg_and_blkg_get(bfqg);
+ #else
+@@ -651,7 +652,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
+ }
+ #ifdef BFQ_MQ
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "putting blkg and bfqg %p\n", bfqg);
+
+ bfqg_and_blkg_put(bfqq_group(bfqq));
+ #else
+@@ -661,7 +662,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ entity->parent = bfqg->my_entity;
+ entity->sched_data = &bfqg->sched_data;
+ #ifdef BFQ_MQ
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting blkg and bfqg %p\n", __func__, bfqg);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "getting blkg and bfqg %p\n", bfqg);
+
+ /* pin down bfqg and its associated blkg */
+ bfqg_and_blkg_get(bfqg);
+@@ -721,7 +722,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
+ if (entity->sched_data != &bfqg->sched_data) {
+ bic_set_bfqq(bic, NULL, 0);
+ bfq_log_bfqq(bfqd, async_bfqq,
+- "bic_change_group: %p %d",
++ "%p %d",
+ async_bfqq,
+ async_bfqq->ref);
+ bfq_put_queue(async_bfqq);
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index bb7ccc2f1165..edc93b6af186 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -310,7 +310,7 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
+ static void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ {
+ if (bfqd->queued != 0) {
+- bfq_log(bfqd, "schedule dispatch");
++ bfq_log(bfqd, "");
+ blk_mq_run_hw_queues(bfqd->queue, true);
+ }
+ }
+@@ -489,8 +489,8 @@ static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+ data->shallow_depth =
+ bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
+
+- bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
+- __func__, bfqd->wr_busy_queues, op_is_sync(op),
++ bfq_log(bfqd, "wr_busy %d sync %d depth %u",
++ bfqd->wr_busy_queues, op_is_sync(op),
+ data->shallow_depth);
+ }
+
+@@ -528,7 +528,7 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
+ if (rb_link)
+ *rb_link = p;
+
+- bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ bfq_log(bfqd, "%llu: returning %d",
+ (unsigned long long) sector,
+ bfqq ? bfqq->pid : 0);
+
+@@ -749,7 +749,7 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
+ if (rq == last || ktime_get_ns() < rq->fifo_time)
+ return NULL;
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "returned %p", rq);
+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
+ return rq;
+ }
+@@ -842,7 +842,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
+ bfq_serv_to_charge(next_rq, bfqq));
+ if (entity->budget != new_budget) {
+ entity->budget = new_budget;
+- bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++ bfq_log_bfqq(bfqd, bfqq, "new budget %lu",
+ new_budget);
+ bfq_requeue_bfqq(bfqd, bfqq, false);
+ }
+@@ -915,8 +915,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
+- __func__,
++ "bic %p wr_coeff %d start_finish %lu max_time %lu",
+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
+ bfqq->wr_cur_max_time);
+
+@@ -929,11 +928,11 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ bfq_wr_duration(bfqd))) {
+ switch_back_to_interactive_wr(bfqq, bfqd);
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "resume state: switching back to interactive");
++ "switching back to interactive");
+ } else {
+ bfqq->wr_coeff = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "resume state: switching off wr (%lu + %lu < %lu)",
++ "switching off wr (%lu + %lu < %lu)",
+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
+ jiffies);
+ }
+@@ -985,7 +984,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ /* Increment burst size to take into account also bfqq */
+ bfqd->burst_size++;
+
+- bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size);
++ bfq_log_bfqq(bfqd, bfqq, "%d", bfqd->burst_size);
+
+ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh);
+
+@@ -998,7 +997,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ * other to consider this burst as large.
+ */
+ bfqd->large_burst = true;
+- bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started");
++ bfq_log_bfqq(bfqd, bfqq, "large burst started");
+
+ /*
+ * We can now mark all queues in the burst list as
+@@ -1170,7 +1169,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfqd->large_burst = false;
+ bfq_reset_burst_list(bfqd, bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "handle_burst: late activation or different group");
++ "late activation or different group");
+ goto end;
+ }
+
+@@ -1180,7 +1179,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ * bfqq as belonging to this large burst immediately.
+ */
+ if (bfqd->large_burst) {
+- bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst");
++ bfq_log_bfqq(bfqd, bfqq, "marked in burst");
+ bfq_mark_bfqq_in_large_burst(bfqq);
+ goto end;
+ }
+@@ -1686,7 +1685,7 @@ static void bfq_add_request(struct request *rq)
+ unsigned int old_wr_coeff = bfqq->wr_coeff;
+ bool interactive = false;
+
+- bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s",
++ bfq_log_bfqq(bfqd, bfqq, "size %u %s",
+ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A");
+
+ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */
+@@ -1952,7 +1951,7 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
+ __rq = bfq_find_rq_fmerge(bfqd, bio, q);
+ if (__rq && elv_bio_merge_ok(__rq, bio)) {
+ *req = __rq;
+- bfq_log(bfqd, "request_merge: req %p", __rq);
++ bfq_log(bfqd, "req %p", __rq);
+
+ return ELEVATOR_FRONT_MERGE;
+ }
+@@ -1989,7 +1988,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ bfqq->next_rq = next_rq;
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "request_merged: req %p prev %p next_rq %p bfqq %p",
++ "req %p prev %p next_rq %p bfqq %p",
+ req, prev, next_rq, bfqq);
+
+ /*
+@@ -2018,7 +2017,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+ goto end;
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "requests_merged: rq %p next %p bfqq %p next_bfqq %p",
++ "rq %p next %p bfqq %p next_bfqq %p",
+ rq, next, bfqq, next_bfqq);
+
+ spin_lock_irq(&bfqq->bfqd->lock);
+@@ -2069,10 +2068,10 @@ static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
+ */
+ bfqq->entity.prio_changed = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "end_wr: wrais ending at %lu, rais_max_time %u",
++ "wrais ending at %lu, rais_max_time %u",
+ bfqq->last_wr_start_finish,
+ jiffies_to_msecs(bfqq->wr_cur_max_time));
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d",
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "wr_busy %d",
+ bfqq->bfqd->wr_busy_queues);
+ }
+
+@@ -2245,8 +2244,8 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ {
+ if (bfq_too_late_for_merging(new_bfqq)) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "[%s] too late for bfq%d to be merged",
+- __func__, new_bfqq->pid);
++ "too late for bfq%d to be merged",
++ new_bfqq->pid);
+ return false;
+ }
+
+@@ -2395,8 +2394,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ }
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
+- __func__,
++ "bic %p wr_coeff %d start_finish %lu max_time %lu",
+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
+ bfqq->wr_cur_max_time);
+ }
+@@ -2453,7 +2451,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+
+ }
+
+- bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
++ bfq_log_bfqq(bfqd, new_bfqq, "wr_busy %d",
+ bfqd->wr_busy_queues);
+
+ /*
+@@ -2554,7 +2552,7 @@ static void bfq_set_budget_timeout(struct bfq_data *bfqd,
+ bfqq->budget_timeout = jiffies +
+ bfqd->bfq_timeout * timeout_coeff;
+
+- bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ bfq_log_bfqq(bfqd, bfqq, "%u",
+ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff));
+ }
+
+@@ -2620,10 +2618,10 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+
+ bfq_set_budget_timeout(bfqd, bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_in_service_queue, cur-budget = %d",
++ "cur-budget = %d",
+ bfqq->entity.budget);
+ } else
+- bfq_log(bfqd, "set_in_service_queue: NULL");
++ bfq_log(bfqd, "NULL");
+
+ bfqd->in_service_queue = bfqq;
+ }
+@@ -2746,7 +2744,7 @@ static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq
+ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
+
+ bfq_log(bfqd,
+- "reset_rate_computation at end, sample %u/%u tot_sects %llu",
++ "at end, sample %u/%u tot_sects %llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ bfqd->tot_sectors_dispatched);
+ }
+@@ -2766,7 +2764,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
+ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) {
+ bfq_log(bfqd,
+- "update_rate_reset: only resetting, delta_first %lluus samples %d",
++ "only resetting, delta_first %lluus samples %d",
+ bfqd->delta_from_first>>10, bfqd->peak_rate_samples);
+ goto reset_computation;
+ }
+@@ -2790,7 +2788,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
+
+ bfq_log(bfqd,
+-"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
++"tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
+ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ rate > 20<<BFQ_RATE_SHIFT);
+@@ -2805,14 +2803,14 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ rate <= bfqd->peak_rate) ||
+ rate > 20<<BFQ_RATE_SHIFT) {
+ bfq_log(bfqd,
+- "update_rate_reset: goto reset, samples %u/%u rate/peak %llu/%llu",
++ "goto reset, samples %u/%u rate/peak %llu/%llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+ goto reset_computation;
+ } else {
+ bfq_log(bfqd,
+- "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu",
++ "do update, samples %u/%u rate/peak %llu/%llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+@@ -2868,7 +2866,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ rate /= divisor; /* smoothing constant alpha = 1/divisor */
+
+ bfq_log(bfqd,
+- "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u",
++ "divisor %d tmp_peak_rate %llu tmp_rate %u",
+ divisor,
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT),
+ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT));
+@@ -2922,7 +2920,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+
+ if (bfqd->peak_rate_samples == 0) { /* first dispatch */
+ bfq_log(bfqd,
+- "update_peak_rate: goto reset, samples %d",
++ "goto reset, samples %d",
+ bfqd->peak_rate_samples) ;
+ bfq_reset_rate_computation(bfqd, rq);
+ goto update_last_values; /* will add one sample */
+@@ -2943,7 +2941,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
+ bfqd->rq_in_driver == 0) {
+ bfq_log(bfqd,
+-"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d",
++"jumping to updating&resetting delta_last %lluus samples %d",
+ (now_ns - bfqd->last_dispatch)>>10,
+ bfqd->peak_rate_samples) ;
+ goto update_rate_and_reset;
+@@ -2969,7 +2967,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
+
+ bfq_log(bfqd,
+- "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus",
++ "added samples %u/%u tot_sects %llu delta_first %lluus",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ bfqd->tot_sectors_dispatched,
+ bfqd->delta_from_first>>10);
+@@ -2985,12 +2983,12 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ bfqd->last_dispatch = now_ns;
+
+ bfq_log(bfqd,
+- "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu",
++ "delta_first %lluus last_pos %llu peak_rate %llu",
+ (now_ns - bfqd->first_dispatch)>>10,
+ (unsigned long long) bfqd->last_position,
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+ bfq_log(bfqd,
+- "update_peak_rate: samples at end %d", bfqd->peak_rate_samples);
++ "samples at end %d", bfqd->peak_rate_samples);
+ }
+
+ /*
+@@ -3088,11 +3086,11 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
+ */
+ budget = 2 * min_budget;
+
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
++ bfq_log_bfqq(bfqd, bfqq, "last budg %d, budg left %d",
+ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
++ bfq_log_bfqq(bfqd, bfqq, "last max_budg %d, min budg %d",
+ budget, bfq_min_budget(bfqd));
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_log_bfqq(bfqd, bfqq, "sync %d, seeky %d",
+ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
+
+ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
+@@ -3294,7 +3292,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ else /* charge at least one seek */
+ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
+
+- bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs);
++ bfq_log(bfqd, "too short %u", delta_usecs);
+
+ return slow;
+ }
+@@ -3317,11 +3315,11 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * peak rate.
+ */
+ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
+- bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d",
++ bfq_log(bfqd, "relative rate %d/%d",
+ bfqq->entity.service, bfqd->bfq_max_budget);
+ }
+
+- bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
++ bfq_log_bfqq(bfqd, bfqq, "slow %d", slow);
+
+ return slow;
+ }
+@@ -3423,7 +3421,7 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqd, bfqq,
+-"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u",
++"service_blkg %lu soft_rate %u sects/sec interval %u",
+ bfqq->service_from_backlogged,
+ bfqd->bfq_wr_max_softrt_rate,
+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
+@@ -3602,7 +3600,7 @@ static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
+ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "may_budget_timeout: wait_request %d left %d timeout %d",
++ "wait_request %d left %d timeout %d",
+ bfq_bfqq_wait_request(bfqq),
+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
+ bfq_bfqq_budget_timeout(bfqq));
+@@ -3863,11 +3861,11 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ * either boosts the throughput (without issues), or is
+ * necessary to preserve service guarantees.
+ */
+- bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d",
++ bfq_log_bfqq(bfqd, bfqq, "sync %d idling_boosts_thr %d",
+ bfq_bfqq_sync(bfqq), idling_boosts_thr);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "may_idle: wr_busy %d boosts %d IO-bound %d guar %d",
++ "wr_busy %d boosts %d IO-bound %d guar %d",
+ bfqd->wr_busy_queues,
+ idling_boosts_thr_without_issues,
+ bfq_bfqq_IO_bound(bfqq),
+@@ -3907,7 +3905,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ if (!bfqq)
+ goto new_queue;
+
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++ bfq_log_bfqq(bfqd, bfqq, "already in-service queue");
+
+ if (bfq_may_expire_for_budg_timeout(bfqq) &&
+ !bfq_bfqq_wait_request(bfqq) &&
+@@ -3983,14 +3981,14 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ new_queue:
+ bfqq = bfq_set_in_service_queue(bfqd);
+ if (bfqq) {
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
++ bfq_log_bfqq(bfqd, bfqq, "checking new queue");
+ goto check_queue;
+ }
+ keep_queue:
+ if (bfqq)
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
++ bfq_log_bfqq(bfqd, bfqq, "returned this queue");
+ else
+- bfq_log(bfqd, "select_queue: no queue returned");
++ bfq_log(bfqd, "no queue returned");
+
+ return bfqq;
+ }
+@@ -4043,8 +4041,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ /* see comments on max_service_from_wr */
+ bfq_bfqq_end_wr(bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "[%s] too much service",
+- __func__);
++ "too much service");
+ }
+ }
+ /*
+@@ -4122,7 +4119,7 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+
+- bfq_log(bfqd, "has_work, dispatch_non_empty %d busy_queues %d",
++ bfq_log(bfqd, "dispatch_non_empty %d busy_queues %d",
+ !list_empty_careful(&bfqd->dispatch), bfqd->busy_queues > 0);
+
+ /*
+@@ -4146,7 +4143,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ rq->rq_flags &= ~RQF_DISP_LIST;
+
+ bfq_log(bfqd,
+- "dispatch requests: picked %p from dispatch list", rq);
++ "picked %p from dispatch list", rq);
+ bfqq = RQ_BFQQ(rq);
+
+ if (bfqq) {
+@@ -4196,7 +4193,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ goto start_rq;
+ }
+
+- bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++ bfq_log(bfqd, "%d busy queues", bfqd->busy_queues);
+
+ if (bfqd->busy_queues == 0)
+ goto exit;
+@@ -4236,13 +4233,13 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ rq->rq_flags |= RQF_STARTED;
+ if (bfqq)
+ bfq_log_bfqq(bfqd, bfqq,
+- "dispatched %s request %p, rq_in_driver %d",
++ "%s request %p, rq_in_driver %d",
+ bfq_bfqq_sync(bfqq) ? "sync" : "async",
+ rq,
+ bfqd->rq_in_driver);
+ else
+ bfq_log(bfqd,
+- "dispatched request %p from dispatch list, rq_in_driver %d",
++ "request %p from dispatch list, rq_in_driver %d",
+ rq, bfqd->rq_in_driver);
+ } else
+ bfq_log(bfqd,
+@@ -4339,7 +4336,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ BUG_ON(bfqq->ref <= 0);
+
+ if (bfqq->bfqd)
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d", bfqq, bfqq->ref);
+
+ bfqq->ref--;
+ if (bfqq->ref)
+@@ -4383,10 +4380,10 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ }
+
+ if (bfqq->bfqd)
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p freed", bfqq);
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "putting blkg and bfqg %p\n", bfqg);
+ bfqg_and_blkg_put(bfqg);
+ #endif
+ kmem_cache_free(bfq_pool, bfqq);
+@@ -4418,7 +4415,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_schedule_dispatch(bfqd);
+ }
+
+- bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "%p, %d", bfqq, bfqq->ref);
+
+ bfq_put_cooperator(bfqq);
+
+@@ -4502,7 +4499,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
+ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
+ bfqq->entity.prio_changed = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "set_next_ioprio_data: bic_class %d prio %d class %d",
++ "bic_class %d prio %d class %d",
+ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class);
+ }
+
+@@ -4529,7 +4526,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
+ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
+ bic_set_bfqq(bic, bfqq, false);
+ bfq_log_bfqq(bfqd, bfqq,
+- "check_ioprio_change: bfqq %p %d",
++ "bfqq %p %d",
+ bfqq, bfqq->ref);
+ }
+
+@@ -4667,14 +4664,14 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
+ * guarantee that this queue is not freed
+ * until its group goes away.
+ */
+- bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfq_log_bfqq(bfqd, bfqq, "bfqq not in async: %p, %d",
+ bfqq, bfqq->ref);
+ *async_bfqq = bfqq;
+ }
+
+ out:
+ bfqq->ref++; /* get a process reference to this queue */
+- bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "at end: %p, %d", bfqq, bfqq->ref);
+ rcu_read_unlock();
+ return bfqq;
+ }
+@@ -4733,7 +4730,7 @@ static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
+ bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
+ has_short_ttime = false;
+
+- bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
++ bfq_log_bfqq(bfqd, bfqq, "has_short_ttime %d",
+ has_short_ttime);
+
+ if (has_short_ttime)
+@@ -4759,7 +4756,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_update_io_seektime(bfqd, bfqq, rq);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "rq_enqueued: has_short_ttime=%d (seeky %d)",
++ "has_short_ttime=%d (seeky %d)",
+ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
+
+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+@@ -4818,7 +4815,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+
+ assert_spin_locked(&bfqd->lock);
+
+- bfq_log_bfqq(bfqd, bfqq, "__insert_req: rq %p bfqq %p", rq, bfqq);
++ bfq_log_bfqq(bfqd, bfqq, "rq %p bfqq %p", rq, bfqq);
+
+ /*
+ * An unplug may trigger a requeue of a request from the device
+@@ -4837,9 +4834,9 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ new_bfqq->allocated++;
+ bfqq->allocated--;
+ bfq_log_bfqq(bfqd, bfqq,
+- "insert_request: new allocated %d", bfqq->allocated);
++ "new allocated %d", bfqq->allocated);
+ bfq_log_bfqq(bfqd, new_bfqq,
+- "insert_request: new_bfqq new allocated %d",
++ "new_bfqq new allocated %d",
+ bfqq->allocated);
+
+ new_bfqq->ref++;
+@@ -4911,11 +4908,11 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ rq->rq_flags |= RQF_DISP_LIST;
+ if (bfqq)
+ bfq_log_bfqq(bfqd, bfqq,
+- "insert_request %p in disp: at_head %d",
++ "%p in disp: at_head %d",
+ rq, at_head);
+ else
+ bfq_log(bfqd,
+- "insert_request %p in disp: at_head %d",
++ "%p in disp: at_head %d",
+ rq, at_head);
+ } else {
+ BUG_ON(!(rq->rq_flags & RQF_GOT));
+@@ -5033,7 +5030,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ bfqq->dispatched--;
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "completed_requests: new disp %d, new rq_in_driver %d",
++ "new disp %d, new rq_in_driver %d",
+ bfqq->dispatched, bfqd->rq_in_driver);
+
+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
+@@ -5061,7 +5058,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
++ "delta %uus/%luus max_size %u rate %llu/%llu",
+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
+ delta_us > 0 ?
+ (USEC_PER_SEC*
+@@ -5129,7 +5126,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "put_request_body: allocated %d", bfqq->allocated);
++ "allocated %d", bfqq->allocated);
+ BUG_ON(!bfqq->allocated);
+ bfqq->allocated--;
+
+@@ -5406,10 +5403,10 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
+
+ bfqq->allocated++;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "get_request: new allocated %d", bfqq->allocated);
++ "new allocated %d", bfqq->allocated);
+
+ bfqq->ref++;
+- bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", rq, bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "%p: bfqq %p, %d", rq, bfqq, bfqq->ref);
+
+ rq->elv.priv[0] = bic;
+ rq->elv.priv[1] = bfqq;
+@@ -5493,7 +5490,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
+ idle_slice_timer);
+ struct bfq_queue *bfqq = bfqd->in_service_queue;
+
+- bfq_log(bfqd, "slice_timer expired");
++ bfq_log(bfqd, "expired");
+
+ /*
+ * Theoretical race here: the in-service queue can be NULL or
+@@ -5515,10 +5512,10 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
+ struct bfq_group *root_group = bfqd->root_group;
+ struct bfq_queue *bfqq = *bfqq_ptr;
+
+- bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ bfq_log(bfqd, "%p", bfqq);
+ if (bfqq) {
+ bfq_bfqq_move(bfqd, bfqq, root_group);
+- bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfq_log_bfqq(bfqd, bfqq, "putting %p, %d",
+ bfqq, bfqq->ref);
+ bfq_put_queue(bfqq);
+ *bfqq_ptr = NULL;
+@@ -5547,7 +5544,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ struct bfq_data *bfqd = e->elevator_data;
+ struct bfq_queue *bfqq, *n;
+
+- bfq_log(bfqd, "exit_queue: starting ...");
++ bfq_log(bfqd, "starting ...");
+
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+
+@@ -5575,7 +5572,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ spin_unlock_irq(&bfqd->lock);
+ #endif
+
+- bfq_log(bfqd, "exit_queue: finished ...");
++ bfq_log(bfqd, "finished ...");
+ kfree(bfqd);
+ }
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 9a5ce1168ff5..e2ae11bf8f76 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -712,34 +712,34 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+- pr_crit("%s bfq%d%c %s " fmt "\n", \
++ pr_crit("%s bfq%d%c %s [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- bfqq_group(bfqq)->blkg_path, ##args); \
++ bfqq_group(bfqq)->blkg_path, __func__, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+- pr_crit("%s %s " fmt "\n", \
++ pr_crit("%s %s [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+- bfqg->blkg_path, ##args); \
++ bfqg->blkg_path, __func__, ##args); \
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+- pr_crit("%s bfq%d%c " fmt "\n", \
++ pr_crit("%s bfq%d%c [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- ##args)
++ __func__, ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+ #endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+- pr_crit("%s bfq " fmt "\n", \
++ pr_crit("%s bfq [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+- ##args)
++ __func__, ##args)
+
+ #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+
+@@ -762,28 +762,29 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+- blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s [%s] " fmt, \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- bfqq_group(bfqq)->blkg_path, ##args); \
++ bfqq_group(bfqq)->blkg_path, __func__, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+- blk_add_trace_msg((bfqd)->queue, "%s " fmt, bfqg->blkg_path, ##args);\
++ blk_add_trace_msg((bfqd)->queue, "%s [%s] " fmt, bfqg->blkg_path, \
++ __func__, ##args);\
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+- blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c [%s] " fmt, (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- ##args)
++ __func__, ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+ #endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+- blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++ blk_add_trace_msg((bfqd)->queue, "bfq [%s] " fmt, __func__, ##args)
+
+ #endif /* CONFIG_BLK_DEV_IO_TRACE */
+ #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+@@ -938,7 +939,7 @@ bfq_entity_service_tree(struct bfq_entity *entity)
+
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "entity_service_tree %p %d",
++ "%p %d",
+ sched_data->service_tree + idx, idx);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+@@ -946,7 +947,7 @@ bfq_entity_service_tree(struct bfq_entity *entity)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "entity_service_tree %p %d",
++ "%p %d",
+ sched_data->service_tree + idx, idx);
+ }
+ #endif
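
The macro changes above all follow the same recipe: splice "[%s]" into the format string and pass __func__ as the matching argument, so every message is tagged with the name of its caller. A small standalone illustration of that recipe follows; demo_log() and demo_caller() are invented here, and printf() stands in for pr_crit()/blk_add_trace_msg():

#include <stdio.h>

#define demo_log(fmt, args...) \
	printf("bfq [%s] " fmt "\n", __func__, ##args)

static void demo_caller(void)
{
	/* the caller passes only the payload; the macro adds "[demo_caller]" */
	demo_log("wr_busy %d sync %d", 2, 1);
}

int main(void)
{
	demo_caller();	/* prints: bfq [demo_caller] wr_busy 2 sync 1 */
	return 0;
}

Note that ##args swallows the trailing comma when no extra arguments are given, which is why call sites reduced to an empty format string, such as bfq_log(bfqd, ""), still compile after this patch.
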
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 4e6c5232e2fb..ead34c30a7c2 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -119,7 +119,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "update_next_in_service: chose without lookup");
++ "chose without lookup");
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+@@ -127,7 +127,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data*)bfqg->bfqd, bfqg,
+- "update_next_in_service: chose without lookup");
++ "chose without lookup");
+ }
+ #endif
+ }
+@@ -148,7 +148,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ bfqq = bfq_entity_to_bfqq(next_in_service);
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "update_next_in_service: chosen this queue");
++ "chosen this queue");
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+@@ -156,7 +156,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "update_next_in_service: chosen this entity");
++ "chosen this entity");
+ }
+ #endif
+ return parent_sched_may_change;
+@@ -331,10 +331,10 @@ static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "calc_finish: serv %lu, w %d",
++ "serv %lu, w %d",
+ service, entity->weight);
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "calc_finish: start %llu, finish %llu, delta %llu",
++ "start %llu, finish %llu, delta %llu",
+ start, finish, delta);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -342,10 +342,10 @@ static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "calc_finish group: serv %lu, w %d",
++ "group: serv %lu, w %d",
+ service, entity->weight);
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "calc_finish group: start %llu, finish %llu, delta %llu",
++ "group: start %llu, finish %llu, delta %llu",
+ start, finish, delta);
+ #endif
+ }
+@@ -484,7 +484,7 @@ static void bfq_update_active_node(struct rb_node *node)
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "update_active_node: new min_start %llu",
++ "new min_start %llu",
+ ((entity->min_start>>10)*1000)>>12);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -492,7 +492,7 @@ static void bfq_update_active_node(struct rb_node *node)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "update_active_node: new min_start %llu",
++ "new min_start %llu",
+ ((entity->min_start>>10)*1000)>>12);
+ #endif
+ }
+@@ -620,7 +620,7 @@ static void bfq_get_entity(struct bfq_entity *entity)
+
+ if (bfqq) {
+ bfqq->ref++;
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d",
+ bfqq, bfqq->ref);
+ }
+ }
+@@ -748,7 +748,7 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
+ entity->on_st = false;
+ st->wsum -= entity->weight;
+ if (bfqq && !is_in_service) {
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity (before): %p %d",
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "(before): %p %d",
+ bfqq, bfqq->ref);
+ bfq_put_queue(bfqq);
+ }
+@@ -1008,7 +1008,7 @@ static void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ tot_serv_to_charge = entity->service;
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "charge_time: %lu/%u ms, %d/%d/%d sectors",
++ "%lu/%u ms, %d/%d/%d sectors",
+ time_ms, timeout_ms, entity->service,
+ tot_serv_to_charge, entity->budget);
+
+@@ -1080,7 +1080,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "update_fin_time_enqueue: new queue finish %llu",
++ "new queue finish %llu",
+ ((entity->finish>>10)*1000)>>12);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -1088,7 +1088,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "update_fin_time_enqueue: new group finish %llu",
++ "new group finish %llu",
+ ((entity->finish>>10)*1000)>>12);
+ #endif
+ }
+@@ -1098,7 +1098,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "update_fin_time_enqueue: queue %seligible in st %p",
++ "queue %seligible in st %p",
+ entity->start <= st->vtime ? "" : "non ", st);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -1106,7 +1106,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "update_fin_time_enqueue: group %seligible in st %p",
++ "group %seligible in st %p",
+ entity->start <= st->vtime ? "" : "non ", st);
+ #endif
+ }
+@@ -1550,7 +1550,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
+
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "calc_vtime_jump: new value %llu",
++ "new value %llu",
+ ((root_entity->min_start>>10)*1000)>>12);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+@@ -1559,7 +1559,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
+ entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "calc_vtime_jump: new value %llu",
++ "new value %llu",
+ ((root_entity->min_start>>10)*1000)>>12);
+ }
+ #endif
+@@ -1677,7 +1677,7 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
+ bfqq = bfq_entity_to_bfqq(entity);
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "__lookup_next: start %llu vtime %llu st %p",
++ "start %llu vtime %llu st %p",
+ ((entity->start>>10)*1000)>>12,
+ ((new_vtime>>10)*1000)>>12, st);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+@@ -1686,7 +1686,7 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "__lookup_next: start %llu vtime %llu (%llu) st %p",
++ "start %llu vtime %llu (%llu) st %p",
+ ((entity->start>>10)*1000)>>12,
+ ((st->vtime>>10)*1000)>>12,
+ ((new_vtime>>10)*1000)>>12, st);
+@@ -1821,14 +1821,14 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg(bfqd, bfqg,
+- "get_next_queue: lookup in this group");
++ "lookup in this group");
+ if (!sd->next_in_service)
+- pr_crit("get_next_queue: lookup in this group");
++ pr_crit("lookup in this group");
+ } else {
+ bfq_log_bfqg(bfqd, bfqd->root_group,
+- "get_next_queue: lookup in root group");
++ "lookup in root group");
+ if (!sd->next_in_service)
+- pr_crit("get_next_queue: lookup in root group");
++ pr_crit("lookup in root group");
+ }
+ #endif
+
+@@ -1903,7 +1903,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ bfqq = bfq_entity_to_bfqq(entity);
+ if (bfqq)
+ bfq_log_bfqq(bfqd, bfqq,
+- "get_next_queue: this queue, finish %llu",
++ "this queue, finish %llu",
+ (((entity->finish>>10)*1000)>>10)>>2);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+@@ -1911,7 +1911,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg(bfqd, bfqg,
+- "get_next_queue: this entity, finish %llu",
++ "this entity, finish %llu",
+ (((entity->finish>>10)*1000)>>10)>>2);
+ }
+ #endif
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index c4df156b1fb4..e49e8ac882b3 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -281,7 +281,7 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd);
+ static void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ {
+ if (bfqd->queued != 0) {
+- bfq_log(bfqd, "schedule dispatch");
++ bfq_log(bfqd, "");
+ kblockd_schedule_work(&bfqd->unplug_work);
+ }
+ }
+@@ -414,7 +414,7 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
+ if (rb_link)
+ *rb_link = p;
+
+- bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ bfq_log(bfqd, "%llu: returning %d",
+ (unsigned long long) sector,
+ bfqq ? bfqq->pid : 0);
+
+@@ -635,7 +635,7 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
+ if (rq == last || ktime_get_ns() < rq->fifo_time)
+ return NULL;
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "returned %p", rq);
+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
+ return rq;
+ }
+@@ -728,7 +728,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
+ bfq_serv_to_charge(next_rq, bfqq));
+ if (entity->budget != new_budget) {
+ entity->budget = new_budget;
+- bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++ bfq_log_bfqq(bfqd, bfqq, "new budget %lu",
+ new_budget);
+ bfq_requeue_bfqq(bfqd, bfqq, false);
+ }
+@@ -800,8 +800,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
+- __func__,
++ "bic %p wr_coeff %d start_finish %lu max_time %lu",
+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
+ bfqq->wr_cur_max_time);
+
+@@ -814,11 +813,11 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ bfq_wr_duration(bfqd))) {
+ switch_back_to_interactive_wr(bfqq, bfqd);
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "resume state: switching back to interactive");
++ "switching back to interactive");
+ } else {
+ bfqq->wr_coeff = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "resume state: switching off wr (%lu + %lu < %lu)",
++ "switching off wr (%lu + %lu < %lu)",
+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
+ jiffies);
+ }
+@@ -870,7 +869,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ /* Increment burst size to take into account also bfqq */
+ bfqd->burst_size++;
+
+- bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size);
++ bfq_log_bfqq(bfqd, bfqq, "%d", bfqd->burst_size);
+
+ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh);
+
+@@ -883,7 +882,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ * other to consider this burst as large.
+ */
+ bfqd->large_burst = true;
+- bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started");
++ bfq_log_bfqq(bfqd, bfqq, "large burst started");
+
+ /*
+ * We can now mark all queues in the burst list as
+@@ -1055,7 +1054,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfqd->large_burst = false;
+ bfq_reset_burst_list(bfqd, bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "handle_burst: late activation or different group");
++ "late activation or different group");
+ goto end;
+ }
+
+@@ -1065,7 +1064,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ * bfqq as belonging to this large burst immediately.
+ */
+ if (bfqd->large_burst) {
+- bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst");
++ bfq_log_bfqq(bfqd, bfqq, "marked in burst");
+ bfq_mark_bfqq_in_large_burst(bfqq);
+ goto end;
+ }
+@@ -1572,7 +1571,7 @@ static void bfq_add_request(struct request *rq)
+ unsigned int old_wr_coeff = bfqq->wr_coeff;
+ bool interactive = false;
+
+- bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s",
++ bfq_log_bfqq(bfqd, bfqq, "size %u %s",
+ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A");
+
+ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */
+@@ -1870,10 +1869,10 @@ static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
+ */
+ bfqq->entity.prio_changed = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "end_wr: wrais ending at %lu, rais_max_time %u",
++ "wrais ending at %lu, rais_max_time %u",
+ bfqq->last_wr_start_finish,
+ jiffies_to_msecs(bfqq->wr_cur_max_time));
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d",
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "wr_busy %d",
+ bfqq->bfqd->wr_busy_queues);
+ }
+
+@@ -2048,8 +2047,8 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ {
+ if (bfq_too_late_for_merging(new_bfqq)) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "[%s] too late for bfq%d to be merged",
+- __func__, new_bfqq->pid);
++ "too late for bfq%d to be merged",
++ new_bfqq->pid);
+ return false;
+ }
+
+@@ -2258,7 +2257,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+
+ }
+
+- bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
++ bfq_log_bfqq(bfqd, new_bfqq, "wr_busy %d",
+ bfqd->wr_busy_queues);
+
+ /*
+@@ -2359,7 +2358,7 @@ static void bfq_set_budget_timeout(struct bfq_data *bfqd,
+ bfqq->budget_timeout = jiffies +
+ bfqd->bfq_timeout * timeout_coeff;
+
+- bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ bfq_log_bfqq(bfqd, bfqq, "%u",
+ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff));
+ }
+
+@@ -2427,10 +2426,10 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+
+ bfq_set_budget_timeout(bfqd, bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_in_service_queue, cur-budget = %d",
++ "cur-budget = %d",
+ bfqq->entity.budget);
+ } else
+- bfq_log(bfqd, "set_in_service_queue: NULL");
++ bfq_log(bfqd, "NULL");
+
+ bfqd->in_service_queue = bfqq;
+ }
+@@ -2559,7 +2558,7 @@ static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq
+ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
+
+ bfq_log(bfqd,
+- "reset_rate_computation at end, sample %u/%u tot_sects %llu",
++ "at end, sample %u/%u tot_sects %llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ bfqd->tot_sectors_dispatched);
+ }
+@@ -2579,7 +2578,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
+ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) {
+ bfq_log(bfqd,
+- "update_rate_reset: only resetting, delta_first %lluus samples %d",
++ "only resetting, delta_first %lluus samples %d",
+ bfqd->delta_from_first>>10, bfqd->peak_rate_samples);
+ goto reset_computation;
+ }
+@@ -2603,7 +2602,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
+
+ bfq_log(bfqd,
+-"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
++"tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
+ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ rate > 20<<BFQ_RATE_SHIFT);
+@@ -2618,14 +2617,14 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ rate <= bfqd->peak_rate) ||
+ rate > 20<<BFQ_RATE_SHIFT) {
+ bfq_log(bfqd,
+- "update_rate_reset: goto reset, samples %u/%u rate/peak %llu/%llu",
++ "goto reset, samples %u/%u rate/peak %llu/%llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+ goto reset_computation;
+ } else {
+ bfq_log(bfqd,
+- "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu",
++ "do update, samples %u/%u rate/peak %llu/%llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+@@ -2681,7 +2680,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ rate /= divisor; /* smoothing constant alpha = 1/divisor */
+
+ bfq_log(bfqd,
+- "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u",
++ "divisor %d tmp_peak_rate %llu tmp_rate %u",
+ divisor,
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT),
+ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT));
+@@ -2735,7 +2734,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+
+ if (bfqd->peak_rate_samples == 0) { /* first dispatch */
+ bfq_log(bfqd,
+- "update_peak_rate: goto reset, samples %d",
++ "goto reset, samples %d",
+ bfqd->peak_rate_samples) ;
+ bfq_reset_rate_computation(bfqd, rq);
+ goto update_last_values; /* will add one sample */
+@@ -2756,7 +2755,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
+ bfqd->rq_in_driver == 0) {
+ bfq_log(bfqd,
+-"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d",
++"jumping to updating&resetting delta_last %lluus samples %d",
+ (now_ns - bfqd->last_dispatch)>>10,
+ bfqd->peak_rate_samples) ;
+ goto update_rate_and_reset;
+@@ -2782,7 +2781,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
+
+ bfq_log(bfqd,
+- "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus",
++ "added samples %u/%u tot_sects %llu delta_first %lluus",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ bfqd->tot_sectors_dispatched,
+ bfqd->delta_from_first>>10);
+@@ -2798,12 +2797,12 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ bfqd->last_dispatch = now_ns;
+
+ bfq_log(bfqd,
+- "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu",
++ "delta_first %lluus last_pos %llu peak_rate %llu",
+ (now_ns - bfqd->first_dispatch)>>10,
+ (unsigned long long) bfqd->last_position,
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+ bfq_log(bfqd,
+- "update_peak_rate: samples at end %d", bfqd->peak_rate_samples);
++ "samples at end %d", bfqd->peak_rate_samples);
+ }
+
+ /*
+@@ -2900,11 +2899,11 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
+ */
+ budget = 2 * min_budget;
+
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
++ bfq_log_bfqq(bfqd, bfqq, "last budg %d, budg left %d",
+ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
++ bfq_log_bfqq(bfqd, bfqq, "last max_budg %d, min budg %d",
+ budget, bfq_min_budget(bfqd));
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_log_bfqq(bfqd, bfqq, "sync %d, seeky %d",
+ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
+
+ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
+@@ -3106,7 +3105,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ else /* charge at least one seek */
+ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
+
+- bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs);
++ bfq_log(bfqd, "too short %u", delta_usecs);
+
+ return slow;
+ }
+@@ -3129,11 +3128,11 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * peak rate.
+ */
+ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
+- bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d",
++ bfq_log(bfqd, "relative rate %d/%d",
+ bfqq->entity.service, bfqd->bfq_max_budget);
+ }
+
+- bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
++ bfq_log_bfqq(bfqd, bfqq, "slow %d", slow);
+
+ return slow;
+ }
+@@ -3235,7 +3234,7 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqd, bfqq,
+-"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u",
++"service_blkg %lu soft_rate %u sects/sec interval %u",
+ bfqq->service_from_backlogged,
+ bfqd->bfq_wr_max_softrt_rate,
+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
+@@ -3414,7 +3413,7 @@ static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
+ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "may_budget_timeout: wait_request %d left %d timeout %d",
++ "wait_request %d left %d timeout %d",
+ bfq_bfqq_wait_request(bfqq),
+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
+ bfq_bfqq_budget_timeout(bfqq));
+@@ -3675,11 +3674,11 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ * either boosts the throughput (without issues), or is
+ * necessary to preserve service guarantees.
+ */
+- bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d",
++ bfq_log_bfqq(bfqd, bfqq, "sync %d idling_boosts_thr %d",
+ bfq_bfqq_sync(bfqq), idling_boosts_thr);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "may_idle: wr_busy %d boosts %d IO-bound %d guar %d",
++ "wr_busy %d boosts %d IO-bound %d guar %d",
+ bfqd->wr_busy_queues,
+ idling_boosts_thr_without_issues,
+ bfq_bfqq_IO_bound(bfqq),
+@@ -3719,7 +3718,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ if (!bfqq)
+ goto new_queue;
+
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++ bfq_log_bfqq(bfqd, bfqq, "already in-service queue");
+
+ if (bfq_may_expire_for_budg_timeout(bfqq) &&
+ !hrtimer_active(&bfqd->idle_slice_timer) &&
+@@ -3797,14 +3796,14 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ new_queue:
+ bfqq = bfq_set_in_service_queue(bfqd);
+ if (bfqq) {
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
++ bfq_log_bfqq(bfqd, bfqq, "checking new queue");
+ goto check_queue;
+ }
+ keep_queue:
+ if (bfqq)
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
++ bfq_log_bfqq(bfqd, bfqq, "returned this queue");
+ else
+- bfq_log(bfqd, "select_queue: no queue returned");
++ bfq_log(bfqd, "no queue returned");
+
+ return bfqq;
+ }
+@@ -3857,8 +3856,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ /* see comments on max_service_from_wr */
+ bfq_bfqq_end_wr(bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "[%s] too much service",
+- __func__);
++ "too much service");
+ }
+ }
+ /*
+@@ -3987,7 +3985,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_queue *bfqq;
+
+- bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++ bfq_log(bfqd, "%d busy queues", bfqd->busy_queues);
+
+ if (bfqd->busy_queues == 0)
+ return 0;
+@@ -4021,7 +4019,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
+ if (!bfq_dispatch_request(bfqd, bfqq))
+ return 0;
+
+- bfq_log_bfqq(bfqd, bfqq, "dispatched %s request",
++ bfq_log_bfqq(bfqd, bfqq, "%s request",
+ bfq_bfqq_sync(bfqq) ? "sync" : "async");
+
+ BUG_ON(bfqq->next_rq == NULL &&
+@@ -4044,7 +4042,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+
+ BUG_ON(bfqq->ref <= 0);
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d", bfqq, bfqq->ref);
+ bfqq->ref--;
+ if (bfqq->ref)
+ return;
+@@ -4086,7 +4084,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ bfqq->bfqd->burst_size--;
+ }
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p freed", bfqq);
+
+ kmem_cache_free(bfq_pool, bfqq);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+@@ -4120,7 +4118,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_schedule_dispatch(bfqd);
+ }
+
+- bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "%p, %d", bfqq, bfqq->ref);
+
+ bfq_put_cooperator(bfqq);
+
+@@ -4200,7 +4198,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
+ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
+ bfqq->entity.prio_changed = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "set_next_ioprio_data: bic_class %d prio %d class %d",
++ "bic_class %d prio %d class %d",
+ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class);
+ }
+
+@@ -4227,7 +4225,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
+ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
+ bic_set_bfqq(bic, bfqq, false);
+ bfq_log_bfqq(bfqd, bfqq,
+- "check_ioprio_change: bfqq %p %d",
++ "bfqq %p %d",
+ bfqq, bfqq->ref);
+ }
+
+@@ -4362,14 +4360,14 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
+ * guarantee that this queue is not freed
+ * until its group goes away.
+ */
+- bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfq_log_bfqq(bfqd, bfqq, "bfqq not in async: %p, %d",
+ bfqq, bfqq->ref);
+ *async_bfqq = bfqq;
+ }
+
+ out:
+ bfqq->ref++; /* get a process reference to this queue */
+- bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "at end: %p, %d", bfqq, bfqq->ref);
+ rcu_read_unlock();
+ return bfqq;
+ }
+@@ -4428,7 +4426,7 @@ static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
+ bic->ttime.ttime_mean > bfqd->bfq_slice_idle))
+ has_short_ttime = false;
+
+- bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
++ bfq_log_bfqq(bfqd, bfqq, "has_short_ttime %d",
+ has_short_ttime);
+
+ if (has_short_ttime)
+@@ -4454,7 +4452,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_update_io_seektime(bfqd, bfqq, rq);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "rq_enqueued: has_short_ttime=%d (seeky %d)",
++ "has_short_ttime=%d (seeky %d)",
+ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
+
+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+@@ -4629,7 +4627,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+ */
+ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
+
+- bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
++ bfq_log(bfqd, "delta %uus/%luus max_size %u rate %llu/%llu",
+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
+ delta_us > 0 ?
+ (USEC_PER_SEC*
+@@ -4750,7 +4748,7 @@ static void bfq_put_request(struct request *rq)
+ rq->elv.priv[0] = NULL;
+ rq->elv.priv[1] = NULL;
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p, %d",
+ bfqq, bfqq->ref);
+ bfq_put_queue(bfqq);
+ }
+@@ -4816,7 +4814,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ bic_set_bfqq(bic, bfqq, is_sync);
+ if (split && is_sync) {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: was_in_list %d "
++ "was_in_list %d "
+ "was_in_large_burst %d "
+ "large burst in progress %d",
+ bic->was_in_burst_list,
+@@ -4826,12 +4824,12 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ if ((bic->was_in_burst_list && bfqd->large_burst) ||
+ bic->saved_in_large_burst) {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: marking in "
++ "marking in "
+ "large burst");
+ bfq_mark_bfqq_in_large_burst(bfqq);
+ } else {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: clearing in "
++ "clearing in "
+ "large burst");
+ bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list)
+@@ -4888,7 +4886,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+
+ bfqq->allocated[rw]++;
+ bfqq->ref++;
+- bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "bfqq %p, %d", bfqq, bfqq->ref);
+
+ rq->elv.priv[0] = bic;
+ rq->elv.priv[1] = bfqq;
+@@ -4962,7 +4960,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
+ * case we just expire a queue too early.
+ */
+ if (bfqq) {
+- bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++ bfq_log_bfqq(bfqd, bfqq, "expired");
+ bfq_clear_bfqq_wait_request(bfqq);
+
+ if (bfq_bfqq_budget_timeout(bfqq))
+@@ -5005,10 +5003,10 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
+ struct bfq_group *root_group = bfqd->root_group;
+ struct bfq_queue *bfqq = *bfqq_ptr;
+
+- bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ bfq_log(bfqd, "%p", bfqq);
+ if (bfqq) {
+ bfq_bfqq_move(bfqd, bfqq, root_group);
+- bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfq_log_bfqq(bfqd, bfqq, "putting %p, %d",
+ bfqq, bfqq->ref);
+ bfq_put_queue(bfqq);
+ *bfqq_ptr = NULL;
+diff --git a/block/bfq.h b/block/bfq.h
+index 0cd7a3f251a7..4d2fe7f77af1 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -698,37 +698,37 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ \
+ assert_spin_locked((bfqd)->queue->queue_lock); \
+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+- pr_crit("%s bfq%d%c %s " fmt "\n", \
++ pr_crit("%s bfq%d%c %s [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- __pbuf, ##args); \
++ __pbuf, __func__, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
+- pr_crit("%s %s " fmt "\n", \
++ pr_crit("%s %s [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+- __pbuf, ##args); \
++ __pbuf, __func__, ##args); \
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+- pr_crit("%s bfq%d%c " fmt "\n", \
++ pr_crit("%s bfq%d%c [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- ##args)
++ __func__, ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+ #endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+- pr_crit("%s bfq " fmt "\n", \
++ pr_crit("%s bfq [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+- ##args)
++ __func__, ##args)
+
+ #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+
+@@ -755,31 +755,32 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ \
+ assert_spin_locked((bfqd)->queue->queue_lock); \
+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+- blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s [%s] " fmt, \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- __pbuf, ##args); \
++ __pbuf, __func__, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
+- blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
++ blk_add_trace_msg((bfqd)->queue, "%s [%s] " fmt, __pbuf, \
++ __func__, ##args); \
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+- blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c [%s] " fmt, (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- ##args)
++ __func__, ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+ #endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+- blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++ blk_add_trace_msg((bfqd)->queue, "bfq [%s] " fmt, __func__, ##args)
+
+ #endif /* CONFIG_BLK_DEV_IO_TRACE */
+ #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+@@ -928,7 +929,7 @@ bfq_entity_service_tree(struct bfq_entity *entity)
+
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "entity_service_tree %p %d",
++ "%p %d",
+ sched_data->service_tree + idx, idx);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+@@ -936,7 +937,7 @@ bfq_entity_service_tree(struct bfq_entity *entity)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "entity_service_tree %p %d",
++ "%p %d",
+ sched_data->service_tree + idx, idx);
+ }
+ #endif
+
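Taken together, the hunks above strip the hand-written "function_name:" prefixes out of the individual bfq_log*() format strings and instead make the macros in block/bfq.h print the caller automatically through a "[%s]" conversion fed with __func__. A minimal stand-alone sketch of the same technique, using invented names rather than the kernel's, would be:

    #include <stdio.h>

    /* __func__ expands to the enclosing function's name (C99). */
    #define my_log(fmt, args...) \
            printf("bfq [%s] " fmt "\n", __func__, ##args)

    static void select_queue(void)
    {
            /* prints: bfq [select_queue] 3 busy queues */
            my_log("%d busy queues", 3);
    }

    int main(void)
    {
            select_queue();
            return 0;
    }

Every message stays tagged with its origin while the call sites no longer have to repeat the function name by hand.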
+From 673a457e8a54d1c4b66e61b1a50956ba0b8c6a60 Mon Sep 17 00:00:00 2001
+From: Davide Paganelli <paga.david@gmail.com>
+Date: Thu, 8 Feb 2018 11:49:58 +0100
+Subject: [PATCH 19/23] block, bfq-mq, bfq-sq: make bfq_bfqq_expire print
+ expiration reason
+
+Improve the readability of the log messages related to the expiration
+reasons in the function bfq_bfqq_expire.
+Replace the printed number that represents the expiration reason with
+an actual textual description of the reason.
+
+Signed-off-by: Davide Paganelli <paga.david@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 10 ++++++++--
+ block/bfq-sq-iosched.c | 10 ++++++++--
+ 2 files changed, 16 insertions(+), 4 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index edc93b6af186..9268dd47a4e5 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -133,6 +133,12 @@ static const int bfq_timeout = (HZ / 8);
+ */
+ static const unsigned long bfq_merge_time_limit = HZ/10;
+
++#define MAX_LENGTH_REASON_NAME 25
++
++static const char reason_name[][MAX_LENGTH_REASON_NAME] = {"TOO_IDLE",
++"BUDGET_TIMEOUT", "BUDGET_EXHAUSTED", "NO_MORE_REQUESTS",
++"PREEMPTED"};
++
+ static struct kmem_cache *bfq_pool;
+
+ /* Below this threshold (in ns), we consider thinktime immediate. */
+@@ -3553,8 +3559,8 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ }
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)",
+- reason, slow, bfqq->dispatched,
++ "expire (%s, slow %d, num_disp %d, short_ttime %d, weight %d)",
++ reason_name[reason], slow, bfqq->dispatched,
+ bfq_bfqq_has_short_ttime(bfqq), entity->weight);
+
+ /*
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index e49e8ac882b3..f95deaab49a1 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -127,6 +127,12 @@ static const int bfq_timeout = (HZ / 8);
+ */
+ static const unsigned long bfq_merge_time_limit = HZ/10;
+
++#define MAX_LENGTH_REASON_NAME 25
++
++static const char reason_name[][MAX_LENGTH_REASON_NAME] = {"TOO_IDLE",
++"BUDGET_TIMEOUT", "BUDGET_EXHAUSTED", "NO_MORE_REQUESTS",
++"PREEMPTED"};
++
+ static struct kmem_cache *bfq_pool;
+
+ /* Below this threshold (in ns), we consider thinktime immediate. */
+@@ -3366,8 +3372,8 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ }
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)",
+- reason, slow, bfqq->dispatched,
++ "expire (%s, slow %d, num_disp %d, short_ttime %d, weight %d)",
++ reason_name[reason], slow, bfqq->dispatched,
+ bfq_bfqq_has_short_ttime(bfqq), entity->weight);
+
+ /*
+
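The reason_name[] table added above turns the numeric expiration reason into a string simply by indexing with the enum value. The enum itself is not part of this hunk; assuming the usual BFQ expiration constants, the idea reduces to:

    #include <stdio.h>

    enum bfqq_expiration {              /* assumed to mirror the BFQ enum order */
            BFQ_BFQQ_TOO_IDLE,
            BFQ_BFQQ_BUDGET_TIMEOUT,
            BFQ_BFQQ_BUDGET_EXHAUSTED,
            BFQ_BFQQ_NO_MORE_REQUESTS,
            BFQ_BFQQ_PREEMPTED,
    };

    static const char reason_name[][25] = {
            "TOO_IDLE", "BUDGET_TIMEOUT", "BUDGET_EXHAUSTED",
            "NO_MORE_REQUESTS", "PREEMPTED",
    };

    int main(void)
    {
            enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;

            /* before: "expire (1, ...)"; after: "expire (BUDGET_TIMEOUT, ...)" */
            printf("expire (%s, ...)\n", reason_name[reason]);
            return 0;
    }

The table only stays correct as long as its order matches the enum, which is why both lists use the same five names.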
+From 62e80623fbb58367c3f667dab22fea0804001f3b Mon Sep 17 00:00:00 2001
+From: Melzani Alessandro <melzani.alessandro@gmail.com>
+Date: Mon, 26 Feb 2018 22:21:59 +0100
+Subject: [PATCH 20/23] bfq-mq: port of "block, bfq: remove batches of
+ confusing ifdefs"
+
+Commit a33801e8b473 ("block, bfq: move debug blkio stats behind
+CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs:
+one reported in [1], plus a similar one in another function. This
+commit removes both batches, in the way suggested in [1].
+
+[1] https://www.spinics.net/lists/linux-block/msg20043.html
+
+Fixes: a33801e8b473 ("block, bfq: move debug blkio stats behind CONFIG_DEBUG_BLK_CGROUP")
+
+Signed-off-by: Alessandro Melzani <melzani.alessandro@gmail.com>
+---
+ block/bfq-mq-iosched.c | 128 ++++++++++++++++++++++++++++---------------------
+ 1 file changed, 73 insertions(+), 55 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 9268dd47a4e5..5a211620f316 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4256,35 +4256,17 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ return rq;
+ }
+
+-static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+-{
+- struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+- struct request *rq;
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+- struct bfq_queue *in_serv_queue, *bfqq;
+- bool waiting_rq, idle_timer_disabled;
+-#endif
+
+- spin_lock_irq(&bfqd->lock);
+-
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+- in_serv_queue = bfqd->in_service_queue;
+- waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
+-
+- rq = __bfq_dispatch_request(hctx);
+-
+- idle_timer_disabled =
+- waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
+-
+-#else
+- rq = __bfq_dispatch_request(hctx);
+-#endif
+- spin_unlock_irq(&bfqd->lock);
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
++static void bfq_update_dispatch_stats(struct request_queue *q,
++ struct request *rq,
++ struct bfq_queue *in_serv_queue,
++ bool idle_timer_disabled)
++{
++ struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
+
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+- bfqq = rq ? RQ_BFQQ(rq) : NULL;
+ if (!idle_timer_disabled && !bfqq)
+- return rq;
++ return;
+
+ /*
+ * rq and bfqq are guaranteed to exist until this function
+@@ -4299,7 +4281,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ * In addition, the following queue lock guarantees that
+ * bfqq_group(bfqq) exists as well.
+ */
+- spin_lock_irq(hctx->queue->queue_lock);
++ spin_lock_irq(q->queue_lock);
+ if (idle_timer_disabled)
+ /*
+ * Since the idle timer has been disabled,
+@@ -4318,8 +4300,35 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ bfqg_stats_set_start_empty_time(bfqg);
+ bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
+ }
+- spin_unlock_irq(hctx->queue->queue_lock);
++ spin_unlock_irq(q->queue_lock);
++}
++#else
++static inline void bfq_update_dispatch_stats(struct request_queue *q,
++ struct request *rq,
++ struct bfq_queue *in_serv_queue,
++ bool idle_timer_disabled) {}
+ #endif
++static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
++{
++ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
++ struct request *rq;
++ struct bfq_queue *in_serv_queue;
++ bool waiting_rq, idle_timer_disabled;
++
++ spin_lock_irq(&bfqd->lock);
++
++ in_serv_queue = bfqd->in_service_queue;
++ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
++
++ rq = __bfq_dispatch_request(hctx);
++
++ idle_timer_disabled =
++ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
++
++ spin_unlock_irq(&bfqd->lock);
++
++ bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
++ idle_timer_disabled);
+
+ return rq;
+ }
+@@ -4881,6 +4890,38 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ return idle_timer_disabled;
+ }
+
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
++static void bfq_update_insert_stats(struct request_queue *q,
++ struct bfq_queue *bfqq,
++ bool idle_timer_disabled,
++ unsigned int cmd_flags)
++{
++ if (!bfqq)
++ return;
++
++ /*
++ * bfqq still exists, because it can disappear only after
++ * either it is merged with another queue, or the process it
++ * is associated with exits. But both actions must be taken by
++ * the same process currently executing this flow of
++ * instructions.
++ *
++ * In addition, the following queue lock guarantees that
++ * bfqq_group(bfqq) exists as well.
++ */
++ spin_lock_irq(q->queue_lock);
++ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
++ if (idle_timer_disabled)
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++ spin_unlock_irq(q->queue_lock);
++}
++#else
++static inline void bfq_update_insert_stats(struct request_queue *q,
++ struct bfq_queue *bfqq,
++ bool idle_timer_disabled,
++ unsigned int cmd_flags) {}
++#endif
++
+ static void bfq_prepare_request(struct request *rq, struct bio *bio);
+
+ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+@@ -4889,10 +4930,8 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ bool idle_timer_disabled = false;
+ unsigned int cmd_flags;
+-#endif
+
+ spin_lock_irq(&bfqd->lock);
+ if (blk_mq_sched_try_insert_merge(q, rq)) {
+@@ -4938,7 +4977,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ bfqq = RQ_BFQQ(rq);
+ }
+
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ idle_timer_disabled = __bfq_insert_request(bfqd, rq);
+ /*
+ * Update bfqq, because, if a queue merge has occurred
+@@ -4946,9 +4984,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ * redirected into a new queue.
+ */
+ bfqq = RQ_BFQQ(rq);
+-#else
+- __bfq_insert_request(bfqd, rq);
+-#endif
+
+ if (rq_mergeable(rq)) {
+ elv_rqhash_add(q, rq);
+@@ -4956,34 +4991,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ q->last_merge = rq;
+ }
+ }
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
++
+ /*
+ * Cache cmd_flags before releasing scheduler lock, because rq
+ * may disappear afterwards (for example, because of a request
+ * merge).
+ */
+ cmd_flags = rq->cmd_flags;
+-#endif
++
+ spin_unlock_irq(&bfqd->lock);
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+- if (!bfqq)
+- return;
+- /*
+- * bfqq still exists, because it can disappear only after
+- * either it is merged with another queue, or the process it
+- * is associated with exits. But both actions must be taken by
+- * the same process currently executing this flow of
+- * instruction.
+- *
+- * In addition, the following queue lock guarantees that
+- * bfqq_group(bfqq) exists as well.
+- */
+- spin_lock_irq(q->queue_lock);
+- bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
+- if (idle_timer_disabled)
+- bfqg_stats_update_idle_time(bfqq_group(bfqq));
+- spin_unlock_irq(q->queue_lock);
+-#endif
++ bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
++ cmd_flags);
+ }
+
+ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
+
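The refactor above uses a standard kernel idiom for removing scattered #if blocks from a hot path: the conditional work moves into one helper whose #else counterpart is an empty static inline, so callers need no ifdefs and the stub compiles away. Stripped of the BFQ specifics, the shape is roughly:

    #ifdef CONFIG_FOO_STATS
    static void update_stats(int queue_id, unsigned int cmd_flags)
    {
            /* real accounting, built only when the option is enabled */
    }
    #else
    static inline void update_stats(int queue_id, unsigned int cmd_flags) {}
    #endif

    static void dispatch_request(void)
    {
            /* same call with or without CONFIG_FOO_STATS; the stub folds away */
            update_stats(0, 0);
    }

CONFIG_FOO_STATS, update_stats() and its arguments are placeholders for illustration; the patch's real helpers are bfq_update_dispatch_stats() and bfq_update_insert_stats().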
+From 0d0d05632872b226f4fae5e56af8736a4c24bf57 Mon Sep 17 00:00:00 2001
+From: Melzani Alessandro <melzani.alessandro@gmail.com>
+Date: Mon, 26 Feb 2018 22:43:30 +0100
+Subject: [PATCH 21/23] bfq-sq, bfq-mq: port of "bfq: Use icq_to_bic()
+ consistently"
+
+Some code uses icq_to_bic() to convert an io_cq pointer to a
+bfq_io_cq pointer while other code uses a direct cast. Convert
+the code that uses a direct cast such that it uses icq_to_bic().
+
+Signed-off-by: Alessandro Melzani <melzani.alessandro@gmail.com>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ block/bfq-sq-iosched.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 5a211620f316..7b1269558c47 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -272,7 +272,7 @@ static const unsigned long max_service_from_wr = 120000;
+ #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
+
+-#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0])
+ #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
+
+ /**
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index f95deaab49a1..c4aff8d55fc4 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -266,7 +266,7 @@ static const unsigned long max_service_from_wr = 120000;
+ #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
+
+-#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0])
+ #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
+
+ static void bfq_schedule_dispatch(struct bfq_data *bfqd);
+
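icq_to_bic() itself is not shown in this hunk; in the BFQ sources it is essentially a container_of() wrapper, so the change swaps an unchecked pointer cast for the type-aware helper. Sketched, not the verbatim kernel code:

    /* struct bfq_io_cq embeds its struct io_cq member */
    static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
    {
            return container_of(icq, struct bfq_io_cq, icq);
    }

The old direct cast, (struct bfq_io_cq *)(rq)->elv.priv[0], only works because the io_cq happens to sit at the start of struct bfq_io_cq; container_of() stays correct even if that layout assumption ever changes.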
+From 4cb5de6add7d6ad0d25d73cb95dc871305db1522 Mon Sep 17 00:00:00 2001
+From: Melzani Alessandro <melzani.alessandro@gmail.com>
+Date: Mon, 26 Feb 2018 22:59:30 +0100
+Subject: [PATCH 22/23] bfq-sq, bfq-mq: port of "block, bfq: fix error handle
+ in bfq_init"
+
+If elv_register() fails, bfq_pool should be freed.
+
+Signed-off-by: Alessandro Melzani <melzani.alessandro@gmail.com>
+---
+ block/bfq-mq-iosched.c | 4 +++-
+ block/bfq-sq-iosched.c | 4 +++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 7b1269558c47..964e88c2ce59 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -6129,7 +6129,7 @@ static int __init bfq_init(void)
+
+ ret = elv_register(&iosched_bfq_mq);
+ if (ret)
+- goto err_pol_unreg;
++ goto slab_kill;
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ strcat(msg, " (with cgroups support)");
+@@ -6138,6 +6138,8 @@ static int __init bfq_init(void)
+
+ return 0;
+
++slab_kill:
++ bfq_slab_kill();
+ err_pol_unreg:
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index c4aff8d55fc4..7f0cf1f01ffc 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -5590,7 +5590,7 @@ static int __init bfq_init(void)
+
+ ret = elv_register(&iosched_bfq);
+ if (ret)
+- goto err_pol_unreg;
++ goto slab_kill;
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ strcat(msg, " (with cgroups support)");
+@@ -5599,6 +5599,8 @@ static int __init bfq_init(void)
+
+ return 0;
+
++slab_kill:
++ bfq_slab_kill();
+ err_pol_unreg:
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+
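The two hunks above follow the usual bfq_init() unwind layout: cleanup labels sit in reverse order of the setup steps, so jumping to the newly added slab_kill label frees the slab cache and then falls through to the policy unregistration. Reduced to a runnable sketch with placeholder names:

    #include <stdio.h>

    static int register_policy(void)    { return 0; }
    static int create_slab(void)        { return 0; }
    static int register_elevator(void)  { return -1; } /* pretend this fails */
    static void destroy_slab(void)      { puts("slab destroyed"); }
    static void unregister_policy(void) { puts("policy unregistered"); }

    static int init(void)
    {
            int ret;

            ret = register_policy();        /* step 1 */
            if (ret)
                    return ret;
            ret = create_slab();            /* step 2 */
            if (ret)
                    goto err_pol_unreg;
            ret = register_elevator();      /* step 3 */
            if (ret)
                    goto slab_kill;         /* the jump this patch adds */
            return 0;

    slab_kill:
            destroy_slab();                 /* undo step 2, then fall through */
    err_pol_unreg:
            unregister_policy();            /* undo step 1 */
            return ret;
    }

    int main(void)
    {
            return init() ? 1 : 0;
    }

Before the fix, the elv_register() failure path jumped straight to err_pol_unreg, so the equivalent of destroy_slab() was never reached and the slab cache leaked.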
+From 1f77c173aaa87ffb22c9f062a6449245d14311e4 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 4 Apr 2018 11:28:16 +0200
+Subject: [PATCH 23/23] block, bfq-sq, bfq-mq: lower-bound the estimated peak
+ rate to 1
+
+If a storage device handled by BFQ happens to be slower than 7.5 KB/s
+for a certain amount of time (in the order of a second), then the
+estimated peak rate of the device, maintained in BFQ, becomes equal to
+0. The reason is the limited precision with which the rate is
+represented (details on the range of representable values in the
+comments introduced by this commit). This leads to a division-by-zero
+error where the estimated peak rate is used as a divisor. Such a
+failure has been reported in [1].
+
+This commit addresses this issue by:
+1. Lower-bounding the estimated peak rate to 1
+2. Adding and improving comments on the range of rates representable
+
+[1] https://www.spinics.net/lists/kernel/msg2739205.html
+
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 25 ++++++++++++++++++++++++-
+ block/bfq-mq.h | 7 ++++++-
+ block/bfq-sq-iosched.c | 25 ++++++++++++++++++++++++-
+ block/bfq.h | 7 ++++++-
+ 4 files changed, 60 insertions(+), 4 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 964e88c2ce59..03efd90c5d20 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -160,7 +160,20 @@ static struct kmem_cache *bfq_pool;
+ /* Target observation time interval for a peak-rate update (ns) */
+ #define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
+
+-/* Shift used for peak rate fixed precision calculations. */
++/*
++ * Shift used for peak-rate fixed precision calculations.
++ * With
++ * - the current shift: 16 positions
++ * - the current type used to store rate: u32
++ * - the current unit of measure for rate: [sectors/usec], or, more precisely,
++ * [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
++ * the range of rates that can be stored is
++ * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
++ * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
++ * [15, 65G] sectors/sec
++ * Which, assuming a sector size of 512B, corresponds to a range of
++ * [7.5K, 33T] B/sec
++ */
+ #define BFQ_RATE_SHIFT 16
+
+ /*
+@@ -2881,6 +2894,16 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
+
+ bfqd->peak_rate += rate;
++
++ /*
++ * For a very slow device, bfqd->peak_rate can reach 0 (see
++ * the minimum representable values reported in the comments
++ * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid
++ * divisions by zero where bfqd->peak_rate is used as a
++ * divisor.
++ */
++ bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
++
+ update_thr_responsiveness_params(bfqd);
+ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index e2ae11bf8f76..4a54e5076863 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -490,7 +490,12 @@ struct bfq_data {
+ u32 last_rq_max_size;
+ /* time elapsed from first dispatch in current observ. interval (us) */
+ u64 delta_from_first;
+- /* current estimate of device peak rate */
++ /*
++ * Current estimate of the device peak rate, measured in
++ * [(sectors/usec) / 2^BFQ_RATE_SHIFT]. The left-shift by
++ * BFQ_RATE_SHIFT is performed to increase precision in
++ * fixed-point calculations.
++ */
+ u32 peak_rate;
+
+ /* maximum budget allotted to a bfq_queue before rescheduling */
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 7f0cf1f01ffc..e96213865fc2 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -154,7 +154,20 @@ static struct kmem_cache *bfq_pool;
+ /* Target observation time interval for a peak-rate update (ns) */
+ #define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
+
+-/* Shift used for peak rate fixed precision calculations. */
++/*
++ * Shift used for peak-rate fixed precision calculations.
++ * With
++ * - the current shift: 16 positions
++ * - the current type used to store rate: u32
++ * - the current unit of measure for rate: [sectors/usec], or, more precisely,
++ * [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
++ * the range of rates that can be stored is
++ * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
++ * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
++ * [15, 65G] sectors/sec
++ * Which, assuming a sector size of 512B, corresponds to a range of
++ * [7.5K, 33T] B/sec
++ */
+ #define BFQ_RATE_SHIFT 16
+
+ /*
+@@ -2695,6 +2708,16 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
+
+ bfqd->peak_rate += rate;
++
++ /*
++ * For a very slow device, bfqd->peak_rate can reach 0 (see
++ * the minimum representable values reported in the comments
++ * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid
++ * divisions by zero where bfqd->peak_rate is used as a
++ * divisor.
++ */
++ bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
++
+ update_thr_responsiveness_params(bfqd);
+ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
+
+diff --git a/block/bfq.h b/block/bfq.h
+index 4d2fe7f77af1..a25e76c906d9 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -498,7 +498,12 @@ struct bfq_data {
+ u32 last_rq_max_size;
+ /* time elapsed from first dispatch in current observ. interval (us) */
+ u64 delta_from_first;
+- /* current estimate of device peak rate */
++ /*
++ * Current estimate of the device peak rate, measured in
++ * [(sectors/usec) / 2^BFQ_RATE_SHIFT]. The left-shift by
++ * BFQ_RATE_SHIFT is performed to increase precision in
++ * fixed-point calculations.
++ */
+ u32 peak_rate;
+
+ /* maximum budget allotted to a bfq_queue before rescheduling */
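The [7.5K, 33T] B/sec range quoted in the new BFQ_RATE_SHIFT comments can be re-derived with a few lines of arithmetic; this check assumes 512-byte sectors, as the comment does:

    #include <stdio.h>

    int main(void)
    {
            const double scale = 1 << 16;       /* 2^BFQ_RATE_SHIFT */
            const double usec_per_sec = 1e6;
            const double sector_bytes = 512.0;

            /* smallest non-zero stored rate: 1/2^16 sectors/usec */
            double min_bps = (1.0 / scale) * usec_per_sec * sector_bytes;
            /* largest stored rate: 2^(32-16) sectors/usec */
            double max_bps = scale * usec_per_sec * sector_bytes;

            printf("min ~ %.0f B/s\n", min_bps);         /* ~7812, i.e. ~7.5K */
            printf("max ~ %.1f TB/s\n", max_bps / 1e12); /* ~33.6, i.e. ~33T */
            return 0;
    }

A device that sustains less than that minimum over a whole observation interval would see its stored rate truncate to 0, which is exactly the division-by-zero case the max_t(u32, 1, bfqd->peak_rate) clamp above closes.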
diff --git a/sys-kernel/linux-image-redcore-lts/files/0002-Make-preemptible-kernel-default.patch b/sys-kernel/linux-image-redcore-lts/files/0002-Make-preemptible-kernel-default.patch
new file mode 100644
index 00000000..69abb373
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0002-Make-preemptible-kernel-default.patch
@@ -0,0 +1,733 @@
+From e8e37da685f7988182d7920a711e00dd2457af65 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Sat, 29 Oct 2016 11:20:37 +1100
+Subject: [PATCH 02/16] Make preemptible kernel default.
+
+Make full preempt default on all arches.
+---
+ arch/arc/configs/tb10x_defconfig | 2 +-
+ arch/arm/configs/bcm2835_defconfig | 2 +-
+ arch/arm/configs/imx_v6_v7_defconfig | 2 +-
+ arch/arm/configs/mps2_defconfig | 2 +-
+ arch/arm/configs/mxs_defconfig | 2 +-
+ arch/blackfin/configs/BF518F-EZBRD_defconfig | 2 +-
+ arch/blackfin/configs/BF526-EZBRD_defconfig | 2 +-
+ arch/blackfin/configs/BF527-EZKIT-V2_defconfig | 2 +-
+ arch/blackfin/configs/BF527-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BF527-TLL6527M_defconfig | 2 +-
+ arch/blackfin/configs/BF533-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BF533-STAMP_defconfig | 2 +-
+ arch/blackfin/configs/BF537-STAMP_defconfig | 2 +-
+ arch/blackfin/configs/BF538-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BF548-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BF561-ACVILON_defconfig | 2 +-
+ arch/blackfin/configs/BF561-EZKIT-SMP_defconfig | 2 +-
+ arch/blackfin/configs/BF561-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BF609-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BlackStamp_defconfig | 2 +-
+ arch/blackfin/configs/CM-BF527_defconfig | 2 +-
+ arch/blackfin/configs/PNAV-10_defconfig | 2 +-
+ arch/blackfin/configs/SRV1_defconfig | 2 +-
+ arch/blackfin/configs/TCM-BF518_defconfig | 2 +-
+ arch/mips/configs/fuloong2e_defconfig | 3 ++-
+ arch/mips/configs/gpr_defconfig | 3 ++-
+ arch/mips/configs/ip22_defconfig | 3 ++-
+ arch/mips/configs/ip28_defconfig | 3 ++-
+ arch/mips/configs/jazz_defconfig | 3 ++-
+ arch/mips/configs/mtx1_defconfig | 3 ++-
+ arch/mips/configs/nlm_xlr_defconfig | 2 +-
+ arch/mips/configs/pic32mzda_defconfig | 2 +-
+ arch/mips/configs/pistachio_defconfig | 2 +-
+ arch/mips/configs/pnx8335_stb225_defconfig | 2 +-
+ arch/mips/configs/rm200_defconfig | 3 ++-
+ arch/parisc/configs/712_defconfig | 2 +-
+ arch/parisc/configs/c3000_defconfig | 2 +-
+ arch/parisc/configs/default_defconfig | 2 +-
+ arch/powerpc/configs/c2k_defconfig | 2 +-
+ arch/powerpc/configs/ppc6xx_defconfig | 2 +-
+ arch/score/configs/spct6600_defconfig | 2 +-
+ arch/sh/configs/se7712_defconfig | 2 +-
+ arch/sh/configs/se7721_defconfig | 2 +-
+ arch/sh/configs/titan_defconfig | 2 +-
+ arch/sparc/configs/sparc64_defconfig | 2 +-
+ arch/tile/configs/tilegx_defconfig | 2 +-
+ arch/tile/configs/tilepro_defconfig | 2 +-
+ arch/x86/configs/i386_defconfig | 2 +-
+ arch/x86/configs/x86_64_defconfig | 2 +-
+ kernel/Kconfig.preempt | 7 ++++---
+ 50 files changed, 60 insertions(+), 52 deletions(-)
+
+diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
+index f30182549395..42910f628869 100644
+--- a/arch/arc/configs/tb10x_defconfig
++++ b/arch/arc/configs/tb10x_defconfig
+@@ -28,7 +28,7 @@ CONFIG_ARC_PLAT_TB10X=y
+ CONFIG_ARC_CACHE_LINE_SHIFT=5
+ CONFIG_HZ=250
+ CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_COMPACTION is not set
+ CONFIG_NET=y
+ CONFIG_PACKET=y
+diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
+index 43dab4890ad3..44a52166ca5e 100644
+--- a/arch/arm/configs/bcm2835_defconfig
++++ b/arch/arm/configs/bcm2835_defconfig
+@@ -29,7 +29,7 @@ CONFIG_MODULE_UNLOAD=y
+ CONFIG_ARCH_MULTI_V6=y
+ CONFIG_ARCH_BCM=y
+ CONFIG_ARCH_BCM2835=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_AEABI=y
+ CONFIG_KSM=y
+ CONFIG_CLEANCACHE=y
+diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
+index 32acac9ab81a..1482bb312987 100644
+--- a/arch/arm/configs/imx_v6_v7_defconfig
++++ b/arch/arm/configs/imx_v6_v7_defconfig
+@@ -47,7 +47,7 @@ CONFIG_PCI_MSI=y
+ CONFIG_PCI_IMX6=y
+ CONFIG_SMP=y
+ CONFIG_ARM_PSCI=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_AEABI=y
+ CONFIG_HIGHMEM=y
+ CONFIG_CMA=y
+diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig
+index 0bcdec7cc169..10ceaefa51e0 100644
+--- a/arch/arm/configs/mps2_defconfig
++++ b/arch/arm/configs/mps2_defconfig
+@@ -18,7 +18,7 @@ CONFIG_ARCH_MPS2=y
+ CONFIG_SET_MEM_PARAM=y
+ CONFIG_DRAM_BASE=0x21000000
+ CONFIG_DRAM_SIZE=0x1000000
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_ATAGS is not set
+ CONFIG_ZBOOT_ROM_TEXT=0x0
+ CONFIG_ZBOOT_ROM_BSS=0x0
+diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
+index e5822ab01b7d..3e77e02f678f 100644
+--- a/arch/arm/configs/mxs_defconfig
++++ b/arch/arm/configs/mxs_defconfig
+@@ -27,7 +27,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
+ # CONFIG_ARCH_MULTI_V7 is not set
+ CONFIG_ARCH_MXS=y
+ # CONFIG_ARM_THUMB is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_AEABI=y
+ CONFIG_NET=y
+ CONFIG_PACKET=y
+diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
+index 99c00d835f47..39b91dfa55b5 100644
+--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
++++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF518=y
+ CONFIG_IRQ_TIMER0=12
+ # CONFIG_CYCLES_CLOCKSOURCE is not set
+diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
+index e66ba31ef84d..675cadb3a0c4 100644
+--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
++++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF526=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_BFIN526_EZBRD=y
+diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+index 0207c588c19f..4c517c443af5 100644
+--- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
++++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_2=y
+ CONFIG_BFIN527_EZKIT_V2=y
+diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
+index 99c131ba7d90..bf8df3e6cf02 100644
+--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_1=y
+ CONFIG_IRQ_USB_INT0=11
+diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig
+index cdeb51856f26..0220b3b15c53 100644
+--- a/arch/blackfin/configs/BF527-TLL6527M_defconfig
++++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig
+@@ -21,7 +21,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_LBDAF is not set
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_2=y
+ CONFIG_BFIN527_TLL6527M=y
+diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
+index ed7d2c096739..6023e3fd2c48 100644
+--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BFIN533_EZKIT=y
+ CONFIG_TIMER0=11
+ CONFIG_CLKIN_HZ=27000000
+diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
+index 0c241f4d28d7..f5cd0f18b711 100644
+--- a/arch/blackfin/configs/BF533-STAMP_defconfig
++++ b/arch/blackfin/configs/BF533-STAMP_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_TIMER0=11
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
+index e5360b30e39a..48085fde7f9e 100644
+--- a/arch/blackfin/configs/BF537-STAMP_defconfig
++++ b/arch/blackfin/configs/BF537-STAMP_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF537=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
+index 60f6fb86125c..12deeaaef3cb 100644
+--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
+@@ -21,7 +21,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF538=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_IRQ_TIMER1=12
+diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
+index 38cb17d218d4..6a68ffc55b5a 100644
+--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF548_std=y
+ CONFIG_IRQ_TIMER0=11
+ # CONFIG_CYCLES_CLOCKSOURCE is not set
+diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
+index 78f6bc79f910..e9f3ba783a4e 100644
+--- a/arch/blackfin/configs/BF561-ACVILON_defconfig
++++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
+@@ -20,7 +20,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_LBDAF is not set
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF561=y
+ CONFIG_BF_REV_0_5=y
+ CONFIG_IRQ_TIMER0=10
+diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+index fac8bb578249..89b75a6c3fab 100644
+--- a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
++++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF561=y
+ CONFIG_SMP=y
+ CONFIG_IRQ_TIMER0=10
+diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
+index 2a2e4d0cebc1..67b3d2f419ba 100644
+--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF561=y
+ CONFIG_IRQ_TIMER0=10
+ CONFIG_CLKIN_HZ=30000000
+diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
+index 3ce77f07208a..8cc75d4218fb 100644
+--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
+@@ -20,7 +20,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF609=y
+ CONFIG_PINT1_ASSIGN=0x01010000
+ CONFIG_PINT2_ASSIGN=0x07000101
+diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
+index f4a9200e1ab1..9faf0ec7007f 100644
+--- a/arch/blackfin/configs/BlackStamp_defconfig
++++ b/arch/blackfin/configs/BlackStamp_defconfig
+@@ -17,7 +17,7 @@ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF532=y
+ CONFIG_BF_REV_0_5=y
+ CONFIG_BLACKSTAMP=y
+diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
+index 1902bb05d086..4a1ad4fd7bb2 100644
+--- a/arch/blackfin/configs/CM-BF527_defconfig
++++ b/arch/blackfin/configs/CM-BF527_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_1=y
+ CONFIG_IRQ_TIMER0=12
+diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
+index c7926812971c..9d787e28bbe8 100644
+--- a/arch/blackfin/configs/PNAV-10_defconfig
++++ b/arch/blackfin/configs/PNAV-10_defconfig
+@@ -15,7 +15,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF537=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_PNAV10=y
+diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
+index 23fdc57d657a..225df32dc9a8 100644
+--- a/arch/blackfin/configs/SRV1_defconfig
++++ b/arch/blackfin/configs/SRV1_defconfig
+@@ -13,7 +13,7 @@ CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF537=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_BOOT_LOAD=0x400000
+diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
+index e28959479fe0..425c24e43c34 100644
+--- a/arch/blackfin/configs/TCM-BF518_defconfig
++++ b/arch/blackfin/configs/TCM-BF518_defconfig
+@@ -23,7 +23,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF518=y
+ CONFIG_BF_REV_0_1=y
+ CONFIG_BFIN518F_TCM=y
+diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
+index 499f51498ecb..f7cb39b0662c 100644
+--- a/arch/mips/configs/fuloong2e_defconfig
++++ b/arch/mips/configs/fuloong2e_defconfig
+@@ -2,7 +2,8 @@ CONFIG_MACH_LOONGSON64=y
+ CONFIG_64BIT=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_LOCALVERSION="-fuloong2e"
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
+index 55438fc9991e..db03ef4f737d 100644
+--- a/arch/mips/configs/gpr_defconfig
++++ b/arch/mips/configs/gpr_defconfig
+@@ -1,7 +1,8 @@
+ CONFIG_MIPS_ALCHEMY=y
+ CONFIG_MIPS_GPR=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
+index 83e8fe2064aa..93e7b167433b 100644
+--- a/arch/mips/configs/ip22_defconfig
++++ b/arch/mips/configs/ip22_defconfig
+@@ -3,7 +3,8 @@ CONFIG_CPU_R5000=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_HZ_1000=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
+index d0a4c2cfacf8..6f0600e99c25 100644
+--- a/arch/mips/configs/ip28_defconfig
++++ b/arch/mips/configs/ip28_defconfig
+@@ -1,6 +1,7 @@
+ CONFIG_SGI_IP28=y
+ CONFIG_ARC_CONSOLE=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
+index 9ad1c94376c8..1d62ce7ff5dc 100644
+--- a/arch/mips/configs/jazz_defconfig
++++ b/arch/mips/configs/jazz_defconfig
+@@ -1,6 +1,7 @@
+ CONFIG_MACH_JAZZ=y
+ CONFIG_OLIVETTI_M700=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_BSD_PROCESS_ACCT=y
+diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
+index c3d0d0a6e044..aa3426d5f7d7 100644
+--- a/arch/mips/configs/mtx1_defconfig
++++ b/arch/mips/configs/mtx1_defconfig
+@@ -1,6 +1,7 @@
+ CONFIG_MIPS_ALCHEMY=y
+ CONFIG_MIPS_MTX1=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
+index 1e18fd7de209..b514e91e5426 100644
+--- a/arch/mips/configs/nlm_xlr_defconfig
++++ b/arch/mips/configs/nlm_xlr_defconfig
+@@ -5,7 +5,7 @@ CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+ CONFIG_SMP=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_KEXEC=y
+ CONFIG_CROSS_COMPILE=""
+ # CONFIG_LOCALVERSION_AUTO is not set
+diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
+index 52192c632ae8..96b087498dab 100644
+--- a/arch/mips/configs/pic32mzda_defconfig
++++ b/arch/mips/configs/pic32mzda_defconfig
+@@ -1,7 +1,7 @@
+ CONFIG_MACH_PIC32=y
+ CONFIG_DTB_PIC32_MZDA_SK=y
+ CONFIG_HZ_100=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_SECCOMP is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_NO_HZ=y
+diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
+index b22a3cf149b6..cfffca3d37f4 100644
+--- a/arch/mips/configs/pistachio_defconfig
++++ b/arch/mips/configs/pistachio_defconfig
+@@ -5,7 +5,7 @@ CONFIG_MIPS_CPS=y
+ CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+ CONFIG_ZSMALLOC=y
+ CONFIG_NR_CPUS=4
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_DEFAULT_HOSTNAME="localhost"
+ CONFIG_SYSVIPC=y
+diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig
+index 81b5eb89446c..19f8cea849a1 100644
+--- a/arch/mips/configs/pnx8335_stb225_defconfig
++++ b/arch/mips/configs/pnx8335_stb225_defconfig
+@@ -3,7 +3,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_HZ_128=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_SECCOMP is not set
+ # CONFIG_LOCALVERSION_AUTO is not set
+ # CONFIG_SWAP is not set
+diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
+index 99679e514042..2ced507a8ba7 100644
+--- a/arch/mips/configs/rm200_defconfig
++++ b/arch/mips/configs/rm200_defconfig
+@@ -2,7 +2,8 @@ CONFIG_SNI_RM=y
+ CONFIG_CPU_LITTLE_ENDIAN=y
+ CONFIG_ARC_CONSOLE=y
+ CONFIG_HZ_1000=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_BSD_PROCESS_ACCT=y
+diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
+index ccc109761f44..a6a5b0b7a9c9 100644
+--- a/arch/parisc/configs/712_defconfig
++++ b/arch/parisc/configs/712_defconfig
+@@ -13,7 +13,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ CONFIG_PA7100LC=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_GSC_LASI=y
+ # CONFIG_PDC_CHASSIS is not set
+ CONFIG_BINFMT_MISC=m
+diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
+index 8d41a73bd71b..b8e0a6662ff9 100644
+--- a/arch/parisc/configs/c3000_defconfig
++++ b/arch/parisc/configs/c3000_defconfig
+@@ -13,7 +13,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ CONFIG_PA8X00=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_GSC is not set
+ CONFIG_PCI=y
+ CONFIG_PCI_LBA=y
+diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
+index 52c9050a7c5c..8d86d2e989f4 100644
+--- a/arch/parisc/configs/default_defconfig
++++ b/arch/parisc/configs/default_defconfig
+@@ -14,7 +14,7 @@ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ CONFIG_PA7100LC=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_IOMMU_CCIO=y
+ CONFIG_GSC_LASI=y
+ CONFIG_GSC_WAX=y
+diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
+index f1552af9eecc..f8505e6ec7b3 100644
+--- a/arch/powerpc/configs/c2k_defconfig
++++ b/arch/powerpc/configs/c2k_defconfig
+@@ -29,7 +29,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+ CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+ CONFIG_GEN_RTC=y
+ CONFIG_HIGHMEM=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BINFMT_MISC=y
+ CONFIG_PM=y
+ CONFIG_PCI_MSI=y
+diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
+index da0e8d535eb8..c016af41ab4f 100644
+--- a/arch/powerpc/configs/ppc6xx_defconfig
++++ b/arch/powerpc/configs/ppc6xx_defconfig
+@@ -74,7 +74,7 @@ CONFIG_QE_GPIO=y
+ CONFIG_MCU_MPC8349EMITX=y
+ CONFIG_HIGHMEM=y
+ CONFIG_HZ_1000=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BINFMT_MISC=y
+ CONFIG_HIBERNATION=y
+ CONFIG_PM_DEBUG=y
+diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig
+index b2d8802f43b4..46434ca1fa10 100644
+--- a/arch/score/configs/spct6600_defconfig
++++ b/arch/score/configs/spct6600_defconfig
+@@ -1,5 +1,5 @@
+ CONFIG_HZ_100=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_EXPERIMENTAL=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
+index 5a1097641247..eb5fbf554e7f 100644
+--- a/arch/sh/configs/se7712_defconfig
++++ b/arch/sh/configs/se7712_defconfig
+@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
+ CONFIG_SH_SOLUTION_ENGINE=y
+ CONFIG_SH_PCLK_FREQ=66666666
+ CONFIG_HEARTBEAT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
+ CONFIG_NET=y
+diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
+index 9c0ef13bee10..cbaa65c8bf9e 100644
+--- a/arch/sh/configs/se7721_defconfig
++++ b/arch/sh/configs/se7721_defconfig
+@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
+ CONFIG_SH_7721_SOLUTION_ENGINE=y
+ CONFIG_SH_PCLK_FREQ=33333333
+ CONFIG_HEARTBEAT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda2"
+ CONFIG_NET=y
+diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
+index ceb48e9b70f4..1a69eda6610c 100644
+--- a/arch/sh/configs/titan_defconfig
++++ b/arch/sh/configs/titan_defconfig
+@@ -20,7 +20,7 @@ CONFIG_SH_TITAN=y
+ CONFIG_SH_PCLK_FREQ=30000000
+ CONFIG_SH_DMA=y
+ CONFIG_SH_DMA_API=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC1,38400N81 root=/dev/nfs ip=:::::eth1:autoconf rw"
+ CONFIG_PCI=y
+diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
+index 4d4e1cc6402f..04bea1d28ba7 100644
+--- a/arch/sparc/configs/sparc64_defconfig
++++ b/arch/sparc/configs/sparc64_defconfig
+@@ -22,7 +22,7 @@ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_NUMA=y
+ CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_SUN_LDOMS=y
+ CONFIG_PCI=y
+ CONFIG_PCI_MSI=y
+diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
+index 9f94435cc44f..aa78ee6cd5eb 100644
+--- a/arch/tile/configs/tilegx_defconfig
++++ b/arch/tile/configs/tilegx_defconfig
+@@ -47,7 +47,7 @@ CONFIG_CFQ_GROUP_IOSCHED=y
+ CONFIG_NR_CPUS=100
+ CONFIG_HZ_100=y
+ # CONFIG_COMPACTION is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_TILE_PCI_IO=y
+ CONFIG_PCI_DEBUG=y
+ # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
+index 1c5bd4f8ffca..38005862062c 100644
+--- a/arch/tile/configs/tilepro_defconfig
++++ b/arch/tile/configs/tilepro_defconfig
+@@ -44,7 +44,7 @@ CONFIG_KARMA_PARTITION=y
+ CONFIG_CFQ_GROUP_IOSCHED=y
+ CONFIG_HZ_100=y
+ # CONFIG_COMPACTION is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_PCI_DEBUG=y
+ # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+ CONFIG_BINFMT_MISC=y
+diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
+index 0eb9f92f3717..e5890ae917e5 100644
+--- a/arch/x86/configs/i386_defconfig
++++ b/arch/x86/configs/i386_defconfig
+@@ -41,7 +41,7 @@ CONFIG_SMP=y
+ CONFIG_X86_GENERIC=y
+ CONFIG_HPET_TIMER=y
+ CONFIG_SCHED_SMT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+ CONFIG_X86_MCE=y
+ CONFIG_X86_REBOOTFIXUPS=y
+diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
+index 4a4b16e56d35..7452dcadda74 100644
+--- a/arch/x86/configs/x86_64_defconfig
++++ b/arch/x86/configs/x86_64_defconfig
+@@ -40,7 +40,7 @@ CONFIG_SMP=y
+ CONFIG_CALGARY_IOMMU=y
+ CONFIG_NR_CPUS=64
+ CONFIG_SCHED_SMT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+ CONFIG_X86_MCE=y
+ CONFIG_MICROCODE=y
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index 3f9c97419f02..1dc79ec7ad09 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -1,7 +1,7 @@
+
+ choice
+ prompt "Preemption Model"
+- default PREEMPT_NONE
++ default PREEMPT
+
+ config PREEMPT_NONE
+ bool "No Forced Preemption (Server)"
+@@ -17,7 +17,7 @@ config PREEMPT_NONE
+ latencies.
+
+ config PREEMPT_VOLUNTARY
+- bool "Voluntary Kernel Preemption (Desktop)"
++ bool "Voluntary Kernel Preemption (Nothing)"
+ help
+ This option reduces the latency of the kernel by adding more
+ "explicit preemption points" to the kernel code. These new
+@@ -31,7 +31,8 @@ config PREEMPT_VOLUNTARY
+ applications to run more 'smoothly' even when the system is
+ under load.
+
+- Select this if you are building a kernel for a desktop system.
++ Select this for no system in particular (choose Preemptible
++ instead on a desktop if you know what's good for you).
+
+ config PREEMPT
+ bool "Preemptible Kernel (Low-Latency Desktop)"
+--
+2.11.0
+
diff --git a/sys-kernel/linux-image-redcore-lts/files/0003-Expose-vmsplit-for-our-poor-32-bit-users.patch b/sys-kernel/linux-image-redcore-lts/files/0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
new file mode 100644
index 00000000..b7897dbe
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
@@ -0,0 +1,48 @@
+From 44fc740a3ff85d378c28a416a076cc7e019d7b8c Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Fri, 12 May 2017 13:07:37 +1000
+Subject: [PATCH 03/16] Expose vmsplit for our poor 32 bit users.
+
+---
+ arch/x86/Kconfig | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index e06a7b4e1dc4..931aba4fc567 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1361,7 +1361,7 @@ config HIGHMEM64G
+ endchoice
+
+ choice
+- prompt "Memory split" if EXPERT
++ prompt "Memory split"
+ default VMSPLIT_3G
+ depends on X86_32
+ ---help---
+@@ -1381,17 +1381,17 @@ choice
+ option alone!
+
+ config VMSPLIT_3G
+- bool "3G/1G user/kernel split"
++ bool "Default 896MB lowmem (3G/1G user/kernel split)"
+ config VMSPLIT_3G_OPT
+ depends on !X86_PAE
+- bool "3G/1G user/kernel split (for full 1G low memory)"
++ bool "1GB lowmem (3G/1G user/kernel split)"
+ config VMSPLIT_2G
+- bool "2G/2G user/kernel split"
++ bool "2GB lowmem (2G/2G user/kernel split)"
+ config VMSPLIT_2G_OPT
+ depends on !X86_PAE
+- bool "2G/2G user/kernel split (for full 2G low memory)"
++ bool "2GB lowmem (2G/2G user/kernel split)"
+ config VMSPLIT_1G
+- bool "1G/3G user/kernel split"
++ bool "3GB lowmem (1G/3G user/kernel split)"
+ endchoice
+
+ config PAGE_OFFSET
+--
+2.11.0
+
diff --git a/sys-kernel/linux-image-redcore-lts/files/0004-Create-highres-timeout-variants-of-schedule_timeout-.patch b/sys-kernel/linux-image-redcore-lts/files/0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
new file mode 100644
index 00000000..3c182fbe
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
@@ -0,0 +1,153 @@
+From d27b58b0707ac311be5a51594fc6f22ed1d109e5 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Sat, 12 Aug 2017 11:53:39 +1000
+Subject: [PATCH 04/16] Create highres timeout variants of schedule_timeout
+ functions.
+
+---
+ include/linux/freezer.h | 1 +
+ include/linux/sched.h | 31 +++++++++++++++++++--
+ kernel/time/hrtimer.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 101 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index 3995df1d068f..f8645e8f2444 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -297,6 +297,7 @@ static inline void set_freezable(void) {}
+ #define wait_event_freezekillable_unsafe(wq, condition) \
+ wait_event_killable(wq, condition)
+
++#define pm_freezing (false)
+ #endif /* !CONFIG_FREEZER */
+
+ #endif /* FREEZER_H_INCLUDED */
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 35dc91a0e2ed..38852ebfa864 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -173,13 +173,40 @@ extern cpumask_var_t cpu_isolated_map;
+
+ extern void scheduler_tick(void);
+
+-#define MAX_SCHEDULE_TIMEOUT LONG_MAX
+-
++#define MAX_SCHEDULE_TIMEOUT LONG_MAX
+ extern long schedule_timeout(long timeout);
+ extern long schedule_timeout_interruptible(long timeout);
+ extern long schedule_timeout_killable(long timeout);
+ extern long schedule_timeout_uninterruptible(long timeout);
+ extern long schedule_timeout_idle(long timeout);
++
++#ifdef CONFIG_HIGH_RES_TIMERS
++extern long schedule_msec_hrtimeout(long timeout);
++extern long schedule_min_hrtimeout(void);
++extern long schedule_msec_hrtimeout_interruptible(long timeout);
++extern long schedule_msec_hrtimeout_uninterruptible(long timeout);
++#else
++static inline long schedule_msec_hrtimeout(long timeout)
++{
++ return schedule_timeout(msecs_to_jiffies(timeout));
++}
++
++static inline long schedule_min_hrtimeout(void)
++{
++ return schedule_timeout(1);
++}
++
++static inline long schedule_msec_hrtimeout_interruptible(long timeout)
++{
++ return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
++}
++
++static inline long schedule_msec_hrtimeout_uninterruptible(long timeout)
++{
++ return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
++}
++#endif
++
+ asmlinkage void schedule(void);
+ extern void schedule_preempt_disabled(void);
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 88f75f92ef36..13227cf2814c 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1787,3 +1787,74 @@ int __sched schedule_hrtimeout(ktime_t *expires,
+ return schedule_hrtimeout_range(expires, 0, mode);
+ }
+ EXPORT_SYMBOL_GPL(schedule_hrtimeout);
++
++/*
++ * As per schedule_hrtimeout but takes a millisecond value and returns how
++ * many milliseconds are left.
++ */
++long __sched schedule_msec_hrtimeout(long timeout)
++{
++ struct hrtimer_sleeper t;
++ int delta, secs, jiffs;
++ ktime_t expires;
++
++ if (!timeout) {
++ __set_current_state(TASK_RUNNING);
++ return 0;
++ }
++
++ jiffs = msecs_to_jiffies(timeout);
++ /*
++ * If regular timer resolution is adequate or hrtimer resolution is not
++ * (yet) better than Hz, as would occur during startup, use regular
++ * timers.
++ */
++ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
++ return schedule_timeout(jiffs);
++
++ secs = timeout / 1000;
++ delta = (timeout % 1000) * NSEC_PER_MSEC;
++ expires = ktime_set(secs, delta);
++
++ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
++
++ hrtimer_init_sleeper(&t, current);
++
++ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
++
++ if (likely(t.task))
++ schedule();
++
++ hrtimer_cancel(&t.timer);
++ destroy_hrtimer_on_stack(&t.timer);
++
++ __set_current_state(TASK_RUNNING);
++
++ expires = hrtimer_expires_remaining(&t.timer);
++ timeout = ktime_to_ms(expires);
++ return timeout < 0 ? 0 : timeout;
++}
++
++EXPORT_SYMBOL(schedule_msec_hrtimeout);
++
++long __sched schedule_min_hrtimeout(void)
++{
++ return schedule_msec_hrtimeout(1);
++}
++
++EXPORT_SYMBOL(schedule_min_hrtimeout);
++
++long __sched schedule_msec_hrtimeout_interruptible(long timeout)
++{
++ __set_current_state(TASK_INTERRUPTIBLE);
++ return schedule_msec_hrtimeout(timeout);
++}
++EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
++
++long __sched schedule_msec_hrtimeout_uninterruptible(long timeout)
++{
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ return schedule_msec_hrtimeout(timeout);
++}
++EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
+--
+2.11.0
+
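For orientation, a minimal sketch of how the base helper added above behaves from a caller's point of view; the snippet is illustrative only and not part of the patch, and the function name is hypothetical:

#include <linux/sched.h>

/*
 * Sleep for up to @ms milliseconds unless woken early; returns non-zero
 * if the sleep was cut short, 0 on a full timeout.
 */
static long example_sleep_ms(long ms)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        /*
         * With CONFIG_HIGH_RES_TIMERS this uses an hrtimer whenever the
         * request maps to 4 jiffies or fewer and hrtimer resolution is
         * finer than a tick; otherwise (and on !HIGH_RES_TIMERS builds)
         * it degrades to schedule_timeout(msecs_to_jiffies(ms)).
         */
        return schedule_msec_hrtimeout(ms);
}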
diff --git a/sys-kernel/linux-image-redcore-lts/files/0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch b/sys-kernel/linux-image-redcore-lts/files/0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
new file mode 100644
index 00000000..3c889719
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
@@ -0,0 +1,50 @@
+From 5da7d1778b96c514394334c92de9b3d8d71f4a29 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Sat, 5 Nov 2016 09:27:36 +1100
+Subject: [PATCH 05/16] Special case calls of schedule_timeout(1) to use the
+ min hrtimeout of 1ms, working around low Hz resolutions.
+
+---
+ kernel/time/timer.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 9c18e16059a3..dd4d1b193286 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1741,6 +1741,19 @@ signed long __sched schedule_timeout(signed long timeout)
+
+ expire = timeout + jiffies;
+
++#ifdef CONFIG_HIGH_RES_TIMERS
++ if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++ /*
++ * Special case 1 as being a request for the minimum timeout
++ * and use highres timers to timeout after 1ms to workaround
++ * the granularity of low Hz tick timers.
++ */
++ if (!schedule_min_hrtimeout())
++ return 0;
++ goto out_timeout;
++ }
++#endif
++
+ setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
+ __mod_timer(&timer, expire, false);
+ schedule();
+@@ -1748,10 +1761,10 @@ signed long __sched schedule_timeout(signed long timeout)
+
+ /* Remove the timer from the object tracker */
+ destroy_timer_on_stack(&timer);
+-
++out_timeout:
+ timeout = expire - jiffies;
+
+- out:
++out:
+ return timeout < 0 ? 0 : timeout;
+ }
+ EXPORT_SYMBOL(schedule_timeout);
+--
+2.11.0
+
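A minimal sketch of the kind of unmodified caller this special case targets; example_flag is a hypothetical condition and the snippet is not part of the patch:

#include <linux/compiler.h>
#include <linux/sched.h>

static bool example_flag;       /* hypothetical; set elsewhere, e.g. by an IRQ */

static void example_wait_flag(void)
{
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (READ_ONCE(example_flag))
                        break;
                /*
                 * Unchanged callers benefit automatically: with this patch
                 * a literal schedule_timeout(1) sleeps about 1 ms via
                 * schedule_min_hrtimeout() instead of up to a whole
                 * 10 ms jiffy at HZ=100.
                 */
                schedule_timeout(1);
        }
        __set_current_state(TASK_RUNNING);
}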
diff --git a/sys-kernel/linux-image-redcore-lts/files/0006-Convert-msleep-to-use-hrtimers-when-active.patch b/sys-kernel/linux-image-redcore-lts/files/0006-Convert-msleep-to-use-hrtimers-when-active.patch
new file mode 100644
index 00000000..2f065652
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0006-Convert-msleep-to-use-hrtimers-when-active.patch
@@ -0,0 +1,54 @@
+From 9df803c28bb8ccb2588c0ccaf857b9e673175fed Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Fri, 4 Nov 2016 09:25:54 +1100
+Subject: [PATCH 06/16] Convert msleep to use hrtimers when active.
+
+---
+ kernel/time/timer.c | 24 ++++++++++++++++++++++--
+ 1 file changed, 22 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index dd4d1b193286..c68cb9307f64 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1884,7 +1884,19 @@ void __init init_timers(void)
+ */
+ void msleep(unsigned int msecs)
+ {
+- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
++ int jiffs = msecs_to_jiffies(msecs);
++ unsigned long timeout;
++
++ /*
++ * Use high resolution timers where the resolution of tick based
++ * timers is inadequate.
++ */
++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++ while (msecs)
++ msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
++ return;
++ }
++ timeout = msecs_to_jiffies(msecs) + 1;
+
+ while (timeout)
+ timeout = schedule_timeout_uninterruptible(timeout);
+@@ -1898,7 +1910,15 @@ EXPORT_SYMBOL(msleep);
+ */
+ unsigned long msleep_interruptible(unsigned int msecs)
+ {
+- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
++ int jiffs = msecs_to_jiffies(msecs);
++ unsigned long timeout;
++
++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++ while (msecs && !signal_pending(current))
++ msecs = schedule_msec_hrtimeout_interruptible(msecs);
++ return msecs;
++ }
++ timeout = msecs_to_jiffies(msecs) + 1;
+
+ while (timeout && !signal_pending(current))
+ timeout = schedule_timeout_interruptible(timeout);
+--
+2.11.0
+
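For illustration (not part of the patch), a typical short settle delay; example_write_reg() and EXAMPLE_CTRL are hypothetical:

#include <linux/delay.h>

static void example_settle(void)
{
        example_write_reg(EXAMPLE_CTRL, 0x1);   /* hypothetical register write */
        /*
         * Short msleep() calls (under 5 jiffies' worth) now complete in
         * roughly the requested time; previously msleep(2) at HZ=100
         * slept msecs_to_jiffies(2) + 1 = 2 jiffies, i.e. 10-20 ms.
         */
        msleep(2);
}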
diff --git a/sys-kernel/linux-image-redcore-lts/files/0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch b/sys-kernel/linux-image-redcore-lts/files/0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
new file mode 100644
index 00000000..ff071da8
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
@@ -0,0 +1,529 @@
+diff -Nur a/drivers/block/swim.c b/drivers/block/swim.c
+--- a/drivers/block/swim.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/block/swim.c 2018-11-03 16:30:39.471807304 +0000
+@@ -332,7 +332,7 @@
+ if (swim_readbit(base, MOTOR_ON))
+ break;
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ } else if (action == OFF) {
+ swim_action(base, MOTOR_OFF);
+@@ -351,7 +351,7 @@
+ if (!swim_readbit(base, DISK_IN))
+ break;
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ swim_select(base, RELAX);
+ }
+@@ -375,7 +375,7 @@
+ for (wait = 0; wait < HZ; wait++) {
+
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ swim_select(base, RELAX);
+ if (!swim_readbit(base, STEP))
+diff -Nur a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+--- a/drivers/bluetooth/hci_qca.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/bluetooth/hci_qca.c 2018-11-03 16:31:56.065260061 +0000
+@@ -880,7 +880,7 @@
+ * then host can communicate with new baudrate to controller
+ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
++ schedule_msec_hrtimeout((BAUDRATE_SETTLE_TIMEOUT_MS));
+ set_current_state(TASK_RUNNING);
+
+ return 0;
+diff -Nur a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+--- a/drivers/char/ipmi/ipmi_msghandler.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/char/ipmi/ipmi_msghandler.c 2018-11-03 16:30:39.473807368 +0000
+@@ -2953,7 +2953,7 @@
+ /* Current message first, to preserve order */
+ while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+ /* Wait for the message to clear out. */
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ /* No need for locks, the interface is down. */
+diff -Nur a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+--- a/drivers/char/ipmi/ipmi_ssif.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/char/ipmi/ipmi_ssif.c 2018-11-03 16:30:39.473807368 +0000
+@@ -1200,7 +1200,7 @@
+
+ /* make sure the driver is not looking for flags any more. */
+ while (ssif_info->ssif_state != SSIF_NORMAL)
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ ssif_info->stopping = true;
+ del_timer_sync(&ssif_info->retry_timer);
+diff -Nur a/drivers/char/snsc.c b/drivers/char/snsc.c
+--- a/drivers/char/snsc.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/char/snsc.c 2018-11-03 16:30:39.474807400 +0000
+@@ -198,7 +198,7 @@
+ add_wait_queue(&sd->sd_rq, &wait);
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+
+- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
++ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
+
+ remove_wait_queue(&sd->sd_rq, &wait);
+ if (signal_pending(current)) {
+@@ -294,7 +294,7 @@
+ add_wait_queue(&sd->sd_wq, &wait);
+ spin_unlock_irqrestore(&sd->sd_wlock, flags);
+
+- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
++ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
+
+ remove_wait_queue(&sd->sd_wq, &wait);
+ if (signal_pending(current)) {
+diff -Nur a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2018-11-03 16:30:39.474807400 +0000
+@@ -235,7 +235,7 @@
+ DRM_ERROR("SVGA device lockup.\n");
+ break;
+ }
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+diff -Nur a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2018-11-03 16:30:39.474807400 +0000
+@@ -202,7 +202,7 @@
+ break;
+ }
+ if (lazy)
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ else if ((++count & 0x0F) == 0) {
+ /**
+ * FIXME: Use schedule_hr_timeout here for
+diff -Nur a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
+--- a/drivers/media/pci/ivtv/ivtv-ioctl.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/media/pci/ivtv/ivtv-ioctl.c 2018-11-03 16:30:39.475807432 +0000
+@@ -1154,7 +1154,7 @@
+ TASK_UNINTERRUPTIBLE);
+ if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
+ break;
+- schedule_timeout(msecs_to_jiffies(25));
++ schedule_msec_hrtimeout((25));
+ }
+ finish_wait(&itv->vsync_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
+diff -Nur a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
+--- a/drivers/media/pci/ivtv/ivtv-streams.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/media/pci/ivtv/ivtv-streams.c 2018-11-03 16:30:39.475807432 +0000
+@@ -834,7 +834,7 @@
+ while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
+ time_before(jiffies,
+ then + msecs_to_jiffies(2000))) {
+- schedule_timeout(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout((10));
+ }
+
+ /* To convert jiffies to ms, we must multiply by 1000
+diff -Nur a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
+--- a/drivers/mfd/ucb1x00-core.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/mfd/ucb1x00-core.c 2018-11-03 16:30:39.476807464 +0000
+@@ -253,7 +253,7 @@
+ break;
+ /* yield to other processes */
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ return UCB_ADC_DAT(val);
+diff -Nur a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
+--- a/drivers/misc/sgi-xp/xpc_channel.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/misc/sgi-xp/xpc_channel.c 2018-11-03 16:30:39.476807464 +0000
+@@ -837,7 +837,7 @@
+
+ atomic_inc(&ch->n_on_msg_allocate_wq);
+ prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
+- ret = schedule_timeout(1);
++ ret = schedule_min_hrtimeout();
+ finish_wait(&ch->msg_allocate_wq, &wait);
+ atomic_dec(&ch->n_on_msg_allocate_wq);
+
+diff -Nur a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
+--- a/drivers/net/caif/caif_hsi.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/net/caif/caif_hsi.c 2018-11-03 16:30:39.477807497 +0000
+@@ -940,7 +940,7 @@
+ break;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ retry--;
+ }
+
+diff -Nur a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
+--- a/drivers/net/can/usb/peak_usb/pcan_usb.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/net/can/usb/peak_usb/pcan_usb.c 2018-11-03 16:30:39.477807497 +0000
+@@ -250,7 +250,7 @@
+ } else {
+ /* the PCAN-USB needs time to init */
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
++ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
+ }
+
+ return err;
+diff -Nur a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+--- a/drivers/net/usb/lan78xx.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/net/usb/lan78xx.c 2018-11-03 16:30:39.478807529 +0000
+@@ -2567,7 +2567,7 @@
+ while (!skb_queue_empty(&dev->rxq) &&
+ !skb_queue_empty(&dev->txq) &&
+ !skb_queue_empty(&dev->done)) {
+- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ netif_dbg(dev, ifdown, dev->net,
+ "waited for %d urb completions\n", temp);
+diff -Nur a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+--- a/drivers/net/usb/usbnet.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/net/usb/usbnet.c 2018-11-03 16:30:39.479807561 +0000
+@@ -772,7 +772,7 @@
+ spin_lock_irqsave(&q->lock, flags);
+ while (!skb_queue_empty(q)) {
+ spin_unlock_irqrestore(&q->lock, flags);
+- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_lock_irqsave(&q->lock, flags);
+ }
+diff -Nur a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
+--- a/drivers/ntb/test/ntb_perf.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/ntb/test/ntb_perf.c 2018-11-03 16:30:39.479807561 +0000
+@@ -310,7 +310,7 @@
+ if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
+ last_sleep = jiffies;
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ if (unlikely(kthread_should_stop()))
+diff -Nur a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
+--- a/drivers/scsi/fnic/fnic_scsi.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/scsi/fnic/fnic_scsi.c 2018-11-03 16:30:39.480807592 +0000
+@@ -217,7 +217,7 @@
+
+ /* wait for io cmpl */
+ while (atomic_read(&fnic->in_flight))
+- schedule_timeout(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout((1));
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+@@ -2255,7 +2255,7 @@
+ }
+ }
+
+- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
++ schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
+
+ /* walk again to check, if IOs are still pending in fw */
+ if (fnic_is_abts_pending(fnic, lr_sc))
+diff -Nur a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
+--- a/drivers/scsi/snic/snic_scsi.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/scsi/snic/snic_scsi.c 2018-11-03 16:30:39.481807625 +0000
+@@ -2354,7 +2354,7 @@
+
+ /* Wait for all the IOs that are entered in Qcmd */
+ while (atomic_read(&snic->ios_inflight))
+- schedule_timeout(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout((1));
+
+ ret = snic_issue_hba_reset(snic, sc);
+ if (ret) {
+diff -Nur a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c 2018-11-03 16:30:39.483807688 +0000
+@@ -4657,7 +4657,7 @@
+ if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
+ break;
+ set_current_state(TASK_INTERRUPTIBLE);
+- if (schedule_timeout(1))
++ if (schedule_min_hrtimeout())
+ return -EIO;
+ }
+ if (i == timeout) {
+diff -Nur a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
+--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c 2018-11-03 16:30:39.483807688 +0000
+@@ -329,7 +329,7 @@
+ schedule();
+ } else {
+ now = jiffies;
+- schedule_timeout(msecs_to_jiffies(tms));
++ schedule_msec_hrtimeout((tms));
+ tms -= jiffies_to_msecs(jiffies - now);
+ if (tms < 0) /* no more wait but may have new event */
+ tms = 0;
+diff -Nur a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
+--- a/drivers/staging/rts5208/rtsx.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/rts5208/rtsx.c 2018-11-03 16:30:39.483807688 +0000
+@@ -524,7 +524,7 @@
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
++ schedule_msec_hrtimeout((POLLING_INTERVAL));
+
+ /* lock the device pointers */
+ mutex_lock(&dev->dev_mutex);
+diff -Nur a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
+--- a/drivers/staging/speakup/speakup_acntpc.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_acntpc.c 2018-11-03 16:30:39.484807721 +0000
+@@ -206,7 +206,7 @@
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout((full_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -234,7 +234,7 @@
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ }
+diff -Nur a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
+--- a/drivers/staging/speakup/speakup_apollo.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_apollo.c 2018-11-03 16:30:39.484807721 +0000
+@@ -174,7 +174,7 @@
+ if (!synth->io_ops->synth_out(synth, ch)) {
+ synth->io_ops->tiocmset(0, UART_MCR_RTS);
+ synth->io_ops->tiocmset(UART_MCR_RTS, 0);
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout(full_time_val);
+ continue;
+ }
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
+diff -Nur a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
+--- a/drivers/staging/speakup/speakup_decext.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_decext.c 2018-11-03 16:30:39.484807721 +0000
+@@ -185,7 +185,7 @@
+ if (ch == '\n')
+ ch = 0x0D;
+ if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff -Nur a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
+--- a/drivers/staging/speakup/speakup_decpc.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_decpc.c 2018-11-03 16:30:39.484807721 +0000
+@@ -403,7 +403,7 @@
+ if (ch == '\n')
+ ch = 0x0D;
+ if (dt_sendchar(ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff -Nur a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
+--- a/drivers/staging/speakup/speakup_dectlk.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_dectlk.c 2018-11-03 16:30:39.485807753 +0000
+@@ -253,7 +253,7 @@
+ if (ch == '\n')
+ ch = 0x0D;
+ if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff -Nur a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
+--- a/drivers/staging/speakup/speakup_dtlk.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_dtlk.c 2018-11-03 16:30:39.485807753 +0000
+@@ -220,7 +220,7 @@
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -236,7 +236,7 @@
+ delay_time_val = delay_time->u.n.value;
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ }
+diff -Nur a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
+--- a/drivers/staging/speakup/speakup_keypc.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_keypc.c 2018-11-03 16:30:39.485807753 +0000
+@@ -208,7 +208,7 @@
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout((full_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -241,7 +241,7 @@
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ jiff_max = jiffies+jiffy_delta_val;
+ }
+ }
+diff -Nur a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
+--- a/drivers/staging/speakup/synth.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/synth.c 2018-11-03 16:30:39.486807785 +0000
+@@ -92,7 +92,7 @@
+ if (ch == '\n')
+ ch = synth->procspeech;
+ if (!synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout(full_time_val);
+ continue;
+ }
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
+diff -Nur a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
+--- a/drivers/staging/unisys/visornic/visornic_main.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/unisys/visornic/visornic_main.c 2018-11-03 16:30:39.486807785 +0000
+@@ -556,7 +556,7 @@
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- wait += schedule_timeout(msecs_to_jiffies(10));
++ wait += schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ }
+
+@@ -567,7 +567,7 @@
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- schedule_timeout(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ if (atomic_read(&devdata->usage))
+ break;
+@@ -721,7 +721,7 @@
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- wait += schedule_timeout(msecs_to_jiffies(10));
++ wait += schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ }
+
+diff -Nur a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+--- a/drivers/target/target_core_user.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/target/target_core_user.c 2018-11-03 16:30:39.487807817 +0000
+@@ -808,10 +808,9 @@
+ pr_debug("sleeping for ring space\n");
+ mutex_unlock(&udev->cmdr_lock);
+ if (udev->cmd_time_out)
+- ret = schedule_timeout(
+- msecs_to_jiffies(udev->cmd_time_out));
++ ret = schedule_msec_hrtimeout(udev->cmd_time_out);
+ else
+- ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
++ ret = schedule_msec_hrtimeout(TCMU_TIME_OUT);
+ finish_wait(&udev->wait_cmdr, &__wait);
+ if (!ret) {
+ pr_warn("tcmu: command timed out\n");
+diff -Nur a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
+--- a/drivers/video/fbdev/omap/hwa742.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/video/fbdev/omap/hwa742.c 2018-11-03 16:30:39.487807817 +0000
+@@ -926,7 +926,7 @@
+ if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(5));
++ schedule_msec_hrtimeout((5));
+ }
+ hwa742_set_update_mode(hwa742.update_mode_before_suspend);
+ }
+diff -Nur a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
+--- a/drivers/video/fbdev/pxafb.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/video/fbdev/pxafb.c 2018-11-03 16:30:39.488807849 +0000
+@@ -1286,7 +1286,7 @@
+ mutex_unlock(&fbi->ctrlr_lock);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(30));
++ schedule_msec_hrtimeout((30));
+ }
+
+ pr_debug("%s(): task ending\n", __func__);
+diff -Nur a/fs/afs/vlocation.c b/fs/afs/vlocation.c
+--- a/fs/afs/vlocation.c 2018-10-10 07:54:28.000000000 +0100
++++ b/fs/afs/vlocation.c 2018-11-03 16:30:39.488807849 +0000
+@@ -129,7 +129,7 @@
+ if (vl->upd_busy_cnt > 1) {
+ /* second+ BUSY - sleep a little bit */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ continue;
+ }
+diff -Nur a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+--- a/fs/btrfs/extent-tree.c 2018-10-10 07:54:28.000000000 +0100
++++ b/fs/btrfs/extent-tree.c 2018-11-03 16:30:39.491807945 +0000
+@@ -6106,7 +6106,7 @@
+
+ if (flush != BTRFS_RESERVE_NO_FLUSH &&
+ btrfs_transaction_in_commit(fs_info))
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ if (delalloc_lock)
+ mutex_lock(&inode->delalloc_mutex);
+diff -Nur a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+--- a/fs/btrfs/inode-map.c 2018-10-10 07:54:28.000000000 +0100
++++ b/fs/btrfs/inode-map.c 2018-11-03 16:30:39.492807977 +0000
+@@ -89,7 +89,7 @@
+ btrfs_release_path(path);
+ root->ino_cache_progress = last;
+ up_read(&fs_info->commit_root_sem);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ goto again;
+ } else
+ continue;
+diff -Nur a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
+--- a/sound/usb/line6/pcm.c 2018-10-10 07:54:28.000000000 +0100
++++ b/sound/usb/line6/pcm.c 2018-11-03 16:30:39.492807977 +0000
+@@ -131,7 +131,7 @@
+ if (!alive)
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ } while (--timeout > 0);
+ if (alive)
+ dev_err(line6pcm->line6->ifcdev,
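The substitution pattern applied throughout the patch above, shown on a hypothetical drain loop (sketch only, not part of the patch): schedule_timeout(1) becomes schedule_min_hrtimeout(), and schedule_timeout(msecs_to_jiffies(n)) becomes schedule_msec_hrtimeout(n).

#include <linux/atomic.h>
#include <linux/sched.h>

static atomic_t example_inflight;       /* hypothetical in-flight I/O counter */

static void example_drain_io(void)
{
        /* re-check roughly every millisecond instead of once per jiffy */
        while (atomic_read(&example_inflight)) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_min_hrtimeout();
        }
        __set_current_state(TASK_RUNNING);
}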
diff --git a/sys-kernel/linux-image-redcore-lts/files/0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch b/sys-kernel/linux-image-redcore-lts/files/0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
new file mode 100644
index 00000000..f9f274ce
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
@@ -0,0 +1,311 @@
+From 3ef5df78c2f425115b87f0f2f59fd189c0f1bbe3 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Mon, 20 Feb 2017 13:30:07 +1100
+Subject: [PATCH 08/16] Replace all calls to schedule_timeout_interruptible of
+ potentially under 50ms to use schedule_msec_hrtimeout_interruptible.
+
+---
+ drivers/hwmon/fam15h_power.c | 2 +-
+ drivers/iio/light/tsl2563.c | 6 +-----
+ drivers/media/i2c/msp3400-driver.c | 4 ++--
+ drivers/media/pci/ivtv/ivtv-gpio.c | 6 +++---
+ drivers/media/radio/radio-mr800.c | 2 +-
+ drivers/media/radio/radio-tea5777.c | 2 +-
+ drivers/media/radio/tea575x.c | 2 +-
+ drivers/parport/ieee1284.c | 2 +-
+ drivers/parport/ieee1284_ops.c | 2 +-
+ drivers/platform/x86/intel_ips.c | 8 ++++----
+ net/core/pktgen.c | 2 +-
+ sound/soc/codecs/wm8350.c | 12 ++++++------
+ sound/soc/codecs/wm8900.c | 2 +-
+ sound/soc/codecs/wm9713.c | 4 ++--
+ 14 files changed, 26 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
+index 9545a346044f..c24cf1302ec7 100644
+--- a/drivers/hwmon/fam15h_power.c
++++ b/drivers/hwmon/fam15h_power.c
+@@ -237,7 +237,7 @@ static ssize_t power1_average_show(struct device *dev,
+ prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
+ }
+
+- leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
++ leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
+ if (leftover)
+ return 0;
+
+diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
+index 7599693f7fe9..452090739138 100644
+--- a/drivers/iio/light/tsl2563.c
++++ b/drivers/iio/light/tsl2563.c
+@@ -282,11 +282,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
+ default:
+ delay = 402;
+ }
+- /*
+- * TODO: Make sure that we wait at least required delay but why we
+- * have to extend it one tick more?
+- */
+- schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
++ schedule_msec_hrtimeout_interruptible(delay + 1);
+ }
+
+ static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
+diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
+index 3db966db83eb..f0fab7676f72 100644
+--- a/drivers/media/i2c/msp3400-driver.c
++++ b/drivers/media/i2c/msp3400-driver.c
+@@ -179,7 +179,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
+ break;
+ dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
+ dev, addr);
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ }
+ if (err == 3) {
+ dev_warn(&client->dev, "resetting chip, sound will go off.\n");
+@@ -220,7 +220,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
+ break;
+ dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
+ dev, addr);
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ }
+ if (err == 3) {
+ dev_warn(&client->dev, "resetting chip, sound will go off.\n");
+diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
+index f752f3993687..23372af61ebf 100644
+--- a/drivers/media/pci/ivtv/ivtv-gpio.c
++++ b/drivers/media/pci/ivtv/ivtv-gpio.c
+@@ -117,7 +117,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
+ curout = (curout & ~0xF) | 1;
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+ /* We could use something else for smaller time */
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ curout |= 2;
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+ curdir &= ~0x80;
+@@ -137,11 +137,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
+ curout = read_reg(IVTV_REG_GPIO_OUT);
+ curout &= ~(1 << itv->card->xceive_pin);
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+
+ curout |= 1 << itv->card->xceive_pin;
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ return 0;
+ }
+
+diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
+index c9f59129af79..cb6f8394a5c2 100644
+--- a/drivers/media/radio/radio-mr800.c
++++ b/drivers/media/radio/radio-mr800.c
+@@ -378,7 +378,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
+ retval = -ENODATA;
+ break;
+ }
+- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
++ if (schedule_msec_hrtimeout_interruptible((10))) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
+index 04ed1a5d1177..d593d28dc286 100644
+--- a/drivers/media/radio/radio-tea5777.c
++++ b/drivers/media/radio/radio-tea5777.c
+@@ -245,7 +245,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
+ }
+
+ if (wait) {
+- if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
++ if (schedule_msec_hrtimeout_interruptible((wait)))
+ return -ERESTARTSYS;
+ }
+
+diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
+index 4dc2067bce14..29f4416fb9ae 100644
+--- a/drivers/media/radio/tea575x.c
++++ b/drivers/media/radio/tea575x.c
+@@ -416,7 +416,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
++ if (schedule_msec_hrtimeout_interruptible((10))) {
+ /* some signal arrived, stop search */
+ tea->val &= ~TEA575X_BIT_SEARCH;
+ snd_tea575x_set_freq(tea);
+diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
+index 74cc6dd982d2..c22c4d5f08d0 100644
+--- a/drivers/parport/ieee1284.c
++++ b/drivers/parport/ieee1284.c
+@@ -215,7 +215,7 @@ int parport_wait_peripheral(struct parport *port,
+ /* parport_wait_event didn't time out, but the
+ * peripheral wasn't actually ready either.
+ * Wait for another 10ms. */
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ }
+ }
+
+diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
+index 5d41dda6da4e..34705f6b423f 100644
+--- a/drivers/parport/ieee1284_ops.c
++++ b/drivers/parport/ieee1284_ops.c
+@@ -537,7 +537,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
+ /* Yield the port for a while. */
+ if (count && dev->port->irq != PARPORT_IRQ_NONE) {
+ parport_release (dev);
+- schedule_timeout_interruptible(msecs_to_jiffies(40));
++ schedule_msec_hrtimeout_interruptible((40));
+ parport_claim_or_block (dev);
+ }
+ else
+diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
+index 58dcee562d64..b661b7c071bb 100644
+--- a/drivers/platform/x86/intel_ips.c
++++ b/drivers/platform/x86/intel_ips.c
+@@ -813,7 +813,7 @@ static int ips_adjust(void *data)
+ ips_gpu_lower(ips);
+
+ sleep:
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD));
+ } while (!kthread_should_stop());
+
+ dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");
+@@ -992,7 +992,7 @@ static int ips_monitor(void *data)
+ seqno_timestamp = get_jiffies_64();
+
+ old_cpu_power = thm_readl(THM_CEC);
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
+
+ /* Collect an initial average */
+ for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
+@@ -1019,7 +1019,7 @@ static int ips_monitor(void *data)
+ mchp_samples[i] = mchp;
+ }
+
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
+ if (kthread_should_stop())
+ break;
+ }
+@@ -1046,7 +1046,7 @@ static int ips_monitor(void *data)
+ * us to reduce the sample frequency if the CPU and GPU are idle.
+ */
+ old_cpu_power = thm_readl(THM_CEC);
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
+ last_sample_period = IPS_SAMPLE_PERIOD;
+
+ setup_deferrable_timer_on_stack(&timer, monitor_timeout,
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 6e1e10ff433a..be5d6f7142e4 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1992,7 +1992,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
+ mutex_unlock(&pktgen_thread_lock);
+ pr_debug("%s: waiting for %s to disappear....\n",
+ __func__, ifname);
+- schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
++ schedule_msec_hrtimeout_interruptible((msec_per_try));
+ mutex_lock(&pktgen_thread_lock);
+
+ if (++i >= max_tries) {
+diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
+index 2efc5b41ad0f..3e3248c48c6b 100644
+--- a/sound/soc/codecs/wm8350.c
++++ b/sound/soc/codecs/wm8350.c
+@@ -236,10 +236,10 @@ static void wm8350_pga_work(struct work_struct *work)
+ out2->ramp == WM8350_RAMP_UP) {
+ /* delay is longer over 0dB as increases are larger */
+ if (i >= WM8350_OUTn_0dB)
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (2));
+ else
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (1));
+ } else
+ udelay(50); /* doesn't matter if we delay longer */
+@@ -1123,7 +1123,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ (platform->dis_out4 << 6));
+
+ /* wait for discharge */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->
+ cap_discharge_msecs));
+
+@@ -1139,7 +1139,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ WM8350_VBUFEN);
+
+ /* wait for vmid */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->
+ vmid_charge_msecs));
+
+@@ -1190,7 +1190,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
+
+ /* wait */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->
+ vmid_discharge_msecs));
+
+@@ -1208,7 +1208,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ pm1 | WM8350_OUTPUT_DRAIN_EN);
+
+ /* wait */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->drain_msecs));
+
+ pm1 &= ~WM8350_BIASEN;
+diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
+index c77b49a29311..fc50456e90a9 100644
+--- a/sound/soc/codecs/wm8900.c
++++ b/sound/soc/codecs/wm8900.c
+@@ -1112,7 +1112,7 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec,
+ /* Need to let things settle before stopping the clock
+ * to ensure that restart works, see "Stopping the
+ * master clock" in the datasheet. */
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ snd_soc_write(codec, WM8900_REG_POWER2,
+ WM8900_REG_POWER2_SYSCLK_ENA);
+ break;
+diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
+index 7e4822185feb..0c85a207446a 100644
+--- a/sound/soc/codecs/wm9713.c
++++ b/sound/soc/codecs/wm9713.c
+@@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
+
+ /* Gracefully shut down the voice interface. */
+ snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0200);
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
+ snd_soc_update_bits(codec, AC97_EXTENDED_MID, 0x1000, 0x1000);
+
+@@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_codec *codec,
+ wm9713->pll_in = freq_in;
+
+ /* wait 10ms AC97 link frames for the link to stabilise */
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ return 0;
+ }
+
+--
+2.11.0
+
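A sketch of the interruptible polling pattern the patch above converts to, mirroring the radio and parport call sites; example_hw_done() is hypothetical and the snippet is not part of the patch:

#include <linux/errno.h>
#include <linux/sched.h>

static int example_poll_done(void)
{
        while (!example_hw_done()) {            /* hypothetical status check */
                /* a non-zero return means a signal cut the 10 ms sleep short */
                if (schedule_msec_hrtimeout_interruptible(10))
                        return -ERESTARTSYS;
        }
        return 0;
}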
diff --git a/sys-kernel/linux-image-redcore-lts/files/0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch b/sys-kernel/linux-image-redcore-lts/files/0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
new file mode 100644
index 00000000..c910f3df
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
@@ -0,0 +1,160 @@
+From 6044370cf4bbc5e05f5d78f5772c1d88e3153603 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Mon, 20 Feb 2017 13:30:32 +1100
+Subject: [PATCH 09/16] Replace all calls to schedule_timeout_uninterruptible
+ of potentially under 50ms to use schedule_msec_hrtimeout_uninterruptible
+
+---
+ drivers/media/pci/cx18/cx18-gpio.c | 4 ++--
+ drivers/net/wireless/intel/ipw2x00/ipw2100.c | 4 ++--
+ drivers/rtc/rtc-wm8350.c | 6 +++---
+ drivers/scsi/lpfc/lpfc_scsi.c | 2 +-
+ sound/pci/maestro3.c | 4 ++--
+ sound/soc/codecs/rt5631.c | 4 ++--
+ sound/soc/soc-dapm.c | 2 +-
+ 7 files changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
+index 012859e6dc7b..206bd08265a5 100644
+--- a/drivers/media/pci/cx18/cx18-gpio.c
++++ b/drivers/media/pci/cx18/cx18-gpio.c
+@@ -90,11 +90,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
+
+ /* Assert */
+ gpio_update(cx, mask, ~active_lo);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
++ schedule_msec_hrtimeout_uninterruptible((assert_msecs));
+
+ /* Deassert */
+ gpio_update(cx, mask, ~active_hi);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
++ schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
+ }
+
+ /*
+diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+index 19c442cb93e4..448f41782060 100644
+--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
++++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+@@ -830,7 +830,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
+ * doesn't seem to have as many firmware restart cycles...
+ *
+ * As a test, we're sticking in a 1/100s delay here */
+- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_uninterruptible((10));
+
+ return 0;
+
+@@ -1281,7 +1281,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
+ IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
+ i = 5000;
+ do {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(40));
++ schedule_msec_hrtimeout_uninterruptible((40));
+ /* Todo... wait for sync command ... */
+
+ read_register(priv->net_dev, IPW_REG_INTA, &inta);
+diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
+index 483c7993516b..fddbaa475066 100644
+--- a/drivers/rtc/rtc-wm8350.c
++++ b/drivers/rtc/rtc-wm8350.c
+@@ -119,7 +119,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
+ /* Wait until confirmation of stopping */
+ do {
+ rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_uninterruptible((1));
+ } while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
+
+ if (!retries) {
+@@ -202,7 +202,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
+ /* Wait until confirmation of stopping */
+ do {
+ rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_uninterruptible((1));
+ } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
+
+ if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
+@@ -225,7 +225,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
+ /* Wait until confirmation */
+ do {
+ rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_uninterruptible((1));
+ } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
+
+ if (rtc_ctrl & WM8350_RTC_ALMSTS)
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 1a6f122bb25d..c0db66302a3e 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -5131,7 +5131,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
+ tgt_id, lun_id, context);
+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ while (time_after(later, jiffies) && cnt) {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(20));
++ schedule_msec_hrtimeout_uninterruptible((20));
+ cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
+ }
+ if (cnt) {
+diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
+index 8f20dec97843..944ce63431b0 100644
+--- a/sound/pci/maestro3.c
++++ b/sound/pci/maestro3.c
+@@ -2016,7 +2016,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
+ outw(0, io + GPIO_DATA);
+ outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
+
+- schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
++ schedule_msec_hrtimeout_uninterruptible((delay1));
+
+ outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
+ udelay(5);
+@@ -2024,7 +2024,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
+ outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
+ outw(~0, io + GPIO_MASK);
+
+- schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
++ schedule_msec_hrtimeout_uninterruptible((delay2));
+
+ if (! snd_m3_try_read_vendor(chip))
+ break;
+diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
+index 55b04c55fb4b..2ed02ad6ac41 100644
+--- a/sound/soc/codecs/rt5631.c
++++ b/sound/soc/codecs/rt5631.c
+@@ -419,7 +419,7 @@ static void onebit_depop_mute_stage(struct snd_soc_codec *codec, int enable)
+ hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
+ snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
+ if (enable) {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_uninterruptible((10));
+ /* config one-bit depop parameter */
+ rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x307f);
+ snd_soc_update_bits(codec, RT5631_HP_OUT_VOL,
+@@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_codec *codec, int enable)
+ hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
+ snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
+ if (enable) {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_uninterruptible((10));
+
+ /* config depop sequence parameter */
+ rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x302f);
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index dcef67a9bd48..11c2bb48c8f2 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -134,7 +134,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
+ static void pop_wait(u32 pop_time)
+ {
+ if (pop_time)
+- schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
++ schedule_msec_hrtimeout_uninterruptible((pop_time));
+ }
+
+ static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
+--
+2.11.0
+
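A sketch of the uninterruptible variant, mirroring the cx18 GPIO reset sequence touched above; example_gpio_set() is hypothetical and the snippet is not part of the patch:

#include <linux/sched.h>

static void example_reset_pulse(void)
{
        example_gpio_set(0);                    /* hypothetical assert of reset line */
        /* fixed settle times that must not be cut short by signals */
        schedule_msec_hrtimeout_uninterruptible(20);
        example_gpio_set(1);                    /* hypothetical deassert */
        schedule_msec_hrtimeout_uninterruptible(20);
}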
diff --git a/sys-kernel/linux-image-redcore-lts/files/0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch b/sys-kernel/linux-image-redcore-lts/files/0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
new file mode 100644
index 00000000..260bb98d
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
@@ -0,0 +1,69 @@
+From 071486de633698dcdd163295173ce4663ec9158c Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Mon, 20 Feb 2017 13:32:58 +1100
+Subject: [PATCH 10/16] Don't use hrtimer overlay when pm_freezing since some
+ drivers still don't correctly use freezable timeouts.
+
+---
+ kernel/time/hrtimer.c | 2 +-
+ kernel/time/timer.c | 9 +++++----
+ 2 files changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 13227cf2814c..66456c72bace 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1809,7 +1809,7 @@ long __sched schedule_msec_hrtimeout(long timeout)
+ * (yet) better than Hz, as would occur during startup, use regular
+ * timers.
+ */
+- if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
++ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
+ return schedule_timeout(jiffs);
+
+ secs = timeout / 1000;
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index c68cb9307f64..2f2c96b03efe 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -44,6 +44,7 @@
+ #include <linux/sched/debug.h>
+ #include <linux/slab.h>
+ #include <linux/compat.h>
++#include <linux/freezer.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+@@ -1891,12 +1892,12 @@ void msleep(unsigned int msecs)
+ * Use high resolution timers where the resolution of tick based
+ * timers is inadequate.
+ */
+- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
+ while (msecs)
+ msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
+ return;
+ }
+- timeout = msecs_to_jiffies(msecs) + 1;
++ timeout = jiffs + 1;
+
+ while (timeout)
+ timeout = schedule_timeout_uninterruptible(timeout);
+@@ -1913,12 +1914,12 @@ unsigned long msleep_interruptible(unsigned int msecs)
+ int jiffs = msecs_to_jiffies(msecs);
+ unsigned long timeout;
+
+- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
+ while (msecs && !signal_pending(current))
+ msecs = schedule_msec_hrtimeout_interruptible(msecs);
+ return msecs;
+ }
+- timeout = msecs_to_jiffies(msecs) + 1;
++ timeout = jiffs + 1;
+
+ while (timeout && !signal_pending(current))
+ timeout = schedule_timeout_interruptible(timeout);
+--
+2.11.0
+
diff --git a/sys-kernel/linux-image-redcore-lts/files/0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch b/sys-kernel/linux-image-redcore-lts/files/0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
new file mode 100644
index 00000000..5ac20300
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
@@ -0,0 +1,136 @@
+diff -Nur a/kernel/sysctl.c b/kernel/sysctl.c
+--- a/kernel/sysctl.c 2018-11-03 17:03:07.433069521 +0000
++++ b/kernel/sysctl.c 2018-11-03 17:02:11.020267246 +0000
+@@ -141,7 +141,9 @@
+ extern int sched_iso_cpu;
+ extern int sched_yield_type;
+ #endif
+-#ifdef CONFIG_PRINTK
++extern int hrtimer_granularity_us;
++extern int hrtimeout_min_us;
++#if defined(CONFIG_PRINTK) || defined(CONFIG_SCHED_MUQSS)
+ static int ten_thousand __read_only = 10000;
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
+@@ -1119,6 +1121,24 @@
+ .extra2 = &two,
+ },
+ #endif
++ {
++ .procname = "hrtimer_granularity_us",
++ .data = &hrtimer_granularity_us,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &ten_thousand,
++ },
++ {
++ .procname = "hrtimeout_min_us",
++ .data = &hrtimeout_min_us,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &ten_thousand,
++ },
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ {
+ .procname = "spin_retry",
+diff -Nur a/kernel/time/clockevents.c b/kernel/time/clockevents.c
+--- a/kernel/time/clockevents.c 2018-11-03 17:03:07.433069521 +0000
++++ b/kernel/time/clockevents.c 2018-11-03 16:58:17.283800909 +0000
+@@ -198,13 +198,9 @@
+
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
+
+-#ifdef CONFIG_SCHED_MUQSS
++int __read_mostly hrtimer_granularity_us = 100;
+ /* Limit min_delta to 100us */
+-#define MIN_DELTA_LIMIT (NSEC_PER_SEC / 10000)
+-#else
+-/* Limit min_delta to a jiffie */
+-#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
+-#endif
++#define MIN_DELTA_LIMIT (hrtimer_granularity_us * NSEC_PER_USEC)
+
+ /**
+ * clockevents_increase_min_delta - raise minimum delta of a clock event device
+diff -Nur a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+--- a/kernel/time/hrtimer.c 2018-11-03 17:04:16.448274547 +0000
++++ b/kernel/time/hrtimer.c 2018-11-03 16:58:17.283800909 +0000
+@@ -1803,7 +1803,7 @@
+ long __sched schedule_msec_hrtimeout(long timeout)
+ {
+ struct hrtimer_sleeper t;
+- int delta, secs, jiffs;
++ int delta, jiffs;
+ ktime_t expires;
+
+ if (!timeout) {
+@@ -1820,9 +1820,8 @@
+ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
+ return schedule_timeout(jiffs);
+
+- secs = timeout / 1000;
+ delta = (timeout % 1000) * NSEC_PER_MSEC;
+- expires = ktime_set(secs, delta);
++ expires = ktime_set(0, delta);
+
+ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
+@@ -1846,9 +1845,53 @@
+
+ EXPORT_SYMBOL(schedule_msec_hrtimeout);
+
++#define USECS_PER_SEC 1000000
++extern int hrtimer_granularity_us;
++
++static inline long schedule_usec_hrtimeout(long timeout)
++{
++ struct hrtimer_sleeper t;
++ ktime_t expires;
++ int delta;
++
++ if (!timeout) {
++ __set_current_state(TASK_RUNNING);
++ return 0;
++ }
++
++ if (hrtimer_resolution >= NSEC_PER_SEC / HZ)
++ return schedule_timeout(usecs_to_jiffies(timeout));
++
++ if (timeout < hrtimer_granularity_us)
++ timeout = hrtimer_granularity_us;
++ delta = (timeout % USECS_PER_SEC) * NSEC_PER_USEC;
++ expires = ktime_set(0, delta);
++
++ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
++
++ hrtimer_init_sleeper(&t, current);
++
++ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
++
++ if (likely(t.task))
++ schedule();
++
++ hrtimer_cancel(&t.timer);
++ destroy_hrtimer_on_stack(&t.timer);
++
++ __set_current_state(TASK_RUNNING);
++
++ expires = hrtimer_expires_remaining(&t.timer);
++ timeout = ktime_to_us(expires);
++ return timeout < 0 ? 0 : timeout;
++}
++
++int __read_mostly hrtimeout_min_us = 1000;
++
+ long __sched schedule_min_hrtimeout(void)
+ {
+- return schedule_msec_hrtimeout(1);
++ return usecs_to_jiffies(schedule_usec_hrtimeout(hrtimeout_min_us));
+ }
+
+ EXPORT_SYMBOL(schedule_min_hrtimeout);
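Assuming the two controls land in the kernel sysctl table as added above, they appear under /proc/sys/kernel and accept values from 1 to 10000 (defaults 100 and 1000). A small userspace sketch, not part of the patch, to read them back:

#include <stdio.h>

int main(void)
{
        const char *tunables[] = {
                "/proc/sys/kernel/hrtimer_granularity_us",      /* default 100 */
                "/proc/sys/kernel/hrtimeout_min_us",            /* default 1000 */
        };
        char buf[32];
        int i;

        for (i = 0; i < 2; i++) {
                FILE *f = fopen(tunables[i], "r");

                if (f && fgets(buf, sizeof(buf), f))
                        printf("%s = %s", tunables[i], buf);
                if (f)
                        fclose(f);
        }
        return 0;
}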
diff --git a/sys-kernel/linux-image-redcore-lts/files/0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch b/sys-kernel/linux-image-redcore-lts/files/0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
new file mode 100644
index 00000000..99b28d65
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
@@ -0,0 +1,81 @@
+From 9e47a80f690080c12ce607158b96c305707543b8 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Wed, 7 Dec 2016 21:23:01 +1100
+Subject: [PATCH 12/16] Reinstate default Hz of 100 in combination with MuQSS
+ and -ck patches.
+
+---
+ kernel/Kconfig.hz | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
+index 2a202a846757..1806fcac8f14 100644
+--- a/kernel/Kconfig.hz
++++ b/kernel/Kconfig.hz
+@@ -4,7 +4,8 @@
+
+ choice
+ prompt "Timer frequency"
+- default HZ_250
++ default HZ_100 if SCHED_MUQSS
++ default HZ_250_NODEF if !SCHED_MUQSS
+ help
+ Allows the configuration of the timer frequency. It is customary
+ to have the timer interrupt run at 1000 Hz but 100 Hz may be more
+@@ -19,11 +20,18 @@ choice
+ config HZ_100
+ bool "100 HZ"
+ help
++ 100 Hz is a suitable choice in combination with MuQSS which does
++ not rely on ticks for rescheduling interrupts, and is not Hz limited
++ for timeouts and sleeps from both the kernel and userspace.
++ This allows us to benefit from the lower overhead and higher
++ throughput of fewer timer ticks.
++
++ Non-MuQSS kernels:
+ 100 Hz is a typical choice for servers, SMP and NUMA systems
+ with lots of processors that may show reduced performance if
+ too many timer interrupts are occurring.
+
+- config HZ_250
++ config HZ_250_NODEF
+ bool "250 HZ"
+ help
+ 250 Hz is a good compromise choice allowing server performance
+@@ -31,7 +39,10 @@ choice
+ on SMP and NUMA systems. If you are going to be using NTSC video
+ or multimedia, selected 300Hz instead.
+
+- config HZ_300
++ 250 Hz is the default choice for the mainline scheduler but not
++ advantageous in combination with MuQSS.
++
++ config HZ_300_NODEF
+ bool "300 HZ"
+ help
+ 300 Hz is a good compromise choice allowing server performance
+@@ -39,7 +50,7 @@ choice
+ on SMP and NUMA systems and exactly dividing by both PAL and
+ NTSC frame rates for video and multimedia work.
+
+- config HZ_1000
++ config HZ_1000_NODEF
+ bool "1000 HZ"
+ help
+ 1000 Hz is the preferred choice for desktop systems and other
+@@ -50,9 +61,9 @@ endchoice
+ config HZ
+ int
+ default 100 if HZ_100
+- default 250 if HZ_250
+- default 300 if HZ_300
+- default 1000 if HZ_1000
++ default 250 if HZ_250_NODEF
++ default 300 if HZ_300_NODEF
++ default 1000 if HZ_1000_NODEF
+
+ config SCHED_HRTICK
+ def_bool HIGH_RES_TIMERS
+--
+2.11.0
+
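
Because this patch only renames the non-default Kconfig choices and switches the default, the resulting tick rate is easiest to confirm from the embedded kernel config. A small sketch under two assumptions: CONFIG_IKCONFIG_PROC=y (it is set in the redcore config further down in this commit) and a zcat binary in PATH; the file name check_hz.c is illustrative:

/* check_hz.c - grep CONFIG_HZ out of /proc/config.gz. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    FILE *p = popen("zcat /proc/config.gz", "r");
    char line[256];

    if (!p) {
        perror("popen");
        return 1;
    }
    while (fgets(line, sizeof(line), p)) {
        /* print CONFIG_HZ=... and the CONFIG_HZ_100 choice symbol */
        if (strncmp(line, "CONFIG_HZ=", 10) == 0 ||
            strncmp(line, "CONFIG_HZ_100=", 14) == 0)
            fputs(line, stdout);
    }
    pclose(p);
    return 0;
}

On a kernel built from the config in this commit the output should include CONFIG_HZ=100.
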
diff --git a/sys-kernel/linux-image-redcore-lts/files/0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch b/sys-kernel/linux-image-redcore-lts/files/0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
new file mode 100644
index 00000000..63ec9fdf
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
@@ -0,0 +1,61 @@
+From 5902b315d4061ebbe73a62c52e6d3b618066cebc Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Wed, 7 Dec 2016 21:13:16 +1100
+Subject: [PATCH 13/16] Make threaded IRQs optionally the default which can be
+ disabled.
+
+---
+ kernel/irq/Kconfig | 14 ++++++++++++++
+ kernel/irq/manage.c | 10 ++++++++++
+ 2 files changed, 24 insertions(+)
+
+diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
+index a117adf7084b..0984c54fd4e9 100644
+--- a/kernel/irq/Kconfig
++++ b/kernel/irq/Kconfig
+@@ -111,6 +111,20 @@ config IRQ_DOMAIN_DEBUG
+ config IRQ_FORCED_THREADING
+ bool
+
++config FORCE_IRQ_THREADING
++ bool "Make IRQ threading compulsory"
++ depends on IRQ_FORCED_THREADING
++ default y
++ ---help---
++
++ Make IRQ threading mandatory for any IRQ handlers that support it
++ instead of being optional and requiring the threadirqs kernel
++ parameter. Instead they can be optionally disabled with the
++ nothreadirqs kernel parameter.
++
++ Enable if you are building for a desktop or low latency system,
++ otherwise say N.
++
+ config SPARSE_IRQ
+ bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
+ ---help---
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 4bff6a10ae8e..5a6df0dd23c4 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -24,7 +24,17 @@
+ #include "internals.h"
+
+ #ifdef CONFIG_IRQ_FORCED_THREADING
++#ifdef CONFIG_FORCE_IRQ_THREADING
++__read_mostly bool force_irqthreads = true;
++#else
+ __read_mostly bool force_irqthreads;
++#endif
++static int __init setup_noforced_irqthreads(char *arg)
++{
++ force_irqthreads = false;
++ return 0;
++}
++early_param("nothreadirqs", setup_noforced_irqthreads);
+
+ static int __init setup_forced_irqthreads(char *arg)
+ {
+--
+2.11.0
+
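
With CONFIG_FORCE_IRQ_THREADING=y every handler that supports threading runs in a kernel thread named irq/<nr>-<name> unless nothreadirqs is given on the kernel command line. A rough userspace check is to scan /proc for task names with that prefix; a sketch (the irq/ naming is standard kernel behaviour, the rest is illustrative):

/* list_irq_threads.c - list kernel threads whose comm starts with "irq/". */
#include <ctype.h>
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    DIR *proc = opendir("/proc");
    struct dirent *de;
    char path[64], comm[64];

    if (!proc) {
        perror("/proc");
        return 1;
    }
    while ((de = readdir(proc))) {
        if (!isdigit((unsigned char)de->d_name[0]))
            continue;
        snprintf(path, sizeof(path), "/proc/%s/comm", de->d_name);
        FILE *f = fopen(path, "r");
        if (!f)
            continue;
        /* comm already ends in a newline, so no "\n" in the format */
        if (fgets(comm, sizeof(comm), f) && strncmp(comm, "irq/", 4) == 0)
            printf("%s\t%s", de->d_name, comm);
        fclose(f);
    }
    closedir(proc);
    return 0;
}

A noticeably longer list than on a stock kernel suggests forced threading is in effect; booting with nothreadirqs should shrink it back to the drivers that request threaded handlers explicitly.
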
diff --git a/sys-kernel/linux-image-redcore-lts/files/0014-Swap-sucks.patch b/sys-kernel/linux-image-redcore-lts/files/0014-Swap-sucks.patch
new file mode 100644
index 00000000..6bf5bcda
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0014-Swap-sucks.patch
@@ -0,0 +1,25 @@
+From ed0ab4c80fcb6fa4abb4f2f897e591df6eaa2d0e Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Sat, 12 Aug 2017 12:02:04 +1000
+Subject: [PATCH 14/16] Swap sucks.
+
+---
+ mm/vmscan.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index eb2f0315b8c0..67d03efab288 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -149,7 +149,7 @@ struct scan_control {
+ /*
+ * From 0 .. 100. Higher means more swappy.
+ */
+-int vm_swappiness = 60;
++int vm_swappiness = 33;
+ /*
+ * The total number of pages which are beyond the high watermark within all
+ * zones.
+--
+2.11.0
+
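
The lower compile-time default only changes the starting point; vm.swappiness stays a normal runtime tunable. A minimal sketch that reads it and, when given an argument, writes it (writing needs root); /proc/sys/vm/swappiness is the standard procfs path:

/* swappiness.c - print vm.swappiness, optionally set it: ./swappiness 33 */
#include <stdio.h>
#include <stdlib.h>

#define SWAPPINESS_PATH "/proc/sys/vm/swappiness"

int main(int argc, char **argv)
{
    FILE *f;
    int val;

    if (argc > 1) {
        f = fopen(SWAPPINESS_PATH, "w");   /* needs root */
        if (!f) {
            perror(SWAPPINESS_PATH);
            return 1;
        }
        fprintf(f, "%d\n", atoi(argv[1]));
        fclose(f);
    }
    f = fopen(SWAPPINESS_PATH, "r");
    if (!f) {
        perror(SWAPPINESS_PATH);
        return 1;
    }
    if (fscanf(f, "%d", &val) == 1)
        printf("vm.swappiness = %d\n", val);
    fclose(f);
    return 0;
}

On this kernel the value starts at 33 instead of the mainline 60, so under memory pressure the VM prefers dropping page cache over swapping out anonymous pages.
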
diff --git a/sys-kernel/linux-image-redcore-lts/files/0015-Enable-BFQ-io-scheduler-by-default.patch b/sys-kernel/linux-image-redcore-lts/files/0015-Enable-BFQ-io-scheduler-by-default.patch
deleted file mode 100644
index d12753be..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/0015-Enable-BFQ-io-scheduler-by-default.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From 0e7ab31fb218e2a18fbecd19c24dfaae14c88afd Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Mon, 20 Nov 2017 18:02:03 +1100
-Subject: [PATCH 15/18] Enable BFQ io scheduler by default.
-
----
- block/Kconfig.iosched | 2 +-
- drivers/scsi/Kconfig | 1 +
- 2 files changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
-index a4a8914bf7a4..2d9be91e8e87 100644
---- a/block/Kconfig.iosched
-+++ b/block/Kconfig.iosched
-@@ -82,7 +82,7 @@ config MQ_IOSCHED_KYBER
-
- config IOSCHED_BFQ
- tristate "BFQ I/O scheduler"
-- default n
-+ default y
- ---help---
- BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
- of the device among all processes according to their weights,
-diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
-index 8a739b74cfb7..9e939ee76e72 100644
---- a/drivers/scsi/Kconfig
-+++ b/drivers/scsi/Kconfig
-@@ -50,6 +50,7 @@ config SCSI_NETLINK
- config SCSI_MQ_DEFAULT
- bool "SCSI: use blk-mq I/O path by default"
- depends on SCSI
-+ default y
- ---help---
- This option enables the new blk-mq based I/O path for SCSI
- devices by default. With the option the scsi_mod.use_blk_mq
---
-2.14.1
-
diff --git a/sys-kernel/linux-image-redcore-lts/files/0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch b/sys-kernel/linux-image-redcore-lts/files/0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
new file mode 100644
index 00000000..bfa509a5
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
@@ -0,0 +1,19 @@
+diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
+index e84d700709ff6..16364915cff53 100644
+--- a/kernel/sched/MuQSS.c
++++ b/kernel/sched/MuQSS.c
+@@ -70,6 +70,14 @@
+
+ #include "MuQSS.h"
+
++/* needing to include irq_regs.h, "because reasons"...
++ * implicit declaration of function ‘get_irq_regs’;
++ * did you mean ‘get_ibs_caps’?
++ * [-Werror=implicit-function-declaration]
++ * ^ this is because autodetect is not flawless
++ */
++#include <asm/irq_regs.h>
++
+ #define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
+ #define rt_task(p) rt_prio((p)->prio)
+ #define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
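
The warning quoted in the comment is gcc's -Werror=implicit-function-declaration failure: MuQSS.c calls get_irq_regs() without a declaration in scope, and the fix is to include the header that provides the prototype. A userspace sketch of the same failure mode (all names here are illustrative, none are kernel symbols):

/* implicit_decl.c - helper() is defined after main(), so the prototype
 * below plays the role of <asm/irq_regs.h>. Comment it out and build with
 *   gcc -Werror=implicit-function-declaration implicit_decl.c
 * to reproduce the same class of error that MuQSS.c hit. */
#include <stdio.h>

int helper(int x);          /* the "missing header" in miniature */

int main(void)
{
    printf("helper(42) = %d\n", helper(42));
    return 0;
}

int helper(int x)
{
    return x * 2;
}
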
diff --git a/sys-kernel/linux-image-redcore-lts/files/0016-unfuck-MuQSS-on-linux-4_14_15+.patch b/sys-kernel/linux-image-redcore-lts/files/0016-unfuck-MuQSS-on-linux-4_14_15+.patch
new file mode 100644
index 00000000..f7dc1d1c
--- /dev/null
+++ b/sys-kernel/linux-image-redcore-lts/files/0016-unfuck-MuQSS-on-linux-4_14_15+.patch
@@ -0,0 +1,48 @@
+diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
+index e84d700709ff6..b0be7fcfe41f9 100644
+--- a/kernel/sched/MuQSS.c
++++ b/kernel/sched/MuQSS.c
+@@ -55,6 +55,7 @@
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
+ #include <linux/tick.h>
++#include <linux/version.h>
+
+ #include <asm/switch_to.h>
+ #include <asm/tlb.h>
+@@ -1959,7 +1960,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ p->state = TASK_WAKING;
+
+ if (p->in_iowait) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
+ delayacct_blkio_end();
++#else
++ delayacct_blkio_end(p);
++#endif
+ atomic_dec(&task_rq(p)->nr_iowait);
+ }
+
+@@ -1970,7 +1975,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ #else /* CONFIG_SMP */
+
+ if (p->in_iowait) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
+ delayacct_blkio_end();
++#else
++ delayacct_blkio_end(p);
++#endif
+ atomic_dec(&task_rq(p)->nr_iowait);
+ }
+
+@@ -2022,7 +2031,11 @@ static void try_to_wake_up_local(struct task_struct *p)
+
+ if (!task_on_rq_queued(p)) {
+ if (p->in_iowait) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
+ delayacct_blkio_end();
++#else
++ delayacct_blkio_end(p);
++#endif
+ atomic_dec(&rq->nr_iowait);
+ }
+ ttwu_activate(rq, p);
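
These guards key off LINUX_VERSION_CODE because, from 4.14.15 on, delayacct_blkio_end() takes the task as an argument. The same idiom can be exercised outside the kernel tree; the macro below is a stand-in that mirrors the encoding used by <linux/version.h>, and the MY_ names are illustrative:

/* version_guard.c - the compile-time version check used in patch 0016. */
#include <stdio.h>

#define MY_KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

/* Pretend we are building against 4.14.75, as this ebuild does. */
#define MY_LINUX_VERSION_CODE MY_KERNEL_VERSION(4, 14, 75)

int main(void)
{
#if MY_LINUX_VERSION_CODE < MY_KERNEL_VERSION(4, 14, 15)
    puts("old API: delayacct_blkio_end() takes no argument");
#else
    puts("new API: delayacct_blkio_end(p) takes the task pointer");
#endif
    return 0;
}
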
diff --git a/sys-kernel/linux-image-redcore-lts/files/redcore-lts-amd64.config b/sys-kernel/linux-image-redcore-lts/files/redcore-lts-amd64.config
index 73c7d194..f41bc39d 100644
--- a/sys-kernel/linux-image-redcore-lts/files/redcore-lts-amd64.config
+++ b/sys-kernel/linux-image-redcore-lts/files/redcore-lts-amd64.config
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 4.14.65-redcore-lts Kernel Configuration
+# Linux/x86 4.14.75-redcore-lts Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y
@@ -50,6 +50,7 @@ CONFIG_THREAD_INFO_IN_TASK=y
#
# General setup
#
+CONFIG_SCHED_MUQSS=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_CROSS_COMPILE=""
# CONFIG_COMPILE_TEST is not set
@@ -98,6 +99,7 @@ CONFIG_GENERIC_MSI_IRQ=y
CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
# CONFIG_IRQ_DOMAIN_DEBUG is not set
CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_FORCE_IRQ_THREADING=y
CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_IRQ_DEBUGFS is not set
CONFIG_CLOCKSOURCE_WATCHDOG=y
@@ -113,11 +115,9 @@ CONFIG_GENERIC_CMOS_UPDATE=y
# Timers subsystem
#
CONFIG_TICK_ONESHOT=y
-CONFIG_NO_HZ_COMMON=y
-# CONFIG_HZ_PERIODIC is not set
+CONFIG_HZ_PERIODIC=y
# CONFIG_NO_HZ_IDLE is not set
-CONFIG_NO_HZ_FULL=y
-# CONFIG_NO_HZ_FULL_ALL is not set
+# CONFIG_NO_HZ_FULL is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -125,6 +125,7 @@ CONFIG_HIGH_RES_TIMERS=y
# CPU/Task time and stats accounting
#
CONFIG_VIRT_CPU_ACCOUNTING=y
+# CONFIG_TICK_CPU_ACCOUNTING is not set
CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -146,7 +147,6 @@ CONFIG_RCU_STALL_COMMON=y
CONFIG_RCU_NEED_SEGCBLIST=y
CONFIG_CONTEXT_TRACKING=y
# CONFIG_CONTEXT_TRACKING_FORCE is not set
-CONFIG_RCU_NOCB_CPU=y
CONFIG_BUILD_BIN2C=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -157,8 +157,6 @@ CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
CONFIG_ARCH_SUPPORTS_INT128=y
-CONFIG_NUMA_BALANCING=y
-CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
CONFIG_MEMCG=y
@@ -168,9 +166,6 @@ CONFIG_BLK_CGROUP=y
# CONFIG_DEBUG_BLK_CGROUP is not set
CONFIG_CGROUP_WRITEBACK=y
CONFIG_CGROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
# CONFIG_CGROUP_RDMA is not set
CONFIG_CGROUP_FREEZER=y
@@ -178,7 +173,6 @@ CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
CONFIG_PROC_PID_CPUSET=y
CONFIG_CGROUP_DEVICE=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
# CONFIG_CGROUP_DEBUG is not set
@@ -190,7 +184,6 @@ CONFIG_IPC_NS=y
CONFIG_USER_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
-CONFIG_SCHED_AUTOGROUP=y
# CONFIG_SYSFS_DEPRECATED is not set
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
@@ -306,6 +299,7 @@ CONFIG_HAVE_PERF_REGS=y
CONFIG_HAVE_PERF_USER_STACK_DUMP=y
CONFIG_HAVE_ARCH_JUMP_LABEL=y
CONFIG_HAVE_RCU_TABLE_FREE=y
+CONFIG_HAVE_RCU_TABLE_INVALIDATE=y
CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
CONFIG_HAVE_CMPXCHG_LOCAL=y
@@ -438,10 +432,15 @@ CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ_SQ=y
+CONFIG_BFQ_SQ_GROUP_IOSCHED=y
# CONFIG_DEFAULT_DEADLINE is not set
-CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_BFQ_SQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_DEFAULT_IOSCHED="bfq-sq"
+CONFIG_MQ_IOSCHED_BFQ=y
+CONFIG_MQ_BFQ_GROUP_IOSCHED=y
CONFIG_MQ_IOSCHED_DEADLINE=y
# CONFIG_MQ_IOSCHED_KYBER is not set
CONFIG_IOSCHED_BFQ=y
@@ -515,6 +514,7 @@ CONFIG_IOMMU_HELPER=y
CONFIG_MAXSMP=y
CONFIG_NR_CPUS=8192
CONFIG_SCHED_SMT=y
+CONFIG_SMT_NICE=y
CONFIG_SCHED_MC=y
CONFIG_SCHED_MC_PRIO=y
# CONFIG_PREEMPT_NONE is not set
@@ -655,11 +655,11 @@ CONFIG_EFI=y
CONFIG_EFI_STUB=y
CONFIG_EFI_MIXED=y
CONFIG_SECCOMP=y
-# CONFIG_HZ_100 is not set
-# CONFIG_HZ_250 is not set
-# CONFIG_HZ_300 is not set
-CONFIG_HZ_1000=y
-CONFIG_HZ=1000
+CONFIG_HZ_100=y
+# CONFIG_HZ_250_NODEF is not set
+# CONFIG_HZ_300_NODEF is not set
+# CONFIG_HZ_1000_NODEF is not set
+CONFIG_HZ=100
CONFIG_SCHED_HRTICK=y
CONFIG_KEXEC=y
# CONFIG_CRASH_DUMP is not set
@@ -4083,6 +4083,7 @@ CONFIG_HSI_BOARDINFO=y
CONFIG_HSI_CHAR=m
CONFIG_PPS=m
# CONFIG_PPS_DEBUG is not set
+# CONFIG_NTP_PPS is not set
#
# PPS clients support
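
With the config above the kernel builds bfq-sq as the default elevator for devices on the legacy block path and keeps the blk-mq BFQ variant available as well. Which scheduler a given disk actually ended up with can be read back from sysfs, where the kernel marks the active one in square brackets (e.g. "noop deadline cfq [bfq-sq]"). A sketch; sda is an assumption, pass another device name as the first argument:

/* show_scheduler.c - print the active I/O scheduler of one block device. */
#include <stdio.h>

int main(int argc, char **argv)
{
    const char *dev = argc > 1 ? argv[1] : "sda";
    char path[128], line[256];
    FILE *f;

    snprintf(path, sizeof(path), "/sys/block/%s/queue/scheduler", dev);
    f = fopen(path, "r");
    if (!f) {
        perror(path);
        return 1;
    }
    if (fgets(line, sizeof(line), f))
        printf("%s: %s", dev, line);
    fclose(f);
    return 0;
}
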
diff --git a/sys-kernel/linux-image-redcore-lts/files/uksm-for-linux-hardened.patch b/sys-kernel/linux-image-redcore-lts/files/uksm-linux-hardened.patch
index f0596117..f0596117 100644
--- a/sys-kernel/linux-image-redcore-lts/files/uksm-for-linux-hardened.patch
+++ b/sys-kernel/linux-image-redcore-lts/files/uksm-linux-hardened.patch
diff --git a/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.14.65.ebuild b/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.14.75.ebuild
index 8577515c..a50b0ce0 100644
--- a/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.14.65.ebuild
+++ b/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.14.75.ebuild
@@ -37,8 +37,25 @@ PATCHES=( "${FILESDIR}"/introduce-NUMA-identity-node-sched-domain.patch
"${FILESDIR}"/restore-SD_PREFER_SIBLING-on-MC-domains.patch
"${FILESDIR}"/Revert-ath10k-activate-user-space-firmware-loading.patch
"${FILESDIR}"/linux-hardened.patch
- "${FILESDIR}"/uksm-for-linux-hardened.patch
- "${FILESDIR}"/0015-Enable-BFQ-io-scheduler-by-default.patch )
+ "${FILESDIR}"/uksm-linux-hardened.patch
+ "${FILESDIR}"/0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
+ "${FILESDIR}"/0002-Make-preemptible-kernel-default.patch
+ "${FILESDIR}"/0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
+ "${FILESDIR}"/0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
+ "${FILESDIR}"/0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
+ "${FILESDIR}"/0006-Convert-msleep-to-use-hrtimers-when-active.patch
+ "${FILESDIR}"/0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
+ "${FILESDIR}"/0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
+ "${FILESDIR}"/0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
+ "${FILESDIR}"/0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
+ "${FILESDIR}"/0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
+ "${FILESDIR}"/0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
+ "${FILESDIR}"/0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
+ "${FILESDIR}"/0014-Swap-sucks.patch
+ "${FILESDIR}"/0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
+ "${FILESDIR}"/0016-unfuck-MuQSS-on-linux-4_14_15+.patch
+ "${FILESDIR}"/0001-BFQ-v8r12-20171108.patch
+ "${FILESDIR}"/0002-BFQ-v8r12-20180404.patch )
S="${WORKDIR}"/linux-"${PV}"
@@ -55,11 +72,11 @@ src_prepare() {
emake mrproper
sed -ri "s|^(EXTRAVERSION =).*|\1 -${EXTRAVERSION}|" Makefile
cp "${FILESDIR}"/"${EXTRAVERSION}"-amd64.config .config
+ rm -rf $(find . -type f|grep -F \.orig)
}
src_compile() {
- emake prepare modules_prepare
- emake bzImage modules
+ emake prepare modules_prepare bzImage modules
}
src_install() {
diff --git a/sys-kernel/linux-sources-redcore-lts/Manifest b/sys-kernel/linux-sources-redcore-lts/Manifest
index 17b5b0b2..48f87643 100644
--- a/sys-kernel/linux-sources-redcore-lts/Manifest
+++ b/sys-kernel/linux-sources-redcore-lts/Manifest
@@ -1 +1 @@
-DIST linux-4.14.65.tar.xz 100977596 BLAKE2B 1864dadfbdd4cf2e8c89c196291e04a680f06f9916a792bc6f2c22e9b74e512f6475a7dbfb70c81882841583e726466c0f7ff6995d3e78d6334a71b4cef06303 SHA512 162382b3567ba256a1caac7b9c0e2188484ae22d8731c2627ab0faa471ac35ca6578e0f0428c17d63d14f53316b7701a0e9c7a99b1bc749ddd6ab408f10c2185
+DIST linux-4.14.75.tar.xz 100992748 BLAKE2B febb717f667f380b4c39a06c0bb522181dc7f16fd21e86794589cef8b4de1b064c216e5e51aa6b4bfb2deead6263b76ecce3bfc480126bdf9840d17c9ba590b9 SHA512 d6d75a89fd0aed92d3dae4e651273a5b2fec242e49ba6fd71cf642c32e346fb6be083b3c9d1f77fc6ded9531d9f1efd82041f28b12f71eaf2c53d16c071e6703
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0001-BFQ-v8r12-20171108.patch b/sys-kernel/linux-sources-redcore-lts/files/0001-BFQ-v8r12-20171108.patch
new file mode 100644
index 00000000..db7d064b
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0001-BFQ-v8r12-20171108.patch
@@ -0,0 +1,25199 @@
+From c21f53f17430230dab50df29b8ea1b71f99d09d6 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@unimore.it>
+Date: Tue, 7 Apr 2015 13:39:12 +0200
+Subject: [PATCH 01/51] Add BFQ-v8r12
+
+This commit is the result of the following operations.
+
+1. The squash of all the commits between "block: cgroups, kconfig,
+build bits for BFQ-v7r11-4.5.0" and BFQ-v8r12 in the branch
+bfq-mq-v8-v4.11
+
+2. The renaming of two files (block/bfq-cgroup.c ->
+block/bfq-cgroup-included.c and block/bfq-iosched.c ->
+block/bfq-sq-iosched.c) and of one option (CONFIG_BFQ_GROUP_IOSCHED ->
+CONFIG_BFQ_SQ_GROUP_IOSCHED), to avoid name clashes. These name
+clashes are due to the presence of bfq in mainline from 4.12.
+
+3. The modification of block/Makefile and block/Kconfig.iosched to
+comply with the above renaming.
+
+Signed-off-by: Mauro Andreolini <mauro.andreolini@unimore.it>
+Signed-off-by: Arianna Avanzini <avanzini@google.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ Makefile | 2 +-
+ block/Kconfig.iosched | 31 +
+ block/bfq-cgroup-included.c | 1190 ++++++++++
+ block/bfq-ioc.c | 36 +
+ block/bfq-sched.c | 2002 ++++++++++++++++
+ block/bfq-sq-iosched.c | 5379 +++++++++++++++++++++++++++++++++++++++++++
+ block/bfq.h | 948 ++++++++
+ include/linux/blkdev.h | 2 +-
+ 9 files changed, 9589 insertions(+), 2 deletions(-)
+ create mode 100644 block/bfq-cgroup-included.c
+ create mode 100644 block/bfq-ioc.c
+ create mode 100644 block/bfq-sched.c
+ create mode 100644 block/bfq-sq-iosched.c
+ create mode 100644 block/bfq.h
+
+diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
+index a4a8914bf7a4..9e3f4c2f7390 100644
+--- a/block/Kconfig.iosched
++++ b/block/Kconfig.iosched
+@@ -40,6 +40,26 @@ config CFQ_GROUP_IOSCHED
+ ---help---
+ Enable group IO scheduling in CFQ.
+
++config IOSCHED_BFQ_SQ
++ tristate "BFQ-SQ I/O scheduler"
++ default n
++ ---help---
++ The BFQ-SQ I/O scheduler (for legacy blk: SQ stands for
++ SingleQueue) distributes bandwidth among all processes
++ according to their weights, regardless of the device
++ parameters and with any workload. It also guarantees a low
++ latency to interactive and soft real-time applications.
++ Details in Documentation/block/bfq-iosched.txt
++
++config BFQ_SQ_GROUP_IOSCHED
++ bool "BFQ-SQ hierarchical scheduling support"
++ depends on IOSCHED_BFQ_SQ && BLK_CGROUP
++ default n
++ ---help---
++
++ Enable hierarchical scheduling in BFQ-SQ, using the blkio
++ (cgroups-v1) or io (cgroups-v2) controller.
++
+ choice
+
+ prompt "Default I/O scheduler"
+@@ -54,6 +74,16 @@ choice
+ config DEFAULT_CFQ
+ bool "CFQ" if IOSCHED_CFQ=y
+
++ config DEFAULT_BFQ_SQ
++ bool "BFQ-SQ" if IOSCHED_BFQ_SQ=y
++ help
++ Selects BFQ-SQ as the default I/O scheduler which will be
++ used by default for all block devices.
++ The BFQ-SQ I/O scheduler aims at distributing the bandwidth
++ as desired, independently of the disk parameters and with
++ any workload. It also tries to guarantee low latency to
++ interactive and soft real-time applications.
++
+ config DEFAULT_NOOP
+ bool "No-op"
+
+@@ -63,6 +93,7 @@ config DEFAULT_IOSCHED
+ string
+ default "deadline" if DEFAULT_DEADLINE
+ default "cfq" if DEFAULT_CFQ
++ default "bfq-sq" if DEFAULT_BFQ_SQ
+ default "noop" if DEFAULT_NOOP
+
+ config MQ_IOSCHED_DEADLINE
+diff --git a/block/Makefile b/block/Makefile
+index 6a56303b9925..59026b425791 100644
+--- a/block/Makefile
++++ b/block/Makefile
+@@ -24,6 +24,7 @@ obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o
+ obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
+ bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
+ obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
++obj-$(CONFIG_IOSCHED_BFQ_SQ) += bfq-sq-iosched.o
+
+ obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
+ obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+new file mode 100644
+index 000000000000..af7c216a3540
+--- /dev/null
++++ b/block/bfq-cgroup-included.c
+@@ -0,0 +1,1190 @@
++/*
++ * BFQ: CGROUPS support.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2016 Paolo Valente <paolo.valente@linaro.org>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ */
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++
++/* bfqg stats flags */
++enum bfqg_stats_flags {
++ BFQG_stats_waiting = 0,
++ BFQG_stats_idling,
++ BFQG_stats_empty,
++};
++
++#define BFQG_FLAG_FNS(name) \
++static void bfqg_stats_mark_##name(struct bfqg_stats *stats) \
++{ \
++ stats->flags |= (1 << BFQG_stats_##name); \
++} \
++static void bfqg_stats_clear_##name(struct bfqg_stats *stats) \
++{ \
++ stats->flags &= ~(1 << BFQG_stats_##name); \
++} \
++static int bfqg_stats_##name(struct bfqg_stats *stats) \
++{ \
++ return (stats->flags & (1 << BFQG_stats_##name)) != 0; \
++} \
++
++BFQG_FLAG_FNS(waiting)
++BFQG_FLAG_FNS(idling)
++BFQG_FLAG_FNS(empty)
++#undef BFQG_FLAG_FNS
++
++/* This should be called with the queue_lock held. */
++static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
++{
++ unsigned long long now;
++
++ if (!bfqg_stats_waiting(stats))
++ return;
++
++ now = sched_clock();
++ if (time_after64(now, stats->start_group_wait_time))
++ blkg_stat_add(&stats->group_wait_time,
++ now - stats->start_group_wait_time);
++ bfqg_stats_clear_waiting(stats);
++}
++
++/* This should be called with the queue_lock held. */
++static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
++ struct bfq_group *curr_bfqg)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++
++ if (bfqg_stats_waiting(stats))
++ return;
++ if (bfqg == curr_bfqg)
++ return;
++ stats->start_group_wait_time = sched_clock();
++ bfqg_stats_mark_waiting(stats);
++}
++
++/* This should be called with the queue_lock held. */
++static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
++{
++ unsigned long long now;
++
++ if (!bfqg_stats_empty(stats))
++ return;
++
++ now = sched_clock();
++ if (time_after64(now, stats->start_empty_time))
++ blkg_stat_add(&stats->empty_time,
++ now - stats->start_empty_time);
++ bfqg_stats_clear_empty(stats);
++}
++
++static void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
++{
++ blkg_stat_add(&bfqg->stats.dequeue, 1);
++}
++
++static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++
++ if (blkg_rwstat_total(&stats->queued))
++ return;
++
++ /*
++ * group is already marked empty. This can happen if bfqq got new
++ * request in parent group and moved to this group while being added
++ * to service tree. Just ignore the event and move on.
++ */
++ if (bfqg_stats_empty(stats))
++ return;
++
++ stats->start_empty_time = sched_clock();
++ bfqg_stats_mark_empty(stats);
++}
++
++static void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++
++ if (bfqg_stats_idling(stats)) {
++ unsigned long long now = sched_clock();
++
++ if (time_after64(now, stats->start_idle_time))
++ blkg_stat_add(&stats->idle_time,
++ now - stats->start_idle_time);
++ bfqg_stats_clear_idling(stats);
++ }
++}
++
++static void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++
++ stats->start_idle_time = sched_clock();
++ bfqg_stats_mark_idling(stats);
++}
++
++static void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++
++ blkg_stat_add(&stats->avg_queue_size_sum,
++ blkg_rwstat_total(&stats->queued));
++ blkg_stat_add(&stats->avg_queue_size_samples, 1);
++ bfqg_stats_update_group_wait_time(stats);
++}
++
++static struct blkcg_policy blkcg_policy_bfq;
++
++/*
++ * blk-cgroup policy-related handlers
++ * The following functions help in converting between blk-cgroup
++ * internal structures and BFQ-specific structures.
++ */
++
++static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
++{
++ return pd ? container_of(pd, struct bfq_group, pd) : NULL;
++}
++
++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
++{
++ return pd_to_blkg(&bfqg->pd);
++}
++
++static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
++{
++ struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_bfq);
++
++ return pd_to_bfqg(pd);
++}
++
++/*
++ * bfq_group handlers
++ * The following functions help in navigating the bfq_group hierarchy
++ * by allowing to find the parent of a bfq_group or the bfq_group
++ * associated to a bfq_queue.
++ */
++
++static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
++{
++ struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
++
++ return pblkg ? blkg_to_bfqg(pblkg) : NULL;
++}
++
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *group_entity = bfqq->entity.parent;
++
++ return group_entity ? container_of(group_entity, struct bfq_group,
++ entity) :
++ bfqq->bfqd->root_group;
++}
++
++/*
++ * The following two functions handle get and put of a bfq_group by
++ * wrapping the related blk-cgroup hooks.
++ */
++
++static void bfqg_get(struct bfq_group *bfqg)
++{
++ return blkg_get(bfqg_to_blkg(bfqg));
++}
++
++static void bfqg_put(struct bfq_group *bfqg)
++{
++ return blkg_put(bfqg_to_blkg(bfqg));
++}
++
++static void bfqg_stats_update_io_add(struct bfq_group *bfqg,
++ struct bfq_queue *bfqq,
++ unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.queued, op, 1);
++ bfqg_stats_end_empty_time(&bfqg->stats);
++ if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
++ bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
++}
++
++static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.queued, op, -1);
++}
++
++static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.merged, op, 1);
++}
++
++static void bfqg_stats_update_completion(struct bfq_group *bfqg,
++ uint64_t start_time, uint64_t io_start_time,
++ unsigned int op)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++ unsigned long long now = sched_clock();
++
++ if (time_after64(now, io_start_time))
++ blkg_rwstat_add(&stats->service_time, op,
++ now - io_start_time);
++ if (time_after64(io_start_time, start_time))
++ blkg_rwstat_add(&stats->wait_time, op,
++ io_start_time - start_time);
++}
++
++/* @stats = 0 */
++static void bfqg_stats_reset(struct bfqg_stats *stats)
++{
++ /* queued stats shouldn't be cleared */
++ blkg_rwstat_reset(&stats->merged);
++ blkg_rwstat_reset(&stats->service_time);
++ blkg_rwstat_reset(&stats->wait_time);
++ blkg_stat_reset(&stats->time);
++ blkg_stat_reset(&stats->avg_queue_size_sum);
++ blkg_stat_reset(&stats->avg_queue_size_samples);
++ blkg_stat_reset(&stats->dequeue);
++ blkg_stat_reset(&stats->group_wait_time);
++ blkg_stat_reset(&stats->idle_time);
++ blkg_stat_reset(&stats->empty_time);
++}
++
++/* @to += @from */
++static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
++{
++ if (!to || !from)
++ return;
++
++ /* queued stats shouldn't be cleared */
++ blkg_rwstat_add_aux(&to->merged, &from->merged);
++ blkg_rwstat_add_aux(&to->service_time, &from->service_time);
++ blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
++ blkg_stat_add_aux(&from->time, &from->time);
++ blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
++ blkg_stat_add_aux(&to->avg_queue_size_samples,
++ &from->avg_queue_size_samples);
++ blkg_stat_add_aux(&to->dequeue, &from->dequeue);
++ blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
++ blkg_stat_add_aux(&to->idle_time, &from->idle_time);
++ blkg_stat_add_aux(&to->empty_time, &from->empty_time);
++}
++
++/*
++ * Transfer @bfqg's stats to its parent's dead_stats so that the ancestors'
++ * recursive stats can still account for the amount used by this bfqg after
++ * it's gone.
++ */
++static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
++{
++ struct bfq_group *parent;
++
++ if (!bfqg) /* root_group */
++ return;
++
++ parent = bfqg_parent(bfqg);
++
++ lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
++
++ if (unlikely(!parent))
++ return;
++
++ bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
++ bfqg_stats_reset(&bfqg->stats);
++}
++
++static void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ if (bfqq) {
++ bfqq->ioprio = bfqq->new_ioprio;
++ bfqq->ioprio_class = bfqq->new_ioprio_class;
++ bfqg_get(bfqg);
++ }
++ entity->parent = bfqg->my_entity; /* NULL for root group */
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static void bfqg_stats_exit(struct bfqg_stats *stats)
++{
++ blkg_rwstat_exit(&stats->merged);
++ blkg_rwstat_exit(&stats->service_time);
++ blkg_rwstat_exit(&stats->wait_time);
++ blkg_rwstat_exit(&stats->queued);
++ blkg_stat_exit(&stats->time);
++ blkg_stat_exit(&stats->avg_queue_size_sum);
++ blkg_stat_exit(&stats->avg_queue_size_samples);
++ blkg_stat_exit(&stats->dequeue);
++ blkg_stat_exit(&stats->group_wait_time);
++ blkg_stat_exit(&stats->idle_time);
++ blkg_stat_exit(&stats->empty_time);
++}
++
++static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
++{
++ if (blkg_rwstat_init(&stats->merged, gfp) ||
++ blkg_rwstat_init(&stats->service_time, gfp) ||
++ blkg_rwstat_init(&stats->wait_time, gfp) ||
++ blkg_rwstat_init(&stats->queued, gfp) ||
++ blkg_stat_init(&stats->time, gfp) ||
++ blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
++ blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
++ blkg_stat_init(&stats->dequeue, gfp) ||
++ blkg_stat_init(&stats->group_wait_time, gfp) ||
++ blkg_stat_init(&stats->idle_time, gfp) ||
++ blkg_stat_init(&stats->empty_time, gfp)) {
++ bfqg_stats_exit(stats);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
++{
++ return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
++}
++
++static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
++{
++ return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
++}
++
++static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
++{
++ struct bfq_group_data *bgd;
++
++ bgd = kzalloc(sizeof(*bgd), gfp);
++ if (!bgd)
++ return NULL;
++ return &bgd->pd;
++}
++
++static void bfq_cpd_init(struct blkcg_policy_data *cpd)
++{
++ struct bfq_group_data *d = cpd_to_bfqgd(cpd);
++
++ d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
++ CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
++}
++
++static void bfq_cpd_free(struct blkcg_policy_data *cpd)
++{
++ kfree(cpd_to_bfqgd(cpd));
++}
++
++static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
++{
++ struct bfq_group *bfqg;
++
++ bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
++ if (!bfqg)
++ return NULL;
++
++ if (bfqg_stats_init(&bfqg->stats, gfp)) {
++ kfree(bfqg);
++ return NULL;
++ }
++
++ return &bfqg->pd;
++}
++
++static void bfq_pd_init(struct blkg_policy_data *pd)
++{
++ struct blkcg_gq *blkg;
++ struct bfq_group *bfqg;
++ struct bfq_data *bfqd;
++ struct bfq_entity *entity;
++ struct bfq_group_data *d;
++
++ blkg = pd_to_blkg(pd);
++ BUG_ON(!blkg);
++ bfqg = blkg_to_bfqg(blkg);
++ bfqd = blkg->q->elevator->elevator_data;
++ entity = &bfqg->entity;
++ d = blkcg_to_bfqgd(blkg->blkcg);
++
++ entity->orig_weight = entity->weight = entity->new_weight = d->weight;
++ entity->my_sched_data = &bfqg->sched_data;
++ bfqg->my_entity = entity; /*
++ * the root_group's will be set to NULL
++ * in bfq_init_queue()
++ */
++ bfqg->bfqd = bfqd;
++ bfqg->active_entities = 0;
++ bfqg->rq_pos_tree = RB_ROOT;
++}
++
++static void bfq_pd_free(struct blkg_policy_data *pd)
++{
++ struct bfq_group *bfqg = pd_to_bfqg(pd);
++
++ bfqg_stats_exit(&bfqg->stats);
++ return kfree(bfqg);
++}
++
++static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
++{
++ struct bfq_group *bfqg = pd_to_bfqg(pd);
++
++ bfqg_stats_reset(&bfqg->stats);
++}
++
++static void bfq_group_set_parent(struct bfq_group *bfqg,
++ struct bfq_group *parent)
++{
++ struct bfq_entity *entity;
++
++ BUG_ON(!parent);
++ BUG_ON(!bfqg);
++ BUG_ON(bfqg == parent);
++
++ entity = &bfqg->entity;
++ entity->parent = parent->my_entity;
++ entity->sched_data = &parent->sched_data;
++}
++
++static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
++ struct blkcg *blkcg)
++{
++ struct blkcg_gq *blkg;
++
++ blkg = blkg_lookup(blkcg, bfqd->queue);
++ if (likely(blkg))
++ return blkg_to_bfqg(blkg);
++ return NULL;
++}
++
++static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
++ struct blkcg *blkcg)
++{
++ struct bfq_group *bfqg, *parent;
++ struct bfq_entity *entity;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ bfqg = bfq_lookup_bfqg(bfqd, blkcg);
++
++ if (unlikely(!bfqg))
++ return NULL;
++
++ /*
++ * Update chain of bfq_groups as we might be handling a leaf group
++ * which, along with some of its relatives, has not been hooked yet
++ * to the private hierarchy of BFQ.
++ */
++ entity = &bfqg->entity;
++ for_each_entity(entity) {
++ bfqg = container_of(entity, struct bfq_group, entity);
++ BUG_ON(!bfqg);
++ if (bfqg != bfqd->root_group) {
++ parent = bfqg_parent(bfqg);
++ if (!parent)
++ parent = bfqd->root_group;
++ BUG_ON(!parent);
++ bfq_group_set_parent(bfqg, parent);
++ }
++ }
++
++ return bfqg;
++}
++
++static void bfq_pos_tree_add_move(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq);
++
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool compensate,
++ enum bfqq_expiration reason);
++
++/**
++ * bfq_bfqq_move - migrate @bfqq to @bfqg.
++ * @bfqd: queue descriptor.
++ * @bfqq: the queue to move.
++ * @bfqg: the group to move to.
++ *
++ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
++ * it on the new one. Avoid putting the entity on the old group idle tree.
++ *
++ * Must be called under the queue lock; the cgroup owning @bfqg must
++ * not disappear (by now this just means that we are called under
++ * rcu_read_lock()).
++ */
++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_group *bfqg)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ BUG_ON(!bfq_bfqq_busy(bfqq) && !RB_EMPTY_ROOT(&bfqq->sort_list));
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list) && !entity->on_st);
++ BUG_ON(bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list)
++ && entity->on_st &&
++ bfqq != bfqd->in_service_queue);
++ BUG_ON(!bfq_bfqq_busy(bfqq) && bfqq == bfqd->in_service_queue);
++
++ /* If bfqq is empty, then bfq_bfqq_expire also invokes
++ * bfq_del_bfqq_busy, thereby removing bfqq and its entity
++ * from data structures related to current group. Otherwise we
++ * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
++ * we do below.
++ */
++ if (bfqq == bfqd->in_service_queue)
++ bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
++ false, BFQ_BFQQ_PREEMPTED);
++
++ BUG_ON(entity->on_st && !bfq_bfqq_busy(bfqq)
++ && &bfq_entity_service_tree(entity)->idle !=
++ entity->tree);
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq));
++
++ if (bfq_bfqq_busy(bfqq))
++ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
++ else if (entity->on_st) {
++ BUG_ON(&bfq_entity_service_tree(entity)->idle !=
++ entity->tree);
++ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
++ }
++ bfqg_put(bfqq_group(bfqq));
++
++ /*
++ * Here we use a reference to bfqg. We don't need a refcounter
++ * as the cgroup reference will not be dropped, so that its
++ * destroy() callback will not be invoked.
++ */
++ entity->parent = bfqg->my_entity;
++ entity->sched_data = &bfqg->sched_data;
++ bfqg_get(bfqg);
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq));
++ if (bfq_bfqq_busy(bfqq)) {
++ bfq_pos_tree_add_move(bfqd, bfqq);
++ bfq_activate_bfqq(bfqd, bfqq);
++ }
++
++ if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++ BUG_ON(entity->on_st && !bfq_bfqq_busy(bfqq)
++ && &bfq_entity_service_tree(entity)->idle !=
++ entity->tree);
++}
++
++/**
++ * __bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bfqd: the queue descriptor.
++ * @bic: the bic to move.
++ * @blkcg: the blk-cgroup to move to.
++ *
++ * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
++ * has to make sure that the reference to cgroup is valid across the call.
++ *
++ * NOTE: an alternative approach might have been to store the current
++ * cgroup in bfqq and getting a reference to it, reducing the lookup
++ * time here, at the price of slightly more complex code.
++ */
++static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic,
++ struct blkcg *blkcg)
++{
++ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
++ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
++ struct bfq_group *bfqg;
++ struct bfq_entity *entity;
++
++ lockdep_assert_held(bfqd->queue->queue_lock);
++
++ bfqg = bfq_find_set_group(bfqd, blkcg);
++
++ if (unlikely(!bfqg))
++ bfqg = bfqd->root_group;
++
++ if (async_bfqq) {
++ entity = &async_bfqq->entity;
++
++ if (entity->sched_data != &bfqg->sched_data) {
++ bic_set_bfqq(bic, NULL, 0);
++ bfq_log_bfqq(bfqd, async_bfqq,
++ "bic_change_group: %p %d",
++ async_bfqq,
++ async_bfqq->ref);
++ bfq_put_queue(async_bfqq);
++ }
++ }
++
++ if (sync_bfqq) {
++ entity = &sync_bfqq->entity;
++ if (entity->sched_data != &bfqg->sched_data)
++ bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
++ }
++
++ return bfqg;
++}
++
++static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_group *bfqg = NULL;
++ uint64_t serial_nr;
++
++ rcu_read_lock();
++ serial_nr = bio_blkcg(bio)->css.serial_nr;
++
++ /*
++ * Check whether blkcg has changed. The condition may trigger
++ * spuriously on a newly created cic but there's no harm.
++ */
++ if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
++ goto out;
++
++ bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
++ bic->blkcg_serial_nr = serial_nr;
++out:
++ rcu_read_unlock();
++}
++
++/**
++ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
++ * @st: the service tree being flushed.
++ */
++static void bfq_flush_idle_tree(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entity = st->first_idle;
++
++ for (; entity ; entity = st->first_idle)
++ __bfq_deactivate_entity(entity, false);
++}
++
++/**
++ * bfq_reparent_leaf_entity - move leaf entity to the root_group.
++ * @bfqd: the device data structure with the root group.
++ * @entity: the entity to move.
++ */
++static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ BUG_ON(!bfqq);
++ bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
++}
++
++/**
++ * bfq_reparent_active_entities - move to the root group all active
++ * entities.
++ * @bfqd: the device data structure with the root group.
++ * @bfqg: the group to move from.
++ * @st: the service tree with the entities.
++ *
++ * Needs queue_lock to be taken and reference to be valid over the call.
++ */
++static void bfq_reparent_active_entities(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ struct bfq_service_tree *st)
++{
++ struct rb_root *active = &st->active;
++ struct bfq_entity *entity = NULL;
++
++ if (!RB_EMPTY_ROOT(&st->active))
++ entity = bfq_entity_of(rb_first(active));
++
++ for (; entity ; entity = bfq_entity_of(rb_first(active)))
++ bfq_reparent_leaf_entity(bfqd, entity);
++
++ if (bfqg->sched_data.in_service_entity)
++ bfq_reparent_leaf_entity(bfqd,
++ bfqg->sched_data.in_service_entity);
++}
++
++/**
++ * bfq_pd_offline - deactivate the entity associated with @pd,
++ * and reparent its children entities.
++ * @pd: descriptor of the policy going offline.
++ *
++ * blkio already grabs the queue_lock for us, so no need to use
++ * RCU-based magic
++ */
++static void bfq_pd_offline(struct blkg_policy_data *pd)
++{
++ struct bfq_service_tree *st;
++ struct bfq_group *bfqg;
++ struct bfq_data *bfqd;
++ struct bfq_entity *entity;
++ int i;
++
++ BUG_ON(!pd);
++ bfqg = pd_to_bfqg(pd);
++ BUG_ON(!bfqg);
++ bfqd = bfqg->bfqd;
++ BUG_ON(bfqd && !bfqd->root_group);
++
++ entity = bfqg->my_entity;
++
++ if (!entity) /* root group */
++ return;
++
++ /*
++ * Empty all service_trees belonging to this group before
++ * deactivating the group itself.
++ */
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
++ BUG_ON(!bfqg->sched_data.service_tree);
++ st = bfqg->sched_data.service_tree + i;
++ /*
++ * The idle tree may still contain bfq_queues belonging
++ * to exited task because they never migrated to a different
++ * cgroup from the one being destroyed now. No one else
++ * can access them so it's safe to act without any lock.
++ */
++ bfq_flush_idle_tree(st);
++
++ /*
++ * It may happen that some queues are still active
++ * (busy) upon group destruction (if the corresponding
++ * processes have been forced to terminate). We move
++ * all the leaf entities corresponding to these queues
++ * to the root_group.
++ * Also, it may happen that the group has an entity
++ * in service, which is disconnected from the active
++ * tree: it must be moved, too.
++ * There is no need to put the sync queues, as the
++ * scheduler has taken no reference.
++ */
++ bfq_reparent_active_entities(bfqd, bfqg, st);
++ BUG_ON(!RB_EMPTY_ROOT(&st->active));
++ BUG_ON(!RB_EMPTY_ROOT(&st->idle));
++ }
++ BUG_ON(bfqg->sched_data.next_in_service);
++ BUG_ON(bfqg->sched_data.in_service_entity);
++
++ __bfq_deactivate_entity(entity, false);
++ bfq_put_async_queues(bfqd, bfqg);
++
++ /*
++ * @blkg is going offline and will be ignored by
++ * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
++ * that they don't get lost. If IOs complete after this point, the
++ * stats for them will be lost. Oh well...
++ */
++ bfqg_stats_xfer_dead(bfqg);
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++ struct blkcg_gq *blkg;
++
++ list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
++ struct bfq_group *bfqg = blkg_to_bfqg(blkg);
++ BUG_ON(!bfqg);
++
++ bfq_end_wr_async_queues(bfqd, bfqg);
++ }
++ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++static int bfq_io_show_weight(struct seq_file *sf, void *v)
++{
++ struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
++ struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
++ unsigned int val = 0;
++
++ if (bfqgd)
++ val = bfqgd->weight;
++
++ seq_printf(sf, "%u\n", val);
++
++ return 0;
++}
++
++static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
++ struct cftype *cftype,
++ u64 val)
++{
++ struct blkcg *blkcg = css_to_blkcg(css);
++ struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
++ struct blkcg_gq *blkg;
++ int ret = -ERANGE;
++
++ if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
++ return ret;
++
++ ret = 0;
++ spin_lock_irq(&blkcg->lock);
++ bfqgd->weight = (unsigned short)val;
++ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
++ struct bfq_group *bfqg = blkg_to_bfqg(blkg);
++
++ if (!bfqg)
++ continue;
++ /*
++ * Setting the prio_changed flag of the entity
++ * to 1 with new_weight == weight would re-set
++ * the value of the weight to its ioprio mapping.
++ * Set the flag only if necessary.
++ */
++ if ((unsigned short)val != bfqg->entity.new_weight) {
++ bfqg->entity.new_weight = (unsigned short)val;
++ /*
++ * Make sure that the above new value has been
++ * stored in bfqg->entity.new_weight before
++ * setting the prio_changed flag. In fact,
++ * this flag may be read asynchronously (in
++ * critical sections protected by a different
++ * lock than that held here), and finding this
++ * flag set may cause the execution of the code
++ * for updating parameters whose value may
++ * depend also on bfqg->entity.new_weight (in
++ * __bfq_entity_update_weight_prio).
++ * This barrier makes sure that the new value
++ * of bfqg->entity.new_weight is correctly
++ * seen in that code.
++ */
++ smp_wmb();
++ bfqg->entity.prio_changed = 1;
++ }
++ }
++ spin_unlock_irq(&blkcg->lock);
++
++ return ret;
++}
++
++static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
++ char *buf, size_t nbytes,
++ loff_t off)
++{
++ u64 weight;
++ /* First unsigned long found in the file is used */
++ int ret = kstrtoull(strim(buf), 0, &weight);
++
++ if (ret)
++ return ret;
++
++ return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
++}
++
++static int bfqg_print_stat(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
++ &blkcg_policy_bfq, seq_cft(sf)->private, false);
++ return 0;
++}
++
++static int bfqg_print_rwstat(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
++ &blkcg_policy_bfq, seq_cft(sf)->private, true);
++ return 0;
++}
++
++static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
++ struct blkg_policy_data *pd, int off)
++{
++ u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
++ &blkcg_policy_bfq, off);
++ return __blkg_prfill_u64(sf, pd, sum);
++}
++
++static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
++ struct blkg_policy_data *pd, int off)
++{
++ struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
++ &blkcg_policy_bfq,
++ off);
++ return __blkg_prfill_rwstat(sf, pd, &sum);
++}
++
++static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
++ bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
++ seq_cft(sf)->private, false);
++ return 0;
++}
++
++static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
++ bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
++ seq_cft(sf)->private, true);
++ return 0;
++}
++
++static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
++ int off)
++{
++ u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
++
++ return __blkg_prfill_u64(sf, pd, sum >> 9);
++}
++
++static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
++ bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
++ return 0;
++}
++
++static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
++ struct blkg_policy_data *pd, int off)
++{
++ struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
++ offsetof(struct blkcg_gq, stat_bytes));
++ u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
++ atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
++
++ return __blkg_prfill_u64(sf, pd, sum >> 9);
++}
++
++static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
++ bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
++ false);
++ return 0;
++}
++
++
++static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
++ struct blkg_policy_data *pd, int off)
++{
++ struct bfq_group *bfqg = pd_to_bfqg(pd);
++ u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
++ u64 v = 0;
++
++ if (samples) {
++ v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
++ v = div64_u64(v, samples);
++ }
++ __blkg_prfill_u64(sf, pd, v);
++ return 0;
++}
++
++/* print avg_queue_size */
++static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
++{
++ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
++ bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
++ 0, false);
++ return 0;
++}
++
++static struct bfq_group *
++bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
++{
++ int ret;
++
++ ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
++ if (ret)
++ return NULL;
++
++ return blkg_to_bfqg(bfqd->queue->root_blkg);
++}
++
++static struct cftype bfq_blkcg_legacy_files[] = {
++ {
++ .name = "bfq.weight",
++ .flags = CFTYPE_NOT_ON_ROOT,
++ .seq_show = bfq_io_show_weight,
++ .write_u64 = bfq_io_set_weight_legacy,
++ },
++
++ /* statistics, covers only the tasks in the bfqg */
++ {
++ .name = "bfq.time",
++ .private = offsetof(struct bfq_group, stats.time),
++ .seq_show = bfqg_print_stat,
++ },
++ {
++ .name = "bfq.sectors",
++ .seq_show = bfqg_print_stat_sectors,
++ },
++ {
++ .name = "bfq.io_service_bytes",
++ .private = (unsigned long)&blkcg_policy_bfq,
++ .seq_show = blkg_print_stat_bytes,
++ },
++ {
++ .name = "bfq.io_serviced",
++ .private = (unsigned long)&blkcg_policy_bfq,
++ .seq_show = blkg_print_stat_ios,
++ },
++ {
++ .name = "bfq.io_service_time",
++ .private = offsetof(struct bfq_group, stats.service_time),
++ .seq_show = bfqg_print_rwstat,
++ },
++ {
++ .name = "bfq.io_wait_time",
++ .private = offsetof(struct bfq_group, stats.wait_time),
++ .seq_show = bfqg_print_rwstat,
++ },
++ {
++ .name = "bfq.io_merged",
++ .private = offsetof(struct bfq_group, stats.merged),
++ .seq_show = bfqg_print_rwstat,
++ },
++ {
++ .name = "bfq.io_queued",
++ .private = offsetof(struct bfq_group, stats.queued),
++ .seq_show = bfqg_print_rwstat,
++ },
++
++ /* the same statictics which cover the bfqg and its descendants */
++ {
++ .name = "bfq.time_recursive",
++ .private = offsetof(struct bfq_group, stats.time),
++ .seq_show = bfqg_print_stat_recursive,
++ },
++ {
++ .name = "bfq.sectors_recursive",
++ .seq_show = bfqg_print_stat_sectors_recursive,
++ },
++ {
++ .name = "bfq.io_service_bytes_recursive",
++ .private = (unsigned long)&blkcg_policy_bfq,
++ .seq_show = blkg_print_stat_bytes_recursive,
++ },
++ {
++ .name = "bfq.io_serviced_recursive",
++ .private = (unsigned long)&blkcg_policy_bfq,
++ .seq_show = blkg_print_stat_ios_recursive,
++ },
++ {
++ .name = "bfq.io_service_time_recursive",
++ .private = offsetof(struct bfq_group, stats.service_time),
++ .seq_show = bfqg_print_rwstat_recursive,
++ },
++ {
++ .name = "bfq.io_wait_time_recursive",
++ .private = offsetof(struct bfq_group, stats.wait_time),
++ .seq_show = bfqg_print_rwstat_recursive,
++ },
++ {
++ .name = "bfq.io_merged_recursive",
++ .private = offsetof(struct bfq_group, stats.merged),
++ .seq_show = bfqg_print_rwstat_recursive,
++ },
++ {
++ .name = "bfq.io_queued_recursive",
++ .private = offsetof(struct bfq_group, stats.queued),
++ .seq_show = bfqg_print_rwstat_recursive,
++ },
++ {
++ .name = "bfq.avg_queue_size",
++ .seq_show = bfqg_print_avg_queue_size,
++ },
++ {
++ .name = "bfq.group_wait_time",
++ .private = offsetof(struct bfq_group, stats.group_wait_time),
++ .seq_show = bfqg_print_stat,
++ },
++ {
++ .name = "bfq.idle_time",
++ .private = offsetof(struct bfq_group, stats.idle_time),
++ .seq_show = bfqg_print_stat,
++ },
++ {
++ .name = "bfq.empty_time",
++ .private = offsetof(struct bfq_group, stats.empty_time),
++ .seq_show = bfqg_print_stat,
++ },
++ {
++ .name = "bfq.dequeue",
++ .private = offsetof(struct bfq_group, stats.dequeue),
++ .seq_show = bfqg_print_stat,
++ },
++ { } /* terminate */
++};
++
++static struct cftype bfq_blkg_files[] = {
++ {
++ .name = "bfq.weight",
++ .flags = CFTYPE_NOT_ON_ROOT,
++ .seq_show = bfq_io_show_weight,
++ .write = bfq_io_set_weight,
++ },
++ {} /* terminate */
++};
++
++#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
++ struct bfq_queue *bfqq, unsigned int op) { }
++static inline void
++bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
++static inline void
++bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
++static inline void bfqg_stats_update_completion(struct bfq_group *bfqg,
++ uint64_t start_time, uint64_t io_start_time,
++ unsigned int op) { }
++static inline void
++bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
++ struct bfq_group *curr_bfqg) { }
++static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { }
++static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
++
++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_group *bfqg) {}
++
++static void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ if (bfqq) {
++ bfqq->ioprio = bfqq->new_ioprio;
++ bfqq->ioprio_class = bfqq->new_ioprio_class;
++ }
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
++ struct blkcg *blkcg)
++{
++ return bfqd->root_group;
++}
++
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
++{
++ return bfqq->bfqd->root_group;
++}
++
++static struct bfq_group *
++bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
++{
++ struct bfq_group *bfqg;
++ int i;
++
++ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
++ if (!bfqg)
++ return NULL;
++
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++ return bfqg;
++}
++#endif
+diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c
+new file mode 100644
+index 000000000000..fb7bb8f08b75
+--- /dev/null
++++ b/block/bfq-ioc.c
+@@ -0,0 +1,36 @@
++/*
++ * BFQ: I/O context handling.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++/**
++ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
++ * @icq: the iocontext queue.
++ */
++static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
++{
++ /* bic->icq is the first member, %NULL will convert to %NULL */
++ return container_of(icq, struct bfq_io_cq, icq);
++}
++
++/**
++ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
++ * @bfqd: the lookup key.
++ * @ioc: the io_context of the process doing I/O.
++ *
++ * Queue lock must be held.
++ */
++static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
++ struct io_context *ioc)
++{
++ if (ioc)
++ return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue));
++ return NULL;
++}
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+new file mode 100644
+index 000000000000..ac8991bca9fa
+--- /dev/null
++++ b/block/bfq-sched.c
+@@ -0,0 +1,2002 @@
++/*
++ * BFQ: Hierarchical B-WF2Q+ scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2016 Paolo Valente <paolo.valente@linaro.org>
++ */
++
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
++
++/**
++ * bfq_gt - compare two timestamps.
++ * @a: first ts.
++ * @b: second ts.
++ *
++ * Return @a > @b, dealing with wrapping correctly.
++ */
++static int bfq_gt(u64 a, u64 b)
++{
++ return (s64)(a - b) > 0;
++}
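++
++/*
++ * Illustrative sketch (not compiled), with assumed values: bfq_gt()
++ * stays correct across virtual-time wraparound because the comparison
++ * is done on the signed difference of the two timestamps.
++ */
++#if 0
++static void bfq_gt_wrap_example(void)
++{
++	u64 newer = 2;			/* timestamp taken after a wrap */
++	u64 older = ULLONG_MAX - 1;	/* timestamp taken just before it */
++
++	/* (s64)(2 - (ULLONG_MAX - 1)) == 4 > 0, so 'newer' is the later one */
++	BUG_ON(!bfq_gt(newer, older));
++}
++#endif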
++
++static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
++{
++ struct rb_node *node = tree->rb_node;
++
++ return rb_entry(node, struct bfq_entity, rb_node);
++}
++
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd);
++
++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
++
++/**
++ * bfq_update_next_in_service - update sd->next_in_service
++ * @sd: sched_data for which to perform the update.
++ * @new_entity: if not NULL, pointer to the entity whose activation,
++ *		requeueing or repositioning triggered the invocation of
++ * this function.
++ *
++ * This function is called to update sd->next_in_service, which, in
++ * its turn, may change as a consequence of the insertion or
++ * extraction of an entity into/from one of the active trees of
++ * sd. These insertions/extractions occur as a consequence of
++ * activations/deactivations of entities, with some activations being
++ * 'true' activations, and other activations being requeueings (i.e.,
++ * implementing the second, requeueing phase of the mechanism used to
++ * reposition an entity in its active tree; see comments on
++ * __bfq_activate_entity and __bfq_requeue_entity for details). In
++ * both the last two activation sub-cases, new_entity points to the
++ * just activated or requeued entity.
++ *
++ * Returns true if sd->next_in_service changes in such a way that
++ * entity->parent may become the next_in_service for its parent
++ * entity.
++ */
++static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
++ struct bfq_entity *new_entity)
++{
++ struct bfq_entity *next_in_service = sd->next_in_service;
++ struct bfq_queue *bfqq;
++ bool parent_sched_may_change = false;
++
++ /*
++ * If this update is triggered by the activation, requeueing
++	 * or repositioning of an entity that does not coincide with
++ * sd->next_in_service, then a full lookup in the active tree
++ * can be avoided. In fact, it is enough to check whether the
++ * just-modified entity has a higher priority than
++ * sd->next_in_service, or, even if it has the same priority
++ * as sd->next_in_service, is eligible and has a lower virtual
++ * finish time than sd->next_in_service. If this compound
++ * condition holds, then the new entity becomes the new
++ * next_in_service. Otherwise no change is needed.
++ */
++ if (new_entity && new_entity != sd->next_in_service) {
++ /*
++ * Flag used to decide whether to replace
++ * sd->next_in_service with new_entity. Tentatively
++ * set to true, and left as true if
++ * sd->next_in_service is NULL.
++ */
++ bool replace_next = true;
++
++ /*
++ * If there is already a next_in_service candidate
++ * entity, then compare class priorities or timestamps
++		 * to decide whether to replace sd->next_in_service with
++ * new_entity.
++ */
++ if (next_in_service) {
++ unsigned int new_entity_class_idx =
++ bfq_class_idx(new_entity);
++ struct bfq_service_tree *st =
++ sd->service_tree + new_entity_class_idx;
++
++ /*
++ * For efficiency, evaluate the most likely
++ * sub-condition first.
++ */
++ replace_next =
++ (new_entity_class_idx ==
++ bfq_class_idx(next_in_service)
++ &&
++ !bfq_gt(new_entity->start, st->vtime)
++ &&
++ bfq_gt(next_in_service->finish,
++ new_entity->finish))
++ ||
++ new_entity_class_idx <
++ bfq_class_idx(next_in_service);
++ }
++
++ if (replace_next)
++ next_in_service = new_entity;
++ } else /* invoked because of a deactivation: lookup needed */
++ next_in_service = bfq_lookup_next_entity(sd);
++
++ if (next_in_service) {
++ parent_sched_may_change = !sd->next_in_service ||
++ bfq_update_parent_budget(next_in_service);
++ }
++
++ sd->next_in_service = next_in_service;
++
++ if (!next_in_service)
++ return parent_sched_may_change;
++
++ bfqq = bfq_entity_to_bfqq(next_in_service);
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "update_next_in_service: chosen this queue");
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(next_in_service,
++ struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "update_next_in_service: chosen this entity");
++ }
++#endif
++ return parent_sched_may_change;
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++/* both next loops stop at one of the child entities of the root group */
++#define for_each_entity(entity) \
++ for (; entity ; entity = entity->parent)
++
++/*
++ * For each iteration, compute parent in advance, so as to be safe if
++ * entity is deallocated during the iteration. Such a deallocation may
++ * happen as a consequence of a bfq_put_queue that frees the bfq_queue
++ * containing entity.
++ */
++#define for_each_entity_safe(entity, parent) \
++ for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
++
++/*
++ * Returns true if this budget changes may let next_in_service->parent
++ * become the next_in_service entity for its parent entity.
++ */
++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
++{
++ struct bfq_entity *bfqg_entity;
++ struct bfq_group *bfqg;
++ struct bfq_sched_data *group_sd;
++ bool ret = false;
++
++ BUG_ON(!next_in_service);
++
++ group_sd = next_in_service->sched_data;
++
++ bfqg = container_of(group_sd, struct bfq_group, sched_data);
++ /*
++ * bfq_group's my_entity field is not NULL only if the group
++ * is not the root group. We must not touch the root entity
++ * as it must never become an in-service entity.
++ */
++ bfqg_entity = bfqg->my_entity;
++ if (bfqg_entity) {
++ if (bfqg_entity->budget > next_in_service->budget)
++ ret = true;
++ bfqg_entity->budget = next_in_service->budget;
++ }
++
++ return ret;
++}
++
++/*
++ * This function tells whether entity stops being a candidate for next
++ * service, according to the following logic.
++ *
++ * This function is invoked for an entity that is about to be set in
++ * service. If such an entity is a queue, then the entity is no longer
++ * a candidate for next service (i.e., a candidate entity to serve
++ * after the in-service entity is expired). The function then returns
++ * true.
++ *
++ * In contrast, the entity could still be a candidate for next service
++ * if it is not a queue, and has more than one child. In fact, even if
++ * one of its children is about to be set in service, other children
++ * may still be the next to serve. As a consequence, a non-queue
++ * entity is not a candidate for next-service only if it has only one
++ * child. And only if this condition holds, then the function returns
++ * child. Only if this condition holds does the function return
++ * true for a non-queue entity.
++static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
++{
++ struct bfq_group *bfqg;
++
++ if (bfq_entity_to_bfqq(entity))
++ return true;
++
++ bfqg = container_of(entity, struct bfq_group, entity);
++
++ BUG_ON(bfqg == ((struct bfq_data *)(bfqg->bfqd))->root_group);
++ BUG_ON(bfqg->active_entities == 0);
++ if (bfqg->active_entities == 1)
++ return true;
++
++ return false;
++}
++
++#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#define for_each_entity(entity) \
++ for (; entity ; entity = NULL)
++
++#define for_each_entity_safe(entity, parent) \
++ for (parent = NULL; entity ; entity = parent)
++
++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
++{
++ return false;
++}
++
++static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
++{
++ return true;
++}
++
++#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++/*
++ * Shift for timestamp calculations. This actually limits the maximum
++ * service allowed in one timestamp delta (small shift values increase it),
++ * the maximum total weight that can be used for the queues in the system
++ * (big shift values increase it), and the period of virtual time
++ * wraparounds.
++ */
++#define WFQ_SERVICE_SHIFT 22
++
++static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = NULL;
++
++ BUG_ON(!entity);
++
++ if (!entity->my_sched_data)
++ bfqq = container_of(entity, struct bfq_queue, entity);
++
++ return bfqq;
++}
++
++
++/**
++ * bfq_delta - map service into the virtual time domain.
++ * @service: amount of service.
++ * @weight: scale factor (weight of an entity or weight sum).
++ */
++static u64 bfq_delta(unsigned long service, unsigned long weight)
++{
++ u64 d = (u64)service << WFQ_SERVICE_SHIFT;
++
++ do_div(d, weight);
++ return d;
++}
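++
++/*
++ * Illustrative sketch (not compiled), with assumed numbers: for the same
++ * amount of service, an entity with twice the weight advances the virtual
++ * time half as fast, which is what produces weighted fairness.
++ */
++#if 0
++static void bfq_delta_example(void)
++{
++	/* service 1024, weight 100: (1024 << 22) / 100 == 42949672 */
++	u64 light = bfq_delta(1024, 100);
++	/* same service, weight 200: (1024 << 22) / 200 == 21474836 */
++	u64 heavy = bfq_delta(1024, 200);
++
++	BUG_ON(heavy * 2 != light);
++}
++#endif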
++
++/**
++ * bfq_calc_finish - assign the finish time to an entity.
++ * @entity: the entity to act upon.
++ * @service: the service to be charged to the entity.
++ */
++static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ unsigned long long start, finish, delta;
++
++ BUG_ON(entity->weight == 0);
++
++ entity->finish = entity->start +
++ bfq_delta(service, entity->weight);
++
++ start = ((entity->start>>10)*1000)>>12;
++ finish = ((entity->finish>>10)*1000)>>12;
++ delta = ((bfq_delta(service, entity->weight)>>10)*1000)>>12;
++
++ if (bfqq) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: serv %lu, w %d",
++ service, entity->weight);
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: start %llu, finish %llu, delta %llu",
++ start, finish, delta);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ } else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "calc_finish group: serv %lu, w %d",
++ service, entity->weight);
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "calc_finish group: start %llu, finish %llu, delta %llu",
++ start, finish, delta);
++#endif
++ }
++}
++
++/**
++ * bfq_entity_of - get an entity from a node.
++ * @node: the node field of the entity.
++ *
++ * Convert a node pointer to the corresponding entity. This is used only
++ * to simplify the logic of some functions and not as the generic
++ * conversion mechanism because, e.g., in the tree walking functions,
++ * the check for a %NULL value would be redundant.
++ */
++static struct bfq_entity *bfq_entity_of(struct rb_node *node)
++{
++ struct bfq_entity *entity = NULL;
++
++ if (node)
++ entity = rb_entry(node, struct bfq_entity, rb_node);
++
++ return entity;
++}
++
++/**
++ * bfq_extract - remove an entity from a tree.
++ * @root: the tree root.
++ * @entity: the entity to remove.
++ */
++static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
++{
++ BUG_ON(entity->tree != root);
++
++ entity->tree = NULL;
++ rb_erase(&entity->rb_node, root);
++}
++
++/**
++ * bfq_idle_extract - extract an entity from the idle tree.
++ * @st: the service tree of the owning @entity.
++ * @entity: the entity being removed.
++ */
++static void bfq_idle_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *next;
++
++ BUG_ON(entity->tree != &st->idle);
++
++ if (entity == st->first_idle) {
++ next = rb_next(&entity->rb_node);
++ st->first_idle = bfq_entity_of(next);
++ }
++
++ if (entity == st->last_idle) {
++ next = rb_prev(&entity->rb_node);
++ st->last_idle = bfq_entity_of(next);
++ }
++
++ bfq_extract(&st->idle, entity);
++
++ if (bfqq)
++ list_del(&bfqq->bfqq_list);
++}
++
++/**
++ * bfq_insert - generic tree insertion.
++ * @root: tree root.
++ * @entity: entity to insert.
++ *
++ * This is used for the idle and the active tree, since they are both
++ * ordered by finish time.
++ */
++static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
++{
++ struct bfq_entity *entry;
++ struct rb_node **node = &root->rb_node;
++ struct rb_node *parent = NULL;
++
++ BUG_ON(entity->tree);
++
++ while (*node) {
++ parent = *node;
++ entry = rb_entry(parent, struct bfq_entity, rb_node);
++
++ if (bfq_gt(entry->finish, entity->finish))
++ node = &parent->rb_left;
++ else
++ node = &parent->rb_right;
++ }
++
++ rb_link_node(&entity->rb_node, parent, node);
++ rb_insert_color(&entity->rb_node, root);
++
++ entity->tree = root;
++}
++
++/**
++ * bfq_update_min - update the min_start field of an entity.
++ * @entity: the entity to update.
++ * @node: one of its children.
++ *
++ * This function is called when @entity may store an invalid value for
++ * min_start due to updates to the active tree. The function assumes
++ * that the subtree rooted at @node (which may be its left or its right
++ * child) has a valid min_start value.
++ */
++static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
++{
++ struct bfq_entity *child;
++
++ if (node) {
++ child = rb_entry(node, struct bfq_entity, rb_node);
++ if (bfq_gt(entity->min_start, child->min_start))
++ entity->min_start = child->min_start;
++ }
++}
++
++/**
++ * bfq_update_active_node - recalculate min_start.
++ * @node: the node to update.
++ *
++ * @node may have changed position or one of its children may have moved,
++ * this function updates its min_start value. The left and right subtrees
++ * are assumed to hold a correct min_start value.
++ */
++static void bfq_update_active_node(struct rb_node *node)
++{
++ struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ entity->min_start = entity->start;
++ bfq_update_min(entity, node->rb_right);
++ bfq_update_min(entity, node->rb_left);
++
++ if (bfqq) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "update_active_node: new min_start %llu",
++ ((entity->min_start>>10)*1000)>>12);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ } else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "update_active_node: new min_start %llu",
++ ((entity->min_start>>10)*1000)>>12);
++#endif
++ }
++}
++
++/**
++ * bfq_update_active_tree - update min_start for the whole active tree.
++ * @node: the starting node.
++ *
++ * @node must be the deepest modified node after an update. This function
++ * updates its min_start using the values held by its children, assuming
++ * that they did not change, and then updates all the nodes that may have
++ * changed in the path to the root. The only nodes that may have changed
++ * are the ones in the path or their siblings.
++ */
++static void bfq_update_active_tree(struct rb_node *node)
++{
++ struct rb_node *parent;
++
++up:
++ bfq_update_active_node(node);
++
++ parent = rb_parent(node);
++ if (!parent)
++ return;
++
++ if (node == parent->rb_left && parent->rb_right)
++ bfq_update_active_node(parent->rb_right);
++ else if (parent->rb_left)
++ bfq_update_active_node(parent->rb_left);
++
++ node = parent;
++ goto up;
++}
++
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root);
++
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root);
++
++
++/**
++ * bfq_active_insert - insert an entity in the active tree of its
++ * group/device.
++ * @st: the service tree of the entity.
++ * @entity: the entity being inserted.
++ *
++ * The active tree is ordered by finish time, but an extra key is kept
++ * for each node, containing the minimum value for the start times of
++ * its children (and the node itself), so it's possible to search for
++ * the eligible node with the lowest finish time in logarithmic time.
++ */
++static void bfq_active_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node = &entity->rb_node;
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ struct bfq_sched_data *sd = NULL;
++ struct bfq_group *bfqg = NULL;
++ struct bfq_data *bfqd = NULL;
++#endif
++
++ bfq_insert(&st->active, entity);
++
++ if (node->rb_left)
++ node = node->rb_left;
++ else if (node->rb_right)
++ node = node->rb_right;
++
++ bfq_update_active_tree(node);
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ sd = entity->sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++ if (bfqq)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else { /* bfq_group */
++ BUG_ON(!bfqd);
++ bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
++ }
++ if (bfqg != bfqd->root_group) {
++ BUG_ON(!bfqg);
++ BUG_ON(!bfqd);
++ bfqg->active_entities++;
++ }
++#endif
++}
++
++/**
++ * bfq_ioprio_to_weight - calc a weight from an ioprio.
++ * @ioprio: the ioprio value to convert.
++ */
++static unsigned short bfq_ioprio_to_weight(int ioprio)
++{
++ BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
++ return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
++}
++
++/**
++ * bfq_weight_to_ioprio - calc an ioprio from a weight.
++ * @weight: the weight value to convert.
++ *
++ * To preserve as much as possible the old only-ioprio user interface,
++ * 0 is used as an escape ioprio value for weights (numerically) equal or
++ * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
++ */
++static unsigned short bfq_weight_to_ioprio(int weight)
++{
++ BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
++ return IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight < 0 ?
++ 0 : IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight;
++}
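++
++/*
++ * Illustrative sketch (not compiled). Assuming IOPRIO_BE_NR == 8 and
++ * BFQ_WEIGHT_CONVERSION_COEFF == 10 (the values used by mainline BFQ),
++ * the two conversions above behave as follows.
++ */
++#if 0
++static void bfq_ioprio_weight_example(void)
++{
++	/* best-effort ioprio 4 maps to weight (8 - 4) * 10 == 40 */
++	BUG_ON(bfq_ioprio_to_weight(4) != 40);
++	/* weights >= 8 * 10 == 80 map back to the escape ioprio value 0 */
++	BUG_ON(bfq_weight_to_ioprio(80) != 0);
++}
++#endif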
++
++static void bfq_get_entity(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ if (bfqq) {
++ bfqq->ref++;
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
++ bfqq, bfqq->ref);
++ }
++}
++
++/**
++ * bfq_find_deepest - find the deepest node that an extraction can modify.
++ * @node: the node being removed.
++ *
++ * Do the first step of an extraction in an rb tree, looking for the
++ * node that will replace @node, and returning the deepest node that
++ * the following modifications to the tree can touch. If @node is the
++ * last node in the tree return %NULL.
++ */
++static struct rb_node *bfq_find_deepest(struct rb_node *node)
++{
++ struct rb_node *deepest;
++
++ if (!node->rb_right && !node->rb_left)
++ deepest = rb_parent(node);
++ else if (!node->rb_right)
++ deepest = node->rb_left;
++ else if (!node->rb_left)
++ deepest = node->rb_right;
++ else {
++ deepest = rb_next(node);
++ if (deepest->rb_right)
++ deepest = deepest->rb_right;
++ else if (rb_parent(deepest) != node)
++ deepest = rb_parent(deepest);
++ }
++
++ return deepest;
++}
++
++/**
++ * bfq_active_extract - remove an entity from the active tree.
++ * @st: the service_tree containing the tree.
++ * @entity: the entity being removed.
++ */
++static void bfq_active_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node;
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ struct bfq_sched_data *sd = NULL;
++ struct bfq_group *bfqg = NULL;
++ struct bfq_data *bfqd = NULL;
++#endif
++
++ node = bfq_find_deepest(&entity->rb_node);
++ bfq_extract(&st->active, entity);
++
++ if (node)
++ bfq_update_active_tree(node);
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ sd = entity->sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++ if (bfqq)
++ list_del(&bfqq->bfqq_list);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else { /* bfq_group */
++ BUG_ON(!bfqd);
++ bfq_weights_tree_remove(bfqd, entity,
++ &bfqd->group_weights_tree);
++ }
++ if (bfqg != bfqd->root_group) {
++ BUG_ON(!bfqg);
++ BUG_ON(!bfqd);
++ BUG_ON(!bfqg->active_entities);
++ bfqg->active_entities--;
++ }
++#endif
++}
++
++/**
++ * bfq_idle_insert - insert an entity into the idle tree.
++ * @st: the service tree containing the tree.
++ * @entity: the entity to insert.
++ */
++static void bfq_idle_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
++ st->first_idle = entity;
++ if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
++ st->last_idle = entity;
++
++ bfq_insert(&st->idle, entity);
++
++ if (bfqq)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
++}
++
++/**
++ * bfq_forget_entity - do not consider entity any longer for scheduling
++ * @st: the service tree.
++ * @entity: the entity being removed.
++ * @is_in_service: true if entity is currently the in-service entity.
++ *
++ * Forget everything about @entity. In addition, if entity represents
++ * a queue, and the latter is not in service, then release the service
++ * reference to the queue (the one taken through bfq_get_entity). In
++ * fact, in this case, there is really no more service reference to
++ * the queue, as the latter is also outside any service tree. If,
++ * instead, the queue is in service, then __bfq_bfqd_reset_in_service
++ * will take care of putting the reference when the queue finally
++ * stops being served.
++ */
++static void bfq_forget_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity,
++ bool is_in_service)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ BUG_ON(!entity->on_st);
++
++ entity->on_st = false;
++ st->wsum -= entity->weight;
++ if (bfqq && !is_in_service) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity (before): %p %d",
++ bfqq, bfqq->ref);
++ bfq_put_queue(bfqq);
++ }
++}
++
++/**
++ * bfq_put_idle_entity - release the idle tree ref of an entity.
++ * @st: service tree for the entity.
++ * @entity: the entity being released.
++ */
++static void bfq_put_idle_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ bfq_idle_extract(st, entity);
++ bfq_forget_entity(st, entity,
++ entity == entity->sched_data->in_service_entity);
++}
++
++/**
++ * bfq_forget_idle - update the idle tree if necessary.
++ * @st: the service tree to act upon.
++ *
++ * To preserve the global O(log N) complexity we only remove one entry here;
++ * as the idle tree will not grow indefinitely this can be done safely.
++ */
++static void bfq_forget_idle(struct bfq_service_tree *st)
++{
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (RB_EMPTY_ROOT(&st->active) && last_idle &&
++ !bfq_gt(last_idle->finish, st->vtime)) {
++ /*
++ * Forget the whole idle tree, increasing the vtime past
++ * the last finish time of idle entities.
++ */
++ st->vtime = last_idle->finish;
++ }
++
++ if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
++ bfq_put_idle_entity(st, first_idle);
++}
++
++/*
++ * Update weight and priority of entity. If update_class_too is true,
++ * then update the ioprio_class of entity too.
++ *
++ * The reason why the update of ioprio_class is controlled through the
++ * last parameter is as follows. Changing the ioprio class of an
++ * entity implies changing the destination service trees for that
++ * entity. If such a change occurred when the entity is already on one
++ * of the service trees for its previous class, then the state of the
++ * entity would become more complex: none of the new possible service
++ * trees for the entity, according to bfq_entity_service_tree(), would
++ * match any of the possible service trees on which the entity
++ * is. Complex operations involving these trees, such as entity
++ * activations and deactivations, should take into account this
++ * additional complexity. To avoid this issue, this function is
++ * invoked with update_class_too unset at the points in the code where
++ * entity may happen to be on some tree.
++ */
++static struct bfq_service_tree *
++__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
++ struct bfq_entity *entity,
++ bool update_class_too)
++{
++ struct bfq_service_tree *new_st = old_st;
++
++ if (entity->prio_changed) {
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ unsigned int prev_weight, new_weight;
++ struct bfq_data *bfqd = NULL;
++ struct rb_root *root;
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ struct bfq_sched_data *sd;
++ struct bfq_group *bfqg;
++#endif
++
++ if (bfqq)
++ bfqd = bfqq->bfqd;
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ sd = entity->my_sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++ BUG_ON(!bfqd);
++ }
++#endif
++
++ BUG_ON(old_st->wsum < entity->weight);
++ old_st->wsum -= entity->weight;
++
++ if (entity->new_weight != entity->orig_weight) {
++ if (entity->new_weight < BFQ_MIN_WEIGHT ||
++ entity->new_weight > BFQ_MAX_WEIGHT) {
++ pr_crit("update_weight_prio: new_weight %d\n",
++ entity->new_weight);
++ if (entity->new_weight < BFQ_MIN_WEIGHT)
++ entity->new_weight = BFQ_MIN_WEIGHT;
++ else
++ entity->new_weight = BFQ_MAX_WEIGHT;
++ }
++ entity->orig_weight = entity->new_weight;
++ if (bfqq)
++ bfqq->ioprio =
++ bfq_weight_to_ioprio(entity->orig_weight);
++ }
++
++ if (bfqq && update_class_too)
++ bfqq->ioprio_class = bfqq->new_ioprio_class;
++
++ /*
++ * Reset prio_changed only if the ioprio_class change
++ * is not pending any longer.
++ */
++ if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class)
++ entity->prio_changed = 0;
++
++ /*
++ * NOTE: here we may be changing the weight too early,
++ * this will cause unfairness. The correct approach
++ * would have required additional complexity to defer
++ * weight changes to the proper time instants (i.e.,
++ * when entity->finish <= old_st->vtime).
++ */
++ new_st = bfq_entity_service_tree(entity);
++
++ prev_weight = entity->weight;
++ new_weight = entity->orig_weight *
++ (bfqq ? bfqq->wr_coeff : 1);
++ /*
++ * If the weight of the entity changes, remove the entity
++ * from its old weight counter (if there is a counter
++ * associated with the entity), and add it to the counter
++ * associated with its new weight.
++ */
++ if (prev_weight != new_weight) {
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "weight changed %d %d(%d %d)",
++ prev_weight, new_weight,
++ entity->orig_weight,
++ bfqq->wr_coeff);
++
++ root = bfqq ? &bfqd->queue_weights_tree :
++ &bfqd->group_weights_tree;
++ bfq_weights_tree_remove(bfqd, entity, root);
++ }
++ entity->weight = new_weight;
++ /*
++ * Add the entity to its weights tree only if it is
++ * not associated with a weight-raised queue.
++ */
++ if (prev_weight != new_weight &&
++ (bfqq ? bfqq->wr_coeff == 1 : 1))
++ /* If we get here, root has been initialized. */
++ bfq_weights_tree_add(bfqd, entity, root);
++
++ new_st->wsum += entity->weight;
++
++ if (new_st != old_st)
++ entity->start = new_st->vtime;
++ }
++
++ return new_st;
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
++#endif
++
++/**
++ * bfq_bfqq_served - update the scheduler status after selection for
++ * service.
++ * @bfqq: the queue being served.
++ * @served: bytes to transfer.
++ *
++ * NOTE: this can be optimized, as the timestamps of upper level entities
++ * are synchronized every time a new bfqq is selected for service. For now,
++ * we keep it to better check consistency.
++ */
++static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st;
++
++ for_each_entity(entity) {
++ st = bfq_entity_service_tree(entity);
++
++ entity->service += served;
++
++ BUG_ON(st->wsum == 0);
++
++ st->vtime += bfq_delta(served, st->wsum);
++ bfq_forget_idle(st);
++ }
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
++#endif
++ st = bfq_entity_service_tree(&bfqq->entity);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs, vtime %llu on %p",
++ served, ((st->vtime>>10)*1000)>>12, st);
++}
++
++/**
++ * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
++ * of the time interval during which bfqq has been in
++ * service.
++ * @bfqd: the device
++ * @bfqq: the queue that needs a service update.
++ * @time_ms: the amount of time during which the queue has received service
++ *
++ * If a queue does not consume its budget fast enough, then providing
++ * the queue with service fairness may impair throughput, more or less
++ * severely. For this reason, queues that consume their budget slowly
++ * are provided with time fairness instead of service fairness. This
++ * goal is achieved through the BFQ scheduling engine, even if such an
++ * engine works in the service, and not in the time domain. The trick
++ * engine works in the service domain, not in the time domain. The trick
++ * to the amount of service that they would have received during their
++ * service slot if they had been fast, i.e., if their requests had
++ * been dispatched at a rate equal to the estimated peak rate.
++ *
++ * It is worth noting that time fairness can cause important
++ * distortions in terms of bandwidth distribution, on devices with
++ * internal queueing. The reason is that I/O requests dispatched
++ * during the service slot of a queue may be served after that service
++ * slot is finished, and may have a total processing time loosely
++ * correlated with the duration of the service slot. This is
++ * especially true for short service slots.
++ */
++static void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ unsigned long time_ms)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ int tot_serv_to_charge = entity->service;
++ unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout);
++
++ if (time_ms > 0 && time_ms < timeout_ms)
++ tot_serv_to_charge =
++ (bfqd->bfq_max_budget * time_ms) / timeout_ms;
++
++ if (tot_serv_to_charge < entity->service)
++ tot_serv_to_charge = entity->service;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "charge_time: %lu/%u ms, %d/%d/%d sectors",
++ time_ms, timeout_ms, entity->service,
++ tot_serv_to_charge, entity->budget);
++
++ /* Increase budget to avoid inconsistencies */
++ if (tot_serv_to_charge > entity->budget)
++ entity->budget = tot_serv_to_charge;
++
++ bfq_bfqq_served(bfqq,
++ max_t(int, 0, tot_serv_to_charge - entity->service));
++}
++
++static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
++ struct bfq_service_tree *st,
++ bool backshifted)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_sched_data *sd = entity->sched_data;
++
++ /*
++ * When this function is invoked, entity is not in any service
++	 * tree, so it is safe to invoke the next function with the last
++	 * parameter set (see the comments on that function).
++ */
++ st = __bfq_entity_update_weight_prio(st, entity, true);
++ bfq_calc_finish(entity, entity->budget);
++
++ /*
++ * If some queues enjoy backshifting for a while, then their
++ * (virtual) finish timestamps may happen to become lower and
++ * lower than the system virtual time. In particular, if
++ * these queues often happen to be idle for short time
++ * periods, and during such time periods other queues with
++ * higher timestamps happen to be busy, then the backshifted
++ * timestamps of the former queues can become much lower than
++ * the system virtual time. In fact, to serve the queues with
++ * higher timestamps while the ones with lower timestamps are
++ * idle, the system virtual time may be pushed-up to much
++ * higher values than the finish timestamps of the idle
++ * queues. As a consequence, the finish timestamps of all new
++ * or newly activated queues may end up being much larger than
++ * those of lucky queues with backshifted timestamps. The
++ * latter queues may then monopolize the device for a lot of
++ * time. This would simply break service guarantees.
++ *
++ * To reduce this problem, push up a little bit the
++ * backshifted timestamps of the queue associated with this
++ * entity (only a queue can happen to have the backshifted
++ * flag set): just enough to let the finish timestamp of the
++ * queue be equal to the current value of the system virtual
++ * time. This may introduce a little unfairness among queues
++ * with backshifted timestamps, but it does not break
++ * worst-case fairness guarantees.
++ *
++ * As a special case, if bfqq is weight-raised, push up
++ * timestamps much less, to keep very low the probability that
++ * this push up causes the backshifted finish timestamps of
++ * weight-raised queues to become higher than the backshifted
++ * finish timestamps of non weight-raised queues.
++ */
++ if (backshifted && bfq_gt(st->vtime, entity->finish)) {
++ unsigned long delta = st->vtime - entity->finish;
++
++ if (bfqq)
++ delta /= bfqq->wr_coeff;
++
++ entity->start += delta;
++ entity->finish += delta;
++
++ if (bfqq) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "__activate_entity: new queue finish %llu",
++ ((entity->finish>>10)*1000)>>12);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ } else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "__activate_entity: new group finish %llu",
++ ((entity->finish>>10)*1000)>>12);
++#endif
++ }
++ }
++
++ bfq_active_insert(st, entity);
++
++ if (bfqq) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "__activate_entity: queue %seligible in st %p",
++ entity->start <= st->vtime ? "" : "non ", st);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ } else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "__activate_entity: group %seligible in st %p",
++ entity->start <= st->vtime ? "" : "non ", st);
++#endif
++ }
++ BUG_ON(RB_EMPTY_ROOT(&st->active));
++ BUG_ON(&st->active != &sd->service_tree->active &&
++ &st->active != &(sd->service_tree+1)->active &&
++ &st->active != &(sd->service_tree+2)->active);
++}
++
++/**
++ * __bfq_activate_entity - handle activation of entity.
++ * @entity: the entity being activated.
++ * @non_blocking_wait_rq: true if entity was waiting for a request
++ *
++ * Called for a 'true' activation, i.e., if entity is not active and
++ * one of its children receives a new request.
++ *
++ * Basically, this function updates the timestamps of entity and
++ * inserts entity into its active tree, after possibly extracting it
++ * from its idle tree.
++ */
++static void __bfq_activate_entity(struct bfq_entity *entity,
++ bool non_blocking_wait_rq)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ bool backshifted = false;
++ unsigned long long min_vstart;
++
++ BUG_ON(!sd);
++ BUG_ON(!st);
++
++	/* See comments on bfq_bfqq_update_budg_for_activation */
++ if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
++ backshifted = true;
++ min_vstart = entity->finish;
++ } else
++ min_vstart = st->vtime;
++
++ if (entity->tree == &st->idle) {
++ /*
++ * Must be on the idle tree, bfq_idle_extract() will
++ * check for that.
++ */
++ bfq_idle_extract(st, entity);
++ entity->start = bfq_gt(min_vstart, entity->finish) ?
++ min_vstart : entity->finish;
++ } else {
++ /*
++ * The finish time of the entity may be invalid, and
++ * it is in the past for sure, otherwise the queue
++ * would have been on the idle tree.
++ */
++ entity->start = min_vstart;
++ st->wsum += entity->weight;
++ /*
++ * entity is about to be inserted into a service tree,
++ * and then set in service: get a reference to make
++ * sure entity does not disappear until it is no
++ * longer in service or scheduled for service.
++ */
++ bfq_get_entity(entity);
++
++ BUG_ON(entity->on_st && bfqq);
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ if (entity->on_st && !bfqq) {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group,
++ entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd,
++ bfqg,
++ "activate bug, class %d in_service %p",
++ bfq_class_idx(entity), sd->in_service_entity);
++ }
++#endif
++ BUG_ON(entity->on_st && !bfqq);
++ entity->on_st = true;
++ }
++
++ bfq_update_fin_time_enqueue(entity, st, backshifted);
++}
++
++/**
++ * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
++ * @entity: the entity being requeued or repositioned.
++ *
++ * Requeueing is needed if this entity stops being served, which
++ * happens if a leaf descendant entity has expired. On the other hand,
++ * repositioning is needed if the next_in_service entity for the child
++ * entity has changed. See the comments inside the function for
++ * details.
++ *
++ * Basically, this function: 1) removes entity from its active tree if
++ * present there, 2) updates the timestamps of entity and 3) inserts
++ * entity back into its active tree (in the new, right position for
++ * the new values of the timestamps).
++ */
++static void __bfq_requeue_entity(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++ BUG_ON(!sd);
++ BUG_ON(!st);
++
++ BUG_ON(entity != sd->in_service_entity &&
++ entity->tree != &st->active);
++
++ if (entity == sd->in_service_entity) {
++ /*
++ * We are requeueing the current in-service entity,
++ * which may have to be done for one of the following
++ * reasons:
++ * - entity represents the in-service queue, and the
++ * in-service queue is being requeued after an
++ * expiration;
++ * - entity represents a group, and its budget has
++ * changed because one of its child entities has
++ * just been either activated or requeued for some
++ * reason; the timestamps of the entity need then to
++ * be updated, and the entity needs to be enqueued
++ * or repositioned accordingly.
++ *
++ * In particular, before requeueing, the start time of
++ * the entity must be moved forward to account for the
++ * service that the entity has received while in
++ * service. This is done by the next instructions. The
++ * finish time will then be updated according to this
++ * new value of the start time, and to the budget of
++ * the entity.
++ */
++ bfq_calc_finish(entity, entity->service);
++ entity->start = entity->finish;
++ BUG_ON(entity->tree && entity->tree != &st->active);
++ /*
++ * In addition, if the entity had more than one child
++ * when set in service, then was not extracted from
++ * the active tree. This implies that the position of
++ * the entity in the active tree may need to be
++ * changed now, because we have just updated the start
++ * time of the entity, and we will update its finish
++ * time in a moment (the requeueing is then, more
++ * precisely, a repositioning in this case). To
++ * implement this repositioning, we: 1) dequeue the
++ * entity here, 2) update the finish time and
++ * requeue the entity according to the new
++ * timestamps below.
++ */
++ if (entity->tree)
++ bfq_active_extract(st, entity);
++ } else { /* The entity is already active, and not in service */
++ /*
++ * In this case, this function gets called only if the
++ * next_in_service entity below this entity has
++ * changed, and this change has caused the budget of
++ * this entity to change, which, finally implies that
++ * the finish time of this entity must be
++ * updated. Such an update may cause the scheduling,
++ * i.e., the position in the active tree, of this
++ * entity to change. We handle this change by: 1)
++ * dequeueing the entity here, 2) updating the finish
++ * time and requeueing the entity according to the new
++ * timestamps below. This is the same approach as the
++ * non-extracted-entity sub-case above.
++ */
++ bfq_active_extract(st, entity);
++ }
++
++ bfq_update_fin_time_enqueue(entity, st, false);
++}
++
++static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
++ struct bfq_sched_data *sd,
++ bool non_blocking_wait_rq)
++{
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++ if (sd->in_service_entity == entity || entity->tree == &st->active)
++ /*
++ * in service or already queued on the active tree,
++ * requeue or reposition
++ */
++ __bfq_requeue_entity(entity);
++ else
++ /*
++ * Not in service and not queued on its active tree:
++ * the activity is idle and this is a true activation.
++ */
++ __bfq_activate_entity(entity, non_blocking_wait_rq);
++}
++
++
++/**
++ * bfq_activate_requeue_entity - activate or requeue an entity representing a
++ * bfq_queue, and activate, requeue or reposition all
++ * ancestors for which such an update becomes necessary.
++ * @entity: the entity to activate.
++ * @non_blocking_wait_rq: true if this entity was waiting for a request
++ * @requeue: true if this is a requeue, which implies that bfqq is
++ * being expired; thus ALL its ancestors stop being served and must
++ * therefore be requeued
++ */
++static void bfq_activate_requeue_entity(struct bfq_entity *entity,
++ bool non_blocking_wait_rq,
++ bool requeue)
++{
++ struct bfq_sched_data *sd;
++
++ for_each_entity(entity) {
++ BUG_ON(!entity);
++ sd = entity->sched_data;
++ __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);
++
++ BUG_ON(RB_EMPTY_ROOT(&sd->service_tree->active) &&
++ RB_EMPTY_ROOT(&(sd->service_tree+1)->active) &&
++ RB_EMPTY_ROOT(&(sd->service_tree+2)->active));
++
++ if (!bfq_update_next_in_service(sd, entity) && !requeue) {
++ BUG_ON(!sd->next_in_service);
++ break;
++ }
++ BUG_ON(!sd->next_in_service);
++ }
++}
++
++/**
++ * __bfq_deactivate_entity - deactivate an entity from its service tree.
++ * @entity: the entity to deactivate.
++ * @ins_into_idle_tree: if false, the entity will not be put into the
++ * idle tree.
++ *
++ * Deactivates an entity, independently of its previous state. Must
++ * be invoked only if entity is on a service tree. Extracts the entity
++ * from that tree, and if necessary and allowed, puts it on the idle
++ * tree.
++ */
++static bool __bfq_deactivate_entity(struct bfq_entity *entity,
++ bool ins_into_idle_tree)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st;
++ bool is_in_service;
++
++ if (!entity->on_st) { /* entity never activated, or already inactive */
++ BUG_ON(sd && entity == sd->in_service_entity);
++ return false;
++ }
++
++ /*
++ * If we get here, then entity is active, which implies that
++ * bfq_group_set_parent has already been invoked for the group
++ * represented by entity. Therefore, the field
++ * entity->sched_data has been set, and we can safely use it.
++ */
++ st = bfq_entity_service_tree(entity);
++ is_in_service = entity == sd->in_service_entity;
++
++ BUG_ON(is_in_service && entity->tree && entity->tree != &st->active);
++
++ if (is_in_service)
++ bfq_calc_finish(entity, entity->service);
++
++ if (entity->tree == &st->active)
++ bfq_active_extract(st, entity);
++ else if (!is_in_service && entity->tree == &st->idle)
++ bfq_idle_extract(st, entity);
++ else if (entity->tree)
++ BUG();
++
++ if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
++ bfq_forget_entity(st, entity, is_in_service);
++ else
++ bfq_idle_insert(st, entity);
++
++ return true;
++}
++
++/**
++ * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
++ * @entity: the entity to deactivate.
++ * @ins_into_idle_tree: true if the entity can be put on the idle tree
++ */
++static void bfq_deactivate_entity(struct bfq_entity *entity,
++ bool ins_into_idle_tree,
++ bool expiration)
++{
++ struct bfq_sched_data *sd;
++ struct bfq_entity *parent = NULL;
++
++ for_each_entity_safe(entity, parent) {
++ sd = entity->sched_data;
++
++ BUG_ON(sd == NULL); /*
++ * It would mean that this is the
++ * root group.
++ */
++
++ BUG_ON(expiration && entity != sd->in_service_entity);
++
++ BUG_ON(entity != sd->in_service_entity &&
++ entity->tree ==
++ &bfq_entity_service_tree(entity)->active &&
++ !sd->next_in_service);
++
++ if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
++ /*
++ * entity is not in any tree any more, so
++ * this deactivation is a no-op, and there is
++ * nothing to change for upper-level entities
++ * (in case of expiration, this can never
++ * happen).
++ */
++ BUG_ON(expiration); /*
++ * entity cannot be already out of
++ * any tree
++ */
++ return;
++ }
++
++ if (sd->next_in_service == entity)
++ /*
++ * entity was the next_in_service entity,
++ * then, since entity has just been
++ * deactivated, a new one must be found.
++ */
++ bfq_update_next_in_service(sd, NULL);
++
++ if (sd->next_in_service) {
++ /*
++ * The parent entity is still backlogged,
++ * because next_in_service is not NULL. So, no
++ * further upwards deactivation must be
++ * performed. Yet, next_in_service has
++ * changed. Then the schedule does need to be
++ * updated upwards.
++ */
++ BUG_ON(sd->next_in_service == entity);
++ break;
++ }
++
++ /*
++		 * If we get here, then the parent is no longer
++ * backlogged and we need to propagate the
++ * deactivation upwards. Thus let the loop go on.
++ */
++
++ /*
++ * Also let parent be queued into the idle tree on
++ * deactivation, to preserve service guarantees, and
++ * assuming that who invoked this function does not
++ * need parent entities too to be removed completely.
++ */
++ ins_into_idle_tree = true;
++ }
++
++ /*
++ * If the deactivation loop is fully executed, then there are
++ * no more entities to touch and next loop is not executed at
++ * all. Otherwise, requeue remaining entities if they are
++ * about to stop receiving service, or reposition them if this
++ * is not the case.
++ */
++ entity = parent;
++ for_each_entity(entity) {
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ /*
++ * Invoke __bfq_requeue_entity on entity, even if
++ * already active, to requeue/reposition it in the
++ * active tree (because sd->next_in_service has
++ * changed)
++ */
++ __bfq_requeue_entity(entity);
++
++ sd = entity->sched_data;
++ BUG_ON(expiration && sd->in_service_entity != entity);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++				     "invoking update_next for this queue");
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity,
++ struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++				     "invoking update_next for this entity");
++ }
++#endif
++ if (!bfq_update_next_in_service(sd, entity) &&
++ !expiration)
++ /*
++ * next_in_service unchanged or not causing
++ * any change in entity->parent->sd, and no
++ * requeueing needed for expiration: stop
++ * here.
++ */
++ break;
++ }
++}
++
++/**
++ * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
++ * if needed, to have at least one entity eligible.
++ * @st: the service tree to act upon.
++ *
++ * Assumes that st is not empty.
++ */
++static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
++{
++ struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);
++
++ if (bfq_gt(root_entity->min_start, st->vtime)) {
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(root_entity);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_vtime_jump: new value %llu",
++ root_entity->min_start);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(root_entity, struct bfq_group,
++ entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "calc_vtime_jump: new value %llu",
++ root_entity->min_start);
++ }
++#endif
++ return root_entity->min_start;
++ }
++ return st->vtime;
++}
++
++static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
++{
++ if (new_value > st->vtime) {
++ st->vtime = new_value;
++ bfq_forget_idle(st);
++ }
++}
++
++/**
++ * bfq_first_active_entity - find the eligible entity with
++ * the smallest finish time
++ * @st: the service tree to select from.
++ * @vtime: the system virtual time to use as a reference for eligibility
++ *
++ * This function searches for the first schedulable entity, starting from the
++ * root of the tree and going on the left every time on this side there is
++ * a subtree with at least one eligible (start <= vtime) entity. The path on
++ * the right is followed only if a) the left subtree contains no eligible
++ * entities and b) no eligible entity has been found yet.
++ */
++static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
++ u64 vtime)
++{
++ struct bfq_entity *entry, *first = NULL;
++ struct rb_node *node = st->active.rb_node;
++
++ while (node) {
++ entry = rb_entry(node, struct bfq_entity, rb_node);
++left:
++ if (!bfq_gt(entry->start, vtime))
++ first = entry;
++
++ BUG_ON(bfq_gt(entry->min_start, vtime));
++
++ if (node->rb_left) {
++ entry = rb_entry(node->rb_left,
++ struct bfq_entity, rb_node);
++ if (!bfq_gt(entry->min_start, vtime)) {
++ node = node->rb_left;
++ goto left;
++ }
++ }
++ if (first)
++ break;
++ node = node->rb_right;
++ }
++
++ BUG_ON(!first && !RB_EMPTY_ROOT(&st->active));
++ return first;
++}
++
++/**
++ * __bfq_lookup_next_entity - return the first eligible entity in @st.
++ * @st: the service tree.
++ *
++ * If there is no in-service entity for the sched_data st belongs to,
++ * then return the entity that will be set in service if:
++ * 1) the parent entity this st belongs to is set in service;
++ * 2) no entity belonging to such parent entity undergoes a state change
++ * that would influence the timestamps of the entity (e.g., becomes idle,
++ * becomes backlogged, changes its budget, ...).
++ *
++ * In this first case, update the virtual time in @st too (see the
++ * comments on this update inside the function).
++ *
++ * In contrast, if there is an in-service entity, then return the
++ * entity that would be set in service if not only the above
++ * conditions, but also the next one held true: the currently
++ * in-service entity, on expiration,
++ * 1) gets a finish time equal to the current one, or
++ * 2) is not eligible any more, or
++ * 3) is idle.
++ */
++static struct bfq_entity *
++__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service
++#if 0
++ , bool force
++#endif
++ )
++{
++ struct bfq_entity *entity
++#if 0
++ , *new_next_in_service = NULL
++#endif
++ ;
++ u64 new_vtime;
++ struct bfq_queue *bfqq;
++
++ if (RB_EMPTY_ROOT(&st->active))
++ return NULL;
++
++ /*
++ * Get the value of the system virtual time for which at
++ * least one entity is eligible.
++ */
++ new_vtime = bfq_calc_vtime_jump(st);
++
++ /*
++ * If there is no in-service entity for the sched_data this
++ * active tree belongs to, then push the system virtual time
++ * up to the value that guarantees that at least one entity is
++ * eligible. If, instead, there is an in-service entity, then
++ * do not make any such update, because there is already an
++ * eligible entity, namely the in-service one (even if the
++ * entity is not on st, because it was extracted when set in
++ * service).
++ */
++ if (!in_service)
++ bfq_update_vtime(st, new_vtime);
++
++ entity = bfq_first_active_entity(st, new_vtime);
++ BUG_ON(bfq_gt(entity->start, new_vtime));
++
++ /* Log some information */
++ bfqq = bfq_entity_to_bfqq(entity);
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "__lookup_next: start %llu vtime %llu st %p",
++ ((entity->start>>10)*1000)>>12,
++ ((new_vtime>>10)*1000)>>12, st);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "__lookup_next: start %llu vtime %llu st %p",
++ ((entity->start>>10)*1000)>>12,
++ ((new_vtime>>10)*1000)>>12, st);
++ }
++#endif
++
++ BUG_ON(!entity);
++
++ return entity;
++}
++
++/**
++ * bfq_lookup_next_entity - return the first eligible entity in @sd.
++ * @sd: the sched_data.
++ *
++ * This function is invoked when there has been a change in the trees
++ * for sd, and we need to know what the new next entity is after this
++ * change.
++ */
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
++{
++ struct bfq_service_tree *st = sd->service_tree;
++ struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
++ struct bfq_entity *entity = NULL;
++ struct bfq_queue *bfqq;
++ int class_idx = 0;
++
++ BUG_ON(!sd);
++ BUG_ON(!st);
++ /*
++ * Choose from idle class, if needed to guarantee a minimum
++ * bandwidth to this class (and if there is some active entity
++ * in idle class). This should also mitigate
++ * priority-inversion problems in case a low priority task is
++ * holding file system resources.
++ */
++ if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
++ BFQ_CL_IDLE_TIMEOUT)) {
++ if (!RB_EMPTY_ROOT(&idle_class_st->active))
++ class_idx = BFQ_IOPRIO_CLASSES - 1;
++ /* About to be served if backlogged, or not yet backlogged */
++ sd->bfq_class_idle_last_service = jiffies;
++ }
++
++ /*
++ * Find the next entity to serve for the highest-priority
++ * class, unless the idle class needs to be served.
++ */
++ for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
++ entity = __bfq_lookup_next_entity(st + class_idx,
++ sd->in_service_entity);
++
++ if (entity)
++ break;
++ }
++
++ BUG_ON(!entity &&
++ (!RB_EMPTY_ROOT(&st->active) || !RB_EMPTY_ROOT(&(st+1)->active) ||
++ !RB_EMPTY_ROOT(&(st+2)->active)));
++
++ if (!entity)
++ return NULL;
++
++ /* Log some information */
++ bfqq = bfq_entity_to_bfqq(entity);
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "chosen from st %p %d",
++ st + class_idx, class_idx);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "chosen from st %p %d",
++ st + class_idx, class_idx);
++ }
++#endif
++
++ return entity;
++}
++
++static bool next_queue_may_preempt(struct bfq_data *bfqd)
++{
++ struct bfq_sched_data *sd = &bfqd->root_group->sched_data;
++
++ return sd->next_in_service != sd->in_service_entity;
++}
++
++/*
++ * Get next queue for service.
++ */
++static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
++{
++ struct bfq_entity *entity = NULL;
++ struct bfq_sched_data *sd;
++ struct bfq_queue *bfqq;
++
++ BUG_ON(bfqd->in_service_queue);
++
++ if (bfqd->busy_queues == 0)
++ return NULL;
++
++ /*
++ * Traverse the path from the root to the leaf entity to
++ * serve. Set in service all the entities visited along the
++ * way.
++ */
++ sd = &bfqd->root_group->sched_data;
++ for (; sd ; sd = entity->my_sched_data) {
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ if (entity) {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg(bfqd, bfqg,
++ "get_next_queue: lookup in this group");
++ if (!sd->next_in_service)
++ pr_crit("get_next_queue: lookup in this group");
++ } else {
++ bfq_log_bfqg(bfqd, bfqd->root_group,
++ "get_next_queue: lookup in root group");
++ if (!sd->next_in_service)
++ pr_crit("get_next_queue: lookup in root group");
++ }
++#endif
++
++ BUG_ON(!sd->next_in_service);
++
++ /*
++ * WARNING. We are about to set the in-service entity
++ * to sd->next_in_service, i.e., to the (cached) value
++ * returned by bfq_lookup_next_entity(sd) the last
++ * time it was invoked, i.e., the last time when the
++ * service order in sd changed as a consequence of the
++ * activation or deactivation of an entity. In this
++ * respect, if we execute bfq_lookup_next_entity(sd)
++ * in this very moment, it may, although with low
++ * probability, yield a different entity than that
++ * pointed to by sd->next_in_service. This rare event
++ * happens in case there was no CLASS_IDLE entity to
++ * serve for sd when bfq_lookup_next_entity(sd) was
++ * invoked for the last time, while there is now one
++ * such entity.
++ *
++ * If the above event happens, then the scheduling of
++ * such entity in CLASS_IDLE is postponed until the
++ * service of the sd->next_in_service entity
++ * finishes. In fact, when the latter is expired,
++ * bfq_lookup_next_entity(sd) gets called again,
++ * exactly to update sd->next_in_service.
++ */
++
++ /* Make next_in_service entity become in_service_entity */
++ entity = sd->next_in_service;
++ sd->in_service_entity = entity;
++
++ /*
++ * Reset the accumulator of the amount of service that
++ * the entity is about to receive.
++ */
++ entity->service = 0;
++
++ /*
++ * If entity is no longer a candidate for next
++ * service, then we extract it from its active tree,
++ * for the following reason. To further boost the
++ * throughput in some special case, BFQ needs to know
++ * which is the next candidate entity to serve, while
++ * there is already an entity in service. In this
++ * respect, to make it easy to compute/update the next
++ * candidate entity to serve after the current
++ * candidate has been set in service, there is a case
++ * where it is necessary to extract the current
++ * candidate from its service tree. Such a case is
++ * when the entity just set in service cannot be also
++ * a candidate for next service. Details about when
++		 * this condition holds are reported in the comments
++ * on the function bfq_no_longer_next_in_service()
++ * invoked below.
++ */
++ if (bfq_no_longer_next_in_service(entity))
++ bfq_active_extract(bfq_entity_service_tree(entity),
++ entity);
++
++ /*
++ * For the same reason why we may have just extracted
++ * entity from its active tree, we may need to update
++ * next_in_service for the sched_data of entity too,
++ * regardless of whether entity has been extracted.
++ * In fact, even if entity has not been extracted, a
++ * descendant entity may get extracted. Such an event
++ * would cause a change in next_in_service for the
++ * level of the descendant entity, and thus possibly
++ * back to upper levels.
++ *
++ * We cannot perform the resulting needed update
++ * before the end of this loop, because, to know which
++ * is the correct next-to-serve candidate entity for
++ * each level, we need first to find the leaf entity
++ * to set in service. In fact, only after we know
++ * which is the next-to-serve leaf entity, we can
++ * discover whether the parent entity of the leaf
++ * entity becomes the next-to-serve, and so on.
++ */
++
++ /* Log some information */
++ bfqq = bfq_entity_to_bfqq(entity);
++ if (bfqq)
++ bfq_log_bfqq(bfqd, bfqq,
++ "get_next_queue: this queue, finish %llu",
++ (((entity->finish>>10)*1000)>>10)>>2);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg(bfqd, bfqg,
++ "get_next_queue: this entity, finish %llu",
++ (((entity->finish>>10)*1000)>>10)>>2);
++ }
++#endif
++
++ }
++
++ BUG_ON(!entity);
++ bfqq = bfq_entity_to_bfqq(entity);
++ BUG_ON(!bfqq);
++
++ /*
++ * We can finally update all next-to-serve entities along the
++ * path from the leaf entity just set in service to the root.
++ */
++ for_each_entity(entity) {
++ struct bfq_sched_data *sd = entity->sched_data;
++
++		if (!bfq_update_next_in_service(sd, NULL))
++ break;
++ }
++
++ return bfqq;
++}
++
++static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
++{
++ struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
++ struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
++ struct bfq_entity *entity = in_serv_entity;
++
++ if (bfqd->in_service_bic) {
++ put_io_context(bfqd->in_service_bic->icq.ioc);
++ bfqd->in_service_bic = NULL;
++ }
++
++ bfq_clear_bfqq_wait_request(in_serv_bfqq);
++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
++ bfqd->in_service_queue = NULL;
++
++ /*
++ * When this function is called, all in-service entities have
++ * been properly deactivated or requeued, so we can safely
++ * execute the final step: reset in_service_entity along the
++ * path from entity to the root.
++ */
++ for_each_entity(entity)
++ entity->sched_data->in_service_entity = NULL;
++
++ /*
++ * in_serv_entity is no longer in service, so, if it is in no
++ * service tree either, then release the service reference to
++ * the queue it represents (taken with bfq_get_entity).
++ */
++ if (!in_serv_entity->on_st)
++ bfq_put_queue(in_serv_bfqq);
++}
++
++static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ bool ins_into_idle_tree, bool expiration)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
++}
++
++static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++ BUG_ON(bfqq == bfqd->in_service_queue);
++ BUG_ON(entity->tree != &st->active && entity->tree != &st->idle &&
++ entity->on_st);
++
++ bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
++ false);
++ bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
++}
++
++static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_activate_requeue_entity(entity, false,
++ bfqq == bfqd->in_service_queue);
++}
++
++static void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
++
++/*
++ * Called when bfqq no longer has requests pending; removes it from
++ * the service tree. As a special case, it can be invoked during an
++ * expiration.
++ */
++static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ bool expiration)
++{
++ BUG_ON(!bfq_bfqq_busy(bfqq));
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ bfq_log_bfqq(bfqd, bfqq, "del from busy");
++
++ bfq_clear_bfqq_busy(bfqq);
++
++ BUG_ON(bfqd->busy_queues == 0);
++ bfqd->busy_queues--;
++
++ if (!bfqq->dispatched)
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++
++ if (bfqq->wr_coeff > 1) {
++ bfqd->wr_busy_queues--;
++ BUG_ON(bfqd->wr_busy_queues < 0);
++ }
++
++ bfqg_stats_update_dequeue(bfqq_group(bfqq));
++
++ BUG_ON(bfqq->entity.budget < 0);
++
++ bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
++}
++
++/*
++ * Called when an inactive queue receives a new request.
++ */
++static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfq_bfqq_busy(bfqq));
++ BUG_ON(bfqq == bfqd->in_service_queue);
++
++ bfq_log_bfqq(bfqd, bfqq, "add to busy");
++
++ bfq_activate_bfqq(bfqd, bfqq);
++
++ bfq_mark_bfqq_busy(bfqq);
++ bfqd->busy_queues++;
++
++ if (!bfqq->dispatched)
++ if (bfqq->wr_coeff == 1)
++ bfq_weights_tree_add(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++
++ if (bfqq->wr_coeff > 1) {
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ }
++
++}
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+new file mode 100644
+index 000000000000..65e7c7e77f3c
+--- /dev/null
++++ b/block/bfq-sq-iosched.c
+@@ -0,0 +1,5379 @@
++/*
++ * Budget Fair Queueing (BFQ) I/O scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ *
++ * BFQ is a proportional-share I/O scheduler, with some extra
++ * low-latency capabilities. BFQ also supports full hierarchical
++ * scheduling through cgroups. Next paragraphs provide an introduction
++ * on BFQ inner workings. Details on BFQ benefits and usage can be
++ * found in Documentation/block/bfq-iosched.txt.
++ *
++ * BFQ is a proportional-share storage-I/O scheduling algorithm based
++ * on the slice-by-slice service scheme of CFQ. But BFQ assigns
++ * budgets, measured in number of sectors, to processes instead of
++ * time slices. The device is not granted to the in-service process
++ * for a given time slice, but until it has exhausted its assigned
++ * budget. This change from the time to the service domain enables BFQ
++ * to distribute the device throughput among processes as desired,
++ * without any distortion due to throughput fluctuations, or to device
++ * internal queueing. BFQ uses an ad hoc internal scheduler, called
++ * B-WF2Q+, to schedule processes according to their budgets. More
++ * precisely, BFQ schedules queues associated with processes. Thanks to
++ * the accurate policy of B-WF2Q+, BFQ can afford to assign high
++ * budgets to I/O-bound processes issuing sequential requests (to
++ * boost the throughput), and yet guarantee a low latency to
++ * interactive and soft real-time applications.
++ *
++ * NOTE: if the main or only goal, with a given device, is to achieve
++ * the maximum-possible throughput at all times, then do switch off
++ * all low-latency heuristics for that device, by setting low_latency
++ * to 0.
++ *
++ * BFQ is described in [1], where also a reference to the initial, more
++ * theoretical paper on BFQ can be found. The interested reader can find
++ * in the latter paper full details on the main algorithm, as well as
++ * formulas of the guarantees and formal proofs of all the properties.
++ * With respect to the version of BFQ presented in these papers, this
++ * implementation adds a few more heuristics, such as the one that
++ * guarantees a low latency to soft real-time applications, and a
++ * hierarchical extension based on H-WF2Q+.
++ *
++ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
++ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
++ * complexity derives from the one introduced with EEVDF in [3].
++ *
++ * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
++ * Scheduler", Proceedings of the First Workshop on Mobile System
++ * Technologies (MST-2015), May 2015.
++ * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
++ *
++ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
++ *
++ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
++ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
++ * Oct 1997.
++ *
++ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
++ *
++ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
++ * First: A Flexible and Accurate Mechanism for Proportional Share
++ * Resource Allocation,'' technical report.
++ *
++ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/cgroup.h>
++#include <linux/elevator.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/ioprio.h>
++#include "blk.h"
++#include "bfq.h"
++
++/* Expiration time of sync (0) and async (1) requests, in ns. */
++static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
++
++/* Maximum backwards seek, in KiB. */
++static const int bfq_back_max = (16 * 1024);
++
++/* Penalty of a backwards seek, in number of sectors. */
++static const int bfq_back_penalty = 2;
++
++/* Idling period duration, in ns. */
++static u32 bfq_slice_idle = (NSEC_PER_SEC / 125);
++
++/* Minimum number of assigned budgets for which stats are safe to compute. */
++static const int bfq_stats_min_budgets = 194;
++
++/* Default maximum budget values, in sectors and number of requests. */
++static const int bfq_default_max_budget = (16 * 1024);
++
++/*
++ * Async to sync throughput distribution is controlled as follows:
++ * when an async request is served, the entity is charged the number
++ * of sectors of the request, multiplied by the factor below
++ */
++static const int bfq_async_charge_factor = 10;
++
++/* Default timeout values, in jiffies, approximating CFQ defaults. */
++static const int bfq_timeout = (HZ / 8);
++
++static struct kmem_cache *bfq_pool;
++
++/* Below this threshold (in ns), we consider thinktime immediate. */
++#define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
++
++/* hw_tag detection: parallel requests threshold and min samples needed. */
++#define BFQ_HW_QUEUE_THRESHOLD 4
++#define BFQ_HW_QUEUE_SAMPLES 32
++
++#define BFQQ_SEEK_THR (sector_t)(8 * 100)
++#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
++#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
++#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
++
++/* Min number of samples required to perform peak-rate update */
++#define BFQ_RATE_MIN_SAMPLES 32
++/* Min observation time interval required to perform a peak-rate update (ns) */
++#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC)
++/* Target observation time interval for a peak-rate update (ns) */
++#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
++
++/* Shift used for peak rate fixed precision calculations. */
++#define BFQ_RATE_SHIFT 16
++
++/*
++ * By default, BFQ computes the duration of the weight raising for
++ * interactive applications automatically, using the following formula:
++ * duration = (R / r) * T, where r is the peak rate of the device, and
++ * R and T are two reference parameters.
++ * In particular, R is the peak rate of the reference device (see below),
++ * and T is a reference time: given the systems that are likely to be
++ * installed on the reference device according to its speed class, T is
++ * about the maximum time needed, under BFQ and while reading two files in
++ * parallel, to load typical large applications on these systems.
++ * In practice, the slower/faster the device at hand is, the more/less it
++ * takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to interactive
++ * applications.
++ *
++ * BFQ uses four different reference pairs (R, T), depending on:
++ * . whether the device is rotational or non-rotational;
++ * . whether the device is slow, such as old or portable HDDs, as well as
++ * SD cards, or fast, such as newer HDDs and SSDs.
++ *
++ * The device's speed class is dynamically (re)detected in
++ * bfq_update_peak_rate() every time the estimated peak rate is updated.
++ *
++ * In the following definitions, R_slow[0]/R_fast[0] and
++ * T_slow[0]/T_fast[0] are the reference values for a slow/fast
++ * rotational device, whereas R_slow[1]/R_fast[1] and
++ * T_slow[1]/T_fast[1] are the reference values for a slow/fast
++ * non-rotational device. Finally, device_speed_thresh are the
++ * thresholds used to switch between speed classes. The reference
++ * rates are not the actual peak rates of the devices used as a
++ * reference, but slightly lower values. The reason for using these
++ * slightly lower values is that the peak-rate estimator tends to
++ * yield slightly lower values than the actual peak rate (it can yield
++ * the actual peak rate only if there is only one process doing I/O,
++ * and the process does sequential I/O).
++ *
++ * Both the reference peak rates and the thresholds are measured in
++ * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
++ */
++static int R_slow[2] = {1000, 10700};
++static int R_fast[2] = {14000, 33000};
++/*
++ * To improve readability, a conversion function is used to initialize the
++ * following arrays, which entails that they can be initialized only in a
++ * function.
++ */
++static int T_slow[2];
++static int T_fast[2];
++static int device_speed_thresh[2];
++
++#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
++ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
++
++#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
++
++static void bfq_schedule_dispatch(struct bfq_data *bfqd);
++
++#include "bfq-ioc.c"
++#include "bfq-sched.c"
++#include "bfq-cgroup-included.c"
++
++#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples) ((samples) > 80)
++
++/*
++ * Scheduler run of queue, if there are requests pending and no one in the
++ * driver that will restart queueing.
++ */
++static void bfq_schedule_dispatch(struct bfq_data *bfqd)
++{
++ if (bfqd->queued != 0) {
++ bfq_log(bfqd, "schedule dispatch");
++ kblockd_schedule_work(&bfqd->unplug_work);
++ }
++}
++
++/*
++ * Lifted from AS - choose which of rq1 and rq2 is best served now.
++ * We choose the request that is closest to the head right now. Distance
++ * behind the head is penalized and only allowed to a certain extent.
++ */
++static struct request *bfq_choose_req(struct bfq_data *bfqd,
++ struct request *rq1,
++ struct request *rq2,
++ sector_t last)
++{
++ sector_t s1, s2, d1 = 0, d2 = 0;
++ unsigned long back_max;
++#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
++#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
++ unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
++
++ if (!rq1 || rq1 == rq2)
++ return rq2;
++ if (!rq2)
++ return rq1;
++
++ if (rq_is_sync(rq1) && !rq_is_sync(rq2))
++ return rq1;
++ else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
++ return rq2;
++ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
++ return rq1;
++ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
++ return rq2;
++
++ s1 = blk_rq_pos(rq1);
++ s2 = blk_rq_pos(rq2);
++
++ /*
++ * By definition, 1KiB is 2 sectors.
++ */
++ back_max = bfqd->bfq_back_max * 2;
++
++ /*
++ * Strict one way elevator _except_ in the case where we allow
++ * short backward seeks which are biased as twice the cost of a
++ * similar forward seek.
++ */
++ if (s1 >= last)
++ d1 = s1 - last;
++ else if (s1 + back_max >= last)
++ d1 = (last - s1) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ1_WRAP;
++
++ if (s2 >= last)
++ d2 = s2 - last;
++ else if (s2 + back_max >= last)
++ d2 = (last - s2) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ2_WRAP;
++
++ /* Found required data */
++
++ /*
++ * By doing switch() on the bit mask "wrap" we avoid having to
++ * check two variables for all permutations: --> faster!
++ */
++ switch (wrap) {
++ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
++ if (d1 < d2)
++ return rq1;
++ else if (d2 < d1)
++ return rq2;
++
++ if (s1 >= s2)
++ return rq1;
++ else
++ return rq2;
++
++ case BFQ_RQ2_WRAP:
++ return rq1;
++ case BFQ_RQ1_WRAP:
++ return rq2;
++ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
++ default:
++ /*
++ * Since both rqs are wrapped,
++ * start with the one that's further behind head
++ * (--> only *one* back seek required),
++ * since back seek takes more time than forward.
++ */
++ if (s1 <= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++}
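
To make the distance rule above concrete, here is a minimal userspace sketch (illustrative only, not part of the patch; seek_cost() and the sample sectors are invented) that reproduces the arithmetic for one forward and one short backward request, using the default bfq_back_max and bfq_back_penalty values:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Cost of serving a request at sector s with the head at 'last':
     * forward seeks cost their distance, short backward seeks cost the
     * distance times the penalty, far backward seeks are flagged as wrapped. */
    static sector_t seek_cost(sector_t s, sector_t last, sector_t back_max,
                              unsigned int penalty, int *wraps)
    {
            if (s >= last)
                    return s - last;
            if (s + back_max >= last)
                    return (last - s) * penalty;
            *wraps = 1;
            return 0;
    }

    int main(void)
    {
            sector_t last = 1000;
            sector_t back_max = 16 * 1024 * 2;      /* bfq_back_max KiB -> sectors */
            unsigned int penalty = 2;               /* bfq_back_penalty */
            int w1 = 0, w2 = 0;
            sector_t d1 = seek_cost(1100, last, back_max, penalty, &w1);
            sector_t d2 = seek_cost(900, last, back_max, penalty, &w2);

            /* d1 = 100, d2 = (1000 - 900) * 2 = 200: neither wraps and
             * d1 < d2, so the forward request at 1100 would be chosen. */
            printf("d1=%llu d2=%llu\n", d1, d2);
            return 0;
    }

With the head at sector 1000, the backward request at 900 is still within back_max and so stays eligible, but the doubled distance makes the forward request the better candidate.
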
++
++static struct bfq_queue *
++bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
++ sector_t sector, struct rb_node **ret_parent,
++ struct rb_node ***rb_link)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *bfqq = NULL;
++
++ parent = NULL;
++ p = &root->rb_node;
++ while (*p) {
++ struct rb_node **n;
++
++ parent = *p;
++ bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++
++ /*
++ * Sort strictly based on sector. Smallest to the left,
++ * largest to the right.
++ */
++ if (sector > blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_right;
++ else if (sector < blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_left;
++ else
++ break;
++ p = n;
++ bfqq = NULL;
++ }
++
++ *ret_parent = parent;
++ if (rb_link)
++ *rb_link = p;
++
++ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ (unsigned long long) sector,
++ bfqq ? bfqq->pid : 0);
++
++ return bfqq;
++}
++
++static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *__bfqq;
++
++ if (bfqq->pos_root) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++
++ if (bfq_class_idle(bfqq))
++ return;
++ if (!bfqq->next_rq)
++ return;
++
++ bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
++ blk_rq_pos(bfqq->next_rq), &parent, &p);
++ if (!__bfqq) {
++ rb_link_node(&bfqq->pos_node, parent, p);
++ rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
++ } else
++ bfqq->pos_root = NULL;
++}
++
++/*
++ * Tell whether there are active queues or groups with differentiated weights.
++ */
++static bool bfq_differentiated_weights(struct bfq_data *bfqd)
++{
++ /*
++ * For weights to differ, at least one of the trees must contain
++ * at least two nodes.
++ */
++ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
++ (bfqd->queue_weights_tree.rb_node->rb_left ||
++ bfqd->queue_weights_tree.rb_node->rb_right)
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ ) ||
++ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
++ (bfqd->group_weights_tree.rb_node->rb_left ||
++ bfqd->group_weights_tree.rb_node->rb_right)
++#endif
++ );
++}
++
++/*
++ * The following function returns true if every queue must receive the
++ * same share of the throughput (this condition is used when deciding
++ * whether idling may be disabled, see the comments in the function
++ * bfq_bfqq_may_idle()).
++ *
++ * Such a scenario occurs when:
++ * 1) all active queues have the same weight,
++ * 2) all active groups at the same level in the groups tree have the same
++ * weight,
++ * 3) all active groups at the same level in the groups tree have the same
++ * number of children.
++ *
++ * Unfortunately, keeping the necessary state for evaluating exactly the
++ * above symmetry conditions would be quite complex and time-consuming.
++ * Therefore this function evaluates, instead, the following stronger
++ * sub-conditions, for which it is much easier to maintain the needed
++ * state:
++ * 1) all active queues have the same weight,
++ * 2) all active groups have the same weight,
++ * 3) all active groups have at most one active child each.
++ * In particular, the last two conditions are always true if hierarchical
++ * support and the cgroups interface are not enabled, thus no state needs
++ * to be maintained in this case.
++ */
++static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
++{
++ return !bfq_differentiated_weights(bfqd);
++}
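
As a worked example of this check: with two busy queues of weights 100 and 200, queue_weights_tree holds two counter nodes, bfq_differentiated_weights() returns true and the scenario is treated as asymmetric, so idling may still be needed to preserve guarantees; with every busy queue at the same (default) weight the tree collapses to a single node and bfq_symmetric_scenario() returns true.
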
++
++/*
++ * If the weight-counter tree passed as input contains no counter for
++ * the weight of the input entity, then add that counter; otherwise just
++ * increment the existing counter.
++ *
++ * Note that weight-counter trees contain few nodes in mostly symmetric
++ * scenarios. For example, if all queues have the same weight, then the
++ * weight-counter tree for the queues may contain at most one node.
++ * This holds even if low_latency is on, because weight-raised queues
++ * are not inserted in the tree.
++ * In most scenarios, the rate at which nodes are created/destroyed
++ * should be low too.
++ */
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ struct rb_node **new = &(root->rb_node), *parent = NULL;
++
++ /*
++ * Do not insert if the entity is already associated with a
++ * counter, which happens if:
++ * 1) the entity is associated with a queue,
++ * 2) a request arrival has caused the queue to become both
++ * non-weight-raised, and hence change its weight, and
++ * backlogged; in this respect, each of the two events
++ * causes an invocation of this function,
++ * 3) this is the invocation of this function caused by the
++ * second event. This second invocation is actually useless,
++ * and we handle this fact by exiting immediately. More
++ * efficient or clearer solutions might possibly be adopted.
++ */
++ if (entity->weight_counter)
++ return;
++
++ while (*new) {
++ struct bfq_weight_counter *__counter = container_of(*new,
++ struct bfq_weight_counter,
++ weights_node);
++ parent = *new;
++
++ if (entity->weight == __counter->weight) {
++ entity->weight_counter = __counter;
++ goto inc_counter;
++ }
++ if (entity->weight < __counter->weight)
++ new = &((*new)->rb_left);
++ else
++ new = &((*new)->rb_right);
++ }
++
++ entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
++ GFP_ATOMIC);
++
++ /*
++ * In the unlucky event of an allocation failure, we just
++ * exit. This will cause the weight of entity to not be
++	 * considered in bfq_differentiated_weights, which, in
++	 * turn, causes the scenario to be wrongly deemed symmetric in
++	 * case entity's weight would have been the only weight making
++	 * the scenario asymmetric. On the bright side, no imbalance
++	 * will occur, however, when entity becomes inactive again (the
++ * invocation of this function is triggered by an activation
++ * of entity). In fact, bfq_weights_tree_remove does nothing
++ * if !entity->weight_counter.
++ */
++ if (unlikely(!entity->weight_counter))
++ return;
++
++ entity->weight_counter->weight = entity->weight;
++ rb_link_node(&entity->weight_counter->weights_node, parent, new);
++ rb_insert_color(&entity->weight_counter->weights_node, root);
++
++inc_counter:
++ entity->weight_counter->num_active++;
++}
++
++/*
++ * Decrement the weight counter associated with the entity, and, if the
++ * counter reaches 0, remove the counter from the tree.
++ * See the comments to the function bfq_weights_tree_add() for considerations
++ * about overhead.
++ */
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ if (!entity->weight_counter)
++ return;
++
++ BUG_ON(RB_EMPTY_ROOT(root));
++ BUG_ON(entity->weight_counter->weight != entity->weight);
++
++ BUG_ON(!entity->weight_counter->num_active);
++ entity->weight_counter->num_active--;
++ if (entity->weight_counter->num_active > 0)
++ goto reset_entity_pointer;
++
++ rb_erase(&entity->weight_counter->weights_node, root);
++ kfree(entity->weight_counter);
++
++reset_entity_pointer:
++ entity->weight_counter = NULL;
++}
++
++/*
++ * Return expired entry, or NULL to just start from scratch in rbtree.
++ */
++static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct request *rq;
++
++ if (bfq_bfqq_fifo_expire(bfqq))
++ return NULL;
++
++ bfq_mark_bfqq_fifo_expire(bfqq);
++
++ rq = rq_entry_fifo(bfqq->fifo.next);
++
++ if (rq == last || ktime_get_ns() < rq->fifo_time)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
++ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
++ return rq;
++}
++
++static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct rb_node *rbnext = rb_next(&last->rb_node);
++ struct rb_node *rbprev = rb_prev(&last->rb_node);
++ struct request *next, *prev = NULL;
++
++ BUG_ON(list_empty(&bfqq->fifo));
++
++ /* Follow expired path, else get first next available. */
++ next = bfq_check_fifo(bfqq, last);
++ if (next) {
++ BUG_ON(next == last);
++ return next;
++ }
++
++ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
++
++ if (rbprev)
++ prev = rb_entry_rq(rbprev);
++
++ if (rbnext)
++ next = rb_entry_rq(rbnext);
++ else {
++ rbnext = rb_first(&bfqq->sort_list);
++ if (rbnext && rbnext != &last->rb_node)
++ next = rb_entry_rq(rbnext);
++ }
++
++ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
++}
++
++/* see the definition of bfq_async_charge_factor for details */
++static unsigned long bfq_serv_to_charge(struct request *rq,
++ struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
++ return blk_rq_sectors(rq);
++
++ /*
++ * If there are no weight-raised queues, then amplify service
++ * by just the async charge factor; otherwise amplify service
++ * by twice the async charge factor, to further reduce latency
++ * for weight-raised queues.
++ */
++ if (bfqq->bfqd->wr_busy_queues == 0)
++ return blk_rq_sectors(rq) * bfq_async_charge_factor;
++
++ return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
++}
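
As a worked example of this charging rule, assuming the default bfq_async_charge_factor of 10: a sync request of 8 sectors (or any request of a weight-raised queue) is charged exactly 8 sectors of budget, whereas an async request of 8 sectors from a non-weight-raised queue is charged 8 * 10 = 80 sectors when no weight-raised queue is busy, and 8 * 2 * 10 = 160 sectors otherwise.
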
++
++/**
++ * bfq_updated_next_req - update the queue after a new next_rq selection.
++ * @bfqd: the device data the queue belongs to.
++ * @bfqq: the queue to update.
++ *
++ * If the first request of a queue changes, we make sure that the queue
++ * has enough budget to serve at least its first request (if the
++ * request has grown). We do this because, if the queue does not have
++ * enough budget for its first request, it has to go through two dispatch
++ * rounds to actually get it dispatched.
++ */
++static void bfq_updated_next_req(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ struct request *next_rq = bfqq->next_rq;
++ unsigned long new_budget;
++
++ if (!next_rq)
++ return;
++
++ if (bfqq == bfqd->in_service_queue)
++ /*
++ * In order not to break guarantees, budgets cannot be
++ * changed after an entity has been selected.
++ */
++ return;
++
++ BUG_ON(entity->tree != &st->active);
++ BUG_ON(entity == entity->sched_data->in_service_entity);
++
++ new_budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ if (entity->budget != new_budget) {
++ entity->budget = new_budget;
++ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++ new_budget);
++ bfq_requeue_bfqq(bfqd, bfqq);
++ }
++}
++
++static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
++{
++ u64 dur;
++
++ if (bfqd->bfq_wr_max_time > 0)
++ return bfqd->bfq_wr_max_time;
++
++ dur = bfqd->RT_prod;
++ do_div(dur, bfqd->peak_rate);
++
++ /*
++ * Limit duration between 3 and 13 seconds. Tests show that
++	 * values higher than 13 seconds often yield the opposite of
++	 * the desired result, i.e., worsen responsiveness by letting
++	 * non-interactive and non-soft-real-time applications
++	 * preserve weight raising for too long a time interval.
++	 *
++	 * On the other hand, values lower than 3 seconds make it
++ * difficult for most interactive tasks to complete their jobs
++ * before weight-raising finishes.
++ */
++ if (dur > msecs_to_jiffies(13000))
++ dur = msecs_to_jiffies(13000);
++ else if (dur < msecs_to_jiffies(3000))
++ dur = msecs_to_jiffies(3000);
++
++ return dur;
++}
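
As a rough illustration of this formula, the sketch below (userspace only; the reference pair R = 1000, T = 4 s and the peak rates are made-up numbers, not BFQ's actual reference values) applies the same (R / r) * T product and [3 s, 13 s] clamp, working directly in milliseconds:

    #include <stdio.h>

    /* dur = (R / r) * T, computed here in milliseconds and then clamped
     * the same way bfq_wr_duration() clamps its jiffies value. */
    static unsigned long long wr_duration_ms(unsigned long long rt_prod_ms,
                                             unsigned long long peak_rate)
    {
            unsigned long long dur = rt_prod_ms / peak_rate;

            if (dur > 13000)
                    dur = 13000;
            else if (dur < 3000)
                    dur = 3000;
            return dur;
    }

    int main(void)
    {
            /* Hypothetical reference pair: R = 1000, T = 4000 ms,
             * so rt_prod_ms = R * T = 4,000,000. */
            const unsigned long long rt_prod_ms = 1000ULL * 4000;

            printf("%llu ms\n", wr_duration_ms(rt_prod_ms, 500));  /* half the reference rate -> 8000 ms */
            printf("%llu ms\n", wr_duration_ms(rt_prod_ms, 100));  /* very slow device -> capped at 13000 */
            printf("%llu ms\n", wr_duration_ms(rt_prod_ms, 4000)); /* very fast device -> floored to 3000 */
            return 0;
    }

The slower a device is relative to the reference one, the longer its interactive queues stay weight-raised, within the window above.
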
++
++static void
++bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
++ struct bfq_io_cq *bic, bool bfq_already_existing)
++{
++ unsigned int old_wr_coeff;
++ bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
++
++ if (bic->saved_idle_window)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++
++ if (bic->saved_IO_bound)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ else
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ if (unlikely(busy))
++ old_wr_coeff = bfqq->wr_coeff;
++
++ bfqq->wr_coeff = bic->saved_wr_coeff;
++ bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
++ BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt));
++ bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
++ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
++ time_is_before_jiffies(bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time))) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "resume state: switching off wr (%lu + %lu < %lu)",
++ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
++ jiffies);
++
++ bfqq->wr_coeff = 1;
++ }
++
++ /* make sure weight will be updated, however we got here */
++ bfqq->entity.prio_changed = 1;
++
++ if (likely(!busy))
++ return;
++
++ if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) {
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ } else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) {
++ bfqd->wr_busy_queues--;
++ BUG_ON(bfqd->wr_busy_queues < 0);
++ }
++}
++
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++ int process_refs, io_refs;
++
++ lockdep_assert_held(bfqq->bfqd->queue->queue_lock);
++
++ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st;
++ BUG_ON(process_refs < 0);
++ return process_refs;
++}
++
++/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
++static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_queue *item;
++ struct hlist_node *n;
++
++ hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
++ hlist_del_init(&item->burst_list_node);
++ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++ bfqd->burst_size = 1;
++ bfqd->burst_parent_entity = bfqq->entity.parent;
++}
++
++/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
++static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	/* Increment burst size to also take bfqq into account */
++ bfqd->burst_size++;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size);
++
++ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh);
++
++ if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
++ struct bfq_queue *pos, *bfqq_item;
++ struct hlist_node *n;
++
++ /*
++ * Enough queues have been activated shortly after each
++ * other to consider this burst as large.
++ */
++ bfqd->large_burst = true;
++ bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started");
++
++ /*
++ * We can now mark all queues in the burst list as
++ * belonging to a large burst.
++ */
++ hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
++ burst_list_node) {
++ bfq_mark_bfqq_in_large_burst(bfqq_item);
++ bfq_log_bfqq(bfqd, bfqq_item, "marked in large burst");
++ }
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ bfq_log_bfqq(bfqd, bfqq, "marked in large burst");
++
++ /*
++ * From now on, and until the current burst finishes, any
++ * new queue being activated shortly after the last queue
++ * was inserted in the burst can be immediately marked as
++ * belonging to a large burst. So the burst list is not
++ * needed any more. Remove it.
++ */
++ hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
++ burst_list_node)
++ hlist_del_init(&pos->burst_list_node);
++ } else /*
++ * Burst not yet large: add bfqq to the burst list. Do
++ * not increment the ref counter for bfqq, because bfqq
++ * is removed from the burst list before freeing bfqq
++ * in put_queue.
++ */
++ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++}
++
++/*
++ * If many queues belonging to the same group happen to be created
++ * shortly after each other, then the processes associated with these
++ * queues have typically a common goal. In particular, bursts of queue
++ * creations are usually caused by services or applications that spawn
++ * many parallel threads/processes. Examples are systemd during boot,
++ * or git grep. To help these processes get their job done as soon as
++ * possible, it is usually better to not grant either weight-raising
++ * or device idling to their queues.
++ *
++ * In this comment we describe, firstly, the reasons why this fact
++ * holds, and, secondly, the next function, which implements the main
++ * steps needed to properly mark these queues so that they can then be
++ * treated in a different way.
++ *
++ * The above services or applications benefit mostly from a high
++ * throughput: the quicker the requests of the activated queues are
++ * cumulatively served, the sooner the target job of these queues gets
++ * completed. As a consequence, weight-raising any of these queues,
++ * which also implies idling the device for it, is almost always
++ * counterproductive. In most cases it just lowers throughput.
++ *
++ * On the other hand, a burst of queue creations may be caused also by
++ * the start of an application that does not consist of a lot of
++ * parallel I/O-bound threads. In fact, with a complex application,
++ * several short processes may need to be executed to start-up the
++ * application. In this respect, to start an application as quickly as
++ * possible, the best thing to do is in any case to privilege the I/O
++ * related to the application with respect to all other
++ * I/O. Therefore, the best strategy to start an application that
++ * causes a burst of queue creations as quickly as possible is to
++ * weight-raise all the queues created during the burst. This is the
++ * exact opposite of the best strategy for the other type of bursts.
++ *
++ * In the end, to take the best action for each of the two cases, the
++ * two types of bursts need to be distinguished. Fortunately, this
++ * seems relatively easy, by looking at the sizes of the bursts. In
++ * particular, we found a threshold such that only bursts with a
++ * size larger than that threshold are apparently caused by
++ * services or commands such as systemd or git grep. For brevity,
++ * hereafter we call just 'large' these bursts. BFQ *does not*
++ * weight-raise queues whose creation occurs in a large burst. In
++ * addition, for each of these queues BFQ performs or does not perform
++ * idling depending on which choice boosts the throughput more. The
++ * exact choice depends on the device and request pattern at
++ * hand.
++ *
++ * Unfortunately, false positives may occur while an interactive task
++ * is starting (e.g., an application is being started). The
++ * consequence is that the queues associated with the task do not
++ * enjoy weight raising as expected. Fortunately these false positives
++ * are very rare. They typically occur if some service happens to
++ * start doing I/O exactly when the interactive task starts.
++ *
++ * Turning back to the next function, it implements all the steps
++ * needed to detect the occurrence of a large burst and to properly
++ * mark all the queues belonging to it (so that they can then be
++ * treated in a different way). This goal is achieved by maintaining a
++ * "burst list" that holds, temporarily, the queues that belong to the
++ * burst in progress. The list is then used to mark these queues as
++ * belonging to a large burst if the burst does become large. The main
++ * steps are the following.
++ *
++ * . when the very first queue is created, the queue is inserted into the
++ * list (as it could be the first queue in a possible burst)
++ *
++ * . if the current burst has not yet become large, and a queue Q that does
++ * not yet belong to the burst is activated shortly after the last time
++ * at which a new queue entered the burst list, then the function appends
++ * Q to the burst list
++ *
++ * . if, as a consequence of the previous step, the burst size reaches
++ * the large-burst threshold, then
++ *
++ * . all the queues in the burst list are marked as belonging to a
++ * large burst
++ *
++ * . the burst list is deleted; in fact, the burst list already served
++ * its purpose (keeping temporarily track of the queues in a burst,
++ * so as to be able to mark them as belonging to a large burst in the
++ * previous sub-step), and now is not needed any more
++ *
++ * . the device enters a large-burst mode
++ *
++ * . if a queue Q that does not belong to the burst is created while
++ * the device is in large-burst mode and shortly after the last time
++ * at which a queue either entered the burst list or was marked as
++ * belonging to the current large burst, then Q is immediately marked
++ * as belonging to a large burst.
++ *
++ * . if a queue Q that does not belong to the burst is created a while
++ *   later than, i.e., not shortly after, the last time at which a queue
++ * either entered the burst list or was marked as belonging to the
++ * current large burst, then the current burst is deemed as finished and:
++ *
++ * . the large-burst mode is reset if set
++ *
++ * . the burst list is emptied
++ *
++ * . Q is inserted in the burst list, as Q may be the first queue
++ * in a possible new burst (then the burst list contains just Q
++ * after this step).
++ */
++static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq is already in the burst list or is part of a large
++ * burst, or finally has just been split, then there is
++ * nothing else to do.
++ */
++ if (!hlist_unhashed(&bfqq->burst_list_node) ||
++ bfq_bfqq_in_large_burst(bfqq) ||
++ time_is_after_eq_jiffies(bfqq->split_time +
++ msecs_to_jiffies(10)))
++ return;
++
++ /*
++ * If bfqq's creation happens late enough, or bfqq belongs to
++ * a different group than the burst group, then the current
++ * burst is finished, and related data structures must be
++ * reset.
++ *
++ * In this respect, consider the special case where bfqq is
++ * the very first queue created after BFQ is selected for this
++ * device. In this case, last_ins_in_burst and
++ * burst_parent_entity are not yet significant when we get
++ * here. But it is easy to verify that, whether or not the
++ * following condition is true, bfqq will end up being
++ * inserted into the burst list. In particular the list will
++ * happen to contain only bfqq. And this is exactly what has
++ * to happen, as bfqq may be the first queue of the first
++ * burst.
++ */
++ if (time_is_before_jiffies(bfqd->last_ins_in_burst +
++ bfqd->bfq_burst_interval) ||
++ bfqq->entity.parent != bfqd->burst_parent_entity) {
++ bfqd->large_burst = false;
++ bfq_reset_burst_list(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "handle_burst: late activation or different group");
++ goto end;
++ }
++
++ /*
++ * If we get here, then bfqq is being activated shortly after the
++ * last queue. So, if the current burst is also large, we can mark
++ * bfqq as belonging to this large burst immediately.
++ */
++ if (bfqd->large_burst) {
++ bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst");
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ goto end;
++ }
++
++ /*
++ * If we get here, then a large-burst state has not yet been
++ * reached, but bfqq is being activated shortly after the last
++ * queue. Then we add bfqq to the burst.
++ */
++ bfq_add_to_burst(bfqd, bfqq);
++end:
++ /*
++ * At this point, bfqq either has been added to the current
++ * burst or has caused the current burst to terminate and a
++ * possible new burst to start. In particular, in the second
++ * case, bfqq has become the first queue in the possible new
++ * burst. In both cases last_ins_in_burst needs to be moved
++ * forward.
++ */
++ bfqd->last_ins_in_burst = jiffies;
++
++}
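
An illustrative timeline of the steps above (burst sizes and timings depend on bfq_large_burst_thresh and bfq_burst_interval, whatever they are configured to): systemd activates its first worker, whose queue starts a fresh burst list; each further worker activated within bfq_burst_interval of the previous one is appended to the list; once the list reaches bfq_large_burst_thresh entries, all listed queues are marked as belonging to a large burst and the list is dropped; any queue created shortly afterwards under the same parent group is marked immediately; a queue created well after the last activation, or under a different parent group, instead ends the burst and becomes the first queue of a possible new one.
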
++
++static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ return entity->budget - entity->service;
++}
++
++/*
++ * If enough samples have been computed, return the current max budget
++ * stored in bfqd, which is dynamically updated according to the
++ * estimated disk peak rate; otherwise return the default max budget
++ */
++static int bfq_max_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < bfq_stats_min_budgets)
++ return bfq_default_max_budget;
++ else
++ return bfqd->bfq_max_budget;
++}
++
++/*
++ * Return min budget, which is a fraction of the current or default
++ * max budget (trying with 1/32)
++ */
++static int bfq_min_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < bfq_stats_min_budgets)
++ return bfq_default_max_budget / 32;
++ else
++ return bfqd->bfq_max_budget / 32;
++}
++
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool compensate,
++ enum bfqq_expiration reason);
++
++/*
++ * The next function, invoked after the input queue bfqq switches from
++ * idle to busy, updates the budget of bfqq. The function also tells
++ * whether the in-service queue should be expired, by returning
++ * true. The purpose of expiring the in-service queue is to give bfqq
++ * the chance to possibly preempt the in-service queue, and the reason
++ * for preempting the in-service queue is to achieve one of the two
++ * goals below.
++ *
++ * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
++ * expired because it has remained idle. In particular, bfqq may have
++ * expired for one of the following two reasons:
++ *
++ * - BFQ_BFQQ_NO_MORE_REQUEST bfqq did not enjoy any device idling and
++ * did not make it to issue a new request before its last request
++ * was served;
++ *
++ * - BFQ_BFQQ_TOO_IDLE bfqq did enjoy device idling, but did not issue
++ * a new request before the expiration of the idling-time.
++ *
++ * Even if bfqq has expired for one of the above reasons, the process
++ * associated with the queue may however be issuing requests greedily,
++ * and thus be sensitive to the bandwidth it receives (bfqq may have
++ * remained idle for other reasons: high CPU load, bfqq not enjoying
++ * idling, I/O throttling somewhere in the path from the process to
++ * the I/O scheduler, ...). But if, after every expiration for one of
++ * the above two reasons, bfqq has to wait for the service of at least
++ * one full budget of another queue before being served again, then
++ * bfqq is likely to get a much lower bandwidth or resource time than
++ * its reserved ones. To address this issue, two countermeasures need
++ * to be taken.
++ *
++ * First, the budget and the timestamps of bfqq need to be updated in
++ * a special way on bfqq reactivation: they need to be updated as if
++ * bfqq did not remain idle and did not expire. In fact, if they are
++ * computed as if bfqq expired and remained idle until reactivation,
++ * then the process associated with bfqq is treated as if, instead of
++ * being greedy, it stopped issuing requests when bfqq remained idle,
++ * and restarts issuing requests only on this reactivation. In other
++ * words, the scheduler does not help the process recover the "service
++ * hole" between bfqq expiration and reactivation. As a consequence,
++ * the process receives a lower bandwidth than its reserved one. In
++ * contrast, to recover this hole, the budget must be updated as if
++ * bfqq was not expired at all before this reactivation, i.e., it must
++ * be set to the value of the remaining budget when bfqq was
++ * expired. Along the same line, timestamps need to be assigned the
++ * value they had the last time bfqq was selected for service, i.e.,
++ * before last expiration. Thus timestamps need to be back-shifted
++ * with respect to their normal computation (see [1] for more details
++ * on this tricky aspect).
++ *
++ * Secondly, to allow the process to recover the hole, the in-service
++ * queue must be expired too, to give bfqq the chance to preempt it
++ * immediately. In fact, if bfqq has to wait for a full budget of the
++ * in-service queue to be completed, then it may become impossible to
++ * let the process recover the hole, even if the back-shifted
++ * timestamps of bfqq are lower than those of the in-service queue. If
++ * this happens for most or all of the holes, then the process may not
++ * receive its reserved bandwidth. In this respect, it is worth noting
++ * that, since the service of outstanding requests is not preemptible,
++ * a small fraction of the holes may nevertheless be unrecoverable,
++ * thereby causing a small loss of bandwidth.
++ *
++ * The last important point is detecting whether bfqq does need this
++ * bandwidth recovery. In this respect, the next function deems the
++ * process associated with bfqq greedy, and thus allows it to recover
++ * the hole, if: 1) the process is waiting for the arrival of a new
++ * request (which implies that bfqq expired for one of the above two
++ * reasons), and 2) such a request has arrived soon. The first
++ * condition is controlled through the flag non_blocking_wait_rq,
++ * while the second through the flag arrived_in_time. If both
++ * conditions hold, then the function computes the budget in the
++ * above-described special way, and signals that the in-service queue
++ * should be expired. Timestamp back-shifting is done later in
++ * __bfq_activate_entity.
++ *
++ * 2. Reduce latency. Even if timestamps are not backshifted to let
++ * the process associated with bfqq recover a service hole, bfqq may
++ * however happen to have, after being (re)activated, a lower finish
++ * timestamp than the in-service queue. That is, the next budget of
++ * bfqq may have to be completed before the one of the in-service
++ * queue. If this is the case, then preempting the in-service queue
++ * allows this goal to be achieved, apart from the unpreemptible,
++ * outstanding requests mentioned above.
++ *
++ * Unfortunately, regardless of which of the above two goals one wants
++ * to achieve, service trees need first to be updated to know whether
++ * the in-service queue must be preempted. To have service trees
++ * correctly updated, the in-service queue must be expired and
++ * rescheduled, and bfqq must be scheduled too. This is one of the
++ * most costly operations (in future versions, the scheduling
++ * mechanism may be re-designed in such a way to make it possible to
++ * know whether preemption is needed without needing to update service
++ * trees). In addition, queue preemptions almost always cause random
++ * I/O, and thus loss of throughput. Because of these facts, the next
++ * function adopts the following simple scheme to avoid both costly
++ * operations and too frequent preemptions: it requests the expiration
++ * of the in-service queue (unconditionally) only for queues that need
++ * to recover a hole, or that either are weight-raised or deserve to
++ * be weight-raised.
++ */
++static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool arrived_in_time,
++ bool wr_or_deserves_wr)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
++ /*
++ * We do not clear the flag non_blocking_wait_rq here, as
++ * the latter is used in bfq_activate_bfqq to signal
++ * that timestamps need to be back-shifted (and is
++ * cleared right after).
++ */
++
++ /*
++		 * In the next assignment we rely on the fact that
++		 * neither entity->service nor entity->budget is
++		 * updated on expiration if bfqq is empty (see
++ * __bfq_bfqq_recalc_budget). Thus both quantities
++ * remain unchanged after such an expiration, and the
++ * following statement therefore assigns to
++ * entity->budget the remaining budget on such an
++ * expiration. For clarity, entity->service is not
++ * updated on expiration in any case, and, in normal
++ * operation, is reset only when bfqq is selected for
++ * service (see bfq_get_next_queue).
++ */
++ BUG_ON(bfqq->max_budget < 0);
++ entity->budget = min_t(unsigned long,
++ bfq_bfqq_budget_left(bfqq),
++ bfqq->max_budget);
++
++ BUG_ON(entity->budget < 0);
++ return true;
++ }
++
++ BUG_ON(bfqq->max_budget < 0);
++ entity->budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(bfqq->next_rq, bfqq));
++ BUG_ON(entity->budget < 0);
++
++ bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
++ return wr_or_deserves_wr;
++}
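
As a concrete case of the budget update above: suppose bfqq was expired while waiting for a new request, with 40 sectors of its 120-sector budget still unused, and the awaited request arrives in time. entity->budget is then set to min(40, bfqq->max_budget), so the reactivated queue reclaims only the service hole it actually lost, and the function returns true to signal that the in-service queue may need to be expired so that bfqq can preempt it.
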
++
++static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ unsigned int old_wr_coeff,
++ bool wr_or_deserves_wr,
++ bool interactive,
++ bool in_burst,
++ bool soft_rt)
++{
++ if (old_wr_coeff == 1 && wr_or_deserves_wr) {
++ /* start a weight-raising period */
++ if (interactive) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ } else {
++ bfqq->wr_start_at_switch_to_srt = jiffies;
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
++ BFQ_SOFTRT_WEIGHT_FACTOR;
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ }
++ /*
++ * If needed, further reduce budget to make sure it is
++		 * scheduling-error component due to an overly large
++		 * budget. We do not care about throughput here,
++		 * only about latency. Finally, do not assign too
++		 * small a budget either, to avoid increasing
++ * too small budget either, to avoid increasing
++ * latency by causing too frequent expirations.
++ */
++ bfqq->entity.budget = min_t(unsigned long,
++ bfqq->entity.budget,
++ 2 * bfq_min_budget(bfqd));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais starting at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ } else if (old_wr_coeff > 1) {
++ if (interactive) { /* update wr coeff and duration */
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ } else if (in_burst) {
++ bfqq->wr_coeff = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais ending at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->
++ wr_cur_max_time));
++ } else if (soft_rt) {
++ /*
++ * The application is now or still meeting the
++ * requirements for being deemed soft rt. We
++ * can then correctly and safely (re)charge
++ * the weight-raising duration for the
++ * application with the weight-raising
++ * duration for soft rt applications.
++ *
++ * In particular, doing this recharge now, i.e.,
++ * before the weight-raising period for the
++ * application finishes, reduces the probability
++ * of the following negative scenario:
++ * 1) the weight of a soft rt application is
++ * raised at startup (as for any newly
++ * created application),
++ * 2) since the application is not interactive,
++ * at a certain time weight-raising is
++ * stopped for the application,
++ * 3) at that time the application happens to
++ * still have pending requests, and hence
++ * is destined to not have a chance to be
++ * deemed soft rt before these requests are
++ * completed (see the comments to the
++ * function bfq_bfqq_softrt_next_start()
++ * for details on soft rt detection),
++ * 4) these pending requests experience a high
++ * latency because the application is not
++ * weight-raised while they are pending.
++ */
++ if (bfqq->wr_cur_max_time !=
++ bfqd->bfq_wr_rt_max_time) {
++ bfqq->wr_start_at_switch_to_srt =
++ bfqq->last_wr_start_finish;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
++ BFQ_SOFTRT_WEIGHT_FACTOR;
++ bfq_log_bfqq(bfqd, bfqq,
++ "switching to soft_rt wr");
++ } else
++ bfq_log_bfqq(bfqd, bfqq,
++ "moving forward soft_rt wr duration");
++ bfqq->last_wr_start_finish = jiffies;
++ }
++ }
++}
++
++static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ return bfqq->dispatched == 0 &&
++ time_is_before_jiffies(
++ bfqq->budget_timeout +
++ bfqd->bfq_wr_min_idle_time);
++}
++
++static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ int old_wr_coeff,
++ struct request *rq,
++ bool *interactive)
++{
++ bool soft_rt, in_burst, wr_or_deserves_wr,
++ bfqq_wants_to_preempt,
++ idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
++ /*
++ * See the comments on
++ * bfq_bfqq_update_budg_for_activation for
++ * details on the usage of the next variable.
++ */
++ arrived_in_time = ktime_get_ns() <=
++ RQ_BIC(rq)->ttime.last_end_request +
++ bfqd->bfq_slice_idle * 3;
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "bfq_add_request non-busy: "
++ "jiffies %lu, in_time %d, idle_long %d busyw %d "
++ "wr_coeff %u",
++ jiffies, arrived_in_time,
++ idle_for_long_time,
++ bfq_bfqq_non_blocking_wait_rq(bfqq),
++ old_wr_coeff);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ BUG_ON(bfqq == bfqd->in_service_queue);
++ bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
++
++ /*
++ * bfqq deserves to be weight-raised if:
++ * - it is sync,
++ * - it does not belong to a large burst,
++ * - it has been idle for enough time or is soft real-time,
++	 * - it is linked to a bfq_io_cq (it is not shared in any sense)
++ */
++ in_burst = bfq_bfqq_in_large_burst(bfqq);
++ soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
++ !in_burst &&
++ time_is_before_jiffies(bfqq->soft_rt_next_start);
++ *interactive =
++ !in_burst &&
++ idle_for_long_time;
++ wr_or_deserves_wr = bfqd->low_latency &&
++ (bfqq->wr_coeff > 1 ||
++ (bfq_bfqq_sync(bfqq) &&
++ bfqq->bic && (*interactive || soft_rt)));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "bfq_add_request: "
++ "in_burst %d, "
++ "soft_rt %d (next %lu), inter %d, bic %p",
++ bfq_bfqq_in_large_burst(bfqq), soft_rt,
++ bfqq->soft_rt_next_start,
++ *interactive,
++ bfqq->bic);
++
++ /*
++ * Using the last flag, update budget and check whether bfqq
++ * may want to preempt the in-service queue.
++ */
++ bfqq_wants_to_preempt =
++ bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
++ arrived_in_time,
++ wr_or_deserves_wr);
++
++ /*
++ * If bfqq happened to be activated in a burst, but has been
++ * idle for much more than an interactive queue, then we
++ * assume that, in the overall I/O initiated in the burst, the
++ * I/O associated with bfqq is finished. So bfqq does not need
++ * to be treated as a queue belonging to a burst
++ * anymore. Accordingly, we reset bfqq's in_large_burst flag
++ * if set, and remove bfqq from the burst list if it's
++ * there. We do not decrement burst_size, because the fact
++ * that bfqq does not need to belong to the burst list any
++ * more does not invalidate the fact that bfqq was created in
++ * a burst.
++ */
++ if (likely(!bfq_bfqq_just_created(bfqq)) &&
++ idle_for_long_time &&
++ time_is_before_jiffies(
++ bfqq->budget_timeout +
++ msecs_to_jiffies(10000))) {
++ hlist_del_init(&bfqq->burst_list_node);
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ }
++
++ bfq_clear_bfqq_just_created(bfqq);
++
++ if (!bfq_bfqq_IO_bound(bfqq)) {
++ if (arrived_in_time) {
++ bfqq->requests_within_timer++;
++ if (bfqq->requests_within_timer >=
++ bfqd->bfq_requests_within_timer)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ } else
++ bfqq->requests_within_timer = 0;
++ bfq_log_bfqq(bfqd, bfqq, "requests in time %d",
++ bfqq->requests_within_timer);
++ }
++
++ if (bfqd->low_latency) {
++ if (unlikely(time_is_after_jiffies(bfqq->split_time)))
++ /* wraparound */
++ bfqq->split_time =
++ jiffies - bfqd->bfq_wr_min_idle_time - 1;
++
++ if (time_is_before_jiffies(bfqq->split_time +
++ bfqd->bfq_wr_min_idle_time)) {
++ bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
++ old_wr_coeff,
++ wr_or_deserves_wr,
++ *interactive,
++ in_burst,
++ soft_rt);
++
++ if (old_wr_coeff != bfqq->wr_coeff)
++ bfqq->entity.prio_changed = 1;
++ }
++ }
++
++ bfqq->last_idle_bklogged = jiffies;
++ bfqq->service_from_backlogged = 0;
++ bfq_clear_bfqq_softrt_update(bfqq);
++
++ bfq_add_bfqq_busy(bfqd, bfqq);
++
++ /*
++ * Expire in-service queue only if preemption may be needed
++ * for guarantees. In this respect, the function
++ * next_queue_may_preempt just checks a simple, necessary
++ * condition, and not a sufficient condition based on
++ * timestamps. In fact, for the latter condition to be
++ * evaluated, timestamps would need first to be updated, and
++ * this operation is quite costly (see the comments on the
++ * function bfq_bfqq_update_budg_for_activation).
++ */
++ if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
++ bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
++ next_queue_may_preempt(bfqd)) {
++ struct bfq_queue *in_serv =
++ bfqd->in_service_queue;
++ BUG_ON(in_serv == bfqq);
++
++ bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
++ false, BFQ_BFQQ_PREEMPTED);
++ }
++}
++
++static void bfq_add_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *next_rq, *prev;
++ unsigned int old_wr_coeff = bfqq->wr_coeff;
++ bool interactive = false;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s",
++ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A");
++
++ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqq->wr_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ bfqq->queued[rq_is_sync(rq)]++;
++ bfqd->queued++;
++
++ elv_rb_add(&bfqq->sort_list, rq);
++
++ /*
++ * Check if this request is a better next-to-serve candidate.
++ */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
++ BUG_ON(!next_rq);
++ bfqq->next_rq = next_rq;
++
++ /*
++ * Adjust priority tree position, if next_rq changes.
++ */
++ if (prev != bfqq->next_rq)
++ bfq_pos_tree_add_move(bfqd, bfqq);
++
++ if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
++ bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
++ rq, &interactive);
++ else {
++ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
++ time_is_before_jiffies(
++ bfqq->last_wr_start_finish +
++ bfqd->bfq_wr_min_inter_arr_async)) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "non-idle wrais starting, "
++ "wr_max_time %u wr_busy %d",
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqd->wr_busy_queues);
++ }
++ if (prev != bfqq->next_rq)
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ /*
++ * Assign jiffies to last_wr_start_finish in the following
++ * cases:
++ *
++ * . if bfqq is not going to be weight-raised, because, for
++ * non weight-raised queues, last_wr_start_finish stores the
++ * arrival time of the last request; as of now, this piece
++ * of information is used only for deciding whether to
++ * weight-raise async queues
++ *
++ * . if bfqq is not weight-raised, because, if bfqq is now
++ * switching to weight-raised, then last_wr_start_finish
++ * stores the time when weight-raising starts
++ *
++ * . if bfqq is interactive, because, regardless of whether
++ * bfqq is currently weight-raised, the weight-raising
++ * period must start or restart (this case is considered
++ * separately because it is not detected by the above
++ * conditions, if bfqq is already weight-raised)
++ *
++ * last_wr_start_finish has to be updated also if bfqq is soft
++ * real-time, because the weight-raising period is constantly
++ * restarted on idle-to-busy transitions for these queues, but
++ * this is already done in bfq_bfqq_handle_idle_busy_switch if
++ * needed.
++ */
++ if (bfqd->low_latency &&
++ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
++ bfqq->last_wr_start_finish = jiffies;
++}
++
++static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
++ struct bio *bio)
++{
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (!bic)
++ return NULL;
++
++ bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
++ if (bfqq)
++ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
++
++ return NULL;
++}
++
++static sector_t get_sdist(sector_t last_pos, struct request *rq)
++{
++ sector_t sdist = 0;
++
++ if (last_pos) {
++ if (last_pos < blk_rq_pos(rq))
++ sdist = blk_rq_pos(rq) - last_pos;
++ else
++ sdist = last_pos - blk_rq_pos(rq);
++ }
++
++ return sdist;
++}
++
++static void bfq_activate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ bfqd->rq_in_driver++;
++}
++
++static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ BUG_ON(bfqd->rq_in_driver == 0);
++ bfqd->rq_in_driver--;
++}
++
++static void bfq_remove_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ const int sync = rq_is_sync(rq);
++
++ BUG_ON(bfqq->entity.service > bfqq->entity.budget &&
++ bfqq == bfqd->in_service_queue);
++
++ if (bfqq->next_rq == rq) {
++ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ if (rq->queuelist.prev != &rq->queuelist)
++ list_del_init(&rq->queuelist);
++ BUG_ON(bfqq->queued[sync] == 0);
++ bfqq->queued[sync]--;
++ bfqd->queued--;
++ elv_rb_del(&bfqq->sort_list, rq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ bfqq->next_rq = NULL;
++
++ BUG_ON(bfqq->entity.budget < 0);
++
++ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
++ BUG_ON(bfqq->ref < 2); /* referred by rq and on tree */
++ bfq_del_bfqq_busy(bfqd, bfqq, false);
++ /*
++ * bfqq emptied. In normal operation, when
++ * bfqq is empty, bfqq->entity.service and
++ * bfqq->entity.budget must contain,
++ * respectively, the service received and the
++ * budget used last time bfqq emptied. These
++ * facts do not hold in this case, as at least
++ * this last removal occurred while bfqq is
++ * not in service. To avoid inconsistencies,
++ * reset both bfqq->entity.service and
++ * bfqq->entity.budget, if bfqq has still a
++ * process that may issue I/O requests to it.
++ */
++ bfqq->entity.budget = bfqq->entity.service = 0;
++ }
++
++ /*
++ * Remove queue from request-position tree as it is empty.
++ */
++ if (bfqq->pos_root) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++ }
++
++ if (rq->cmd_flags & REQ_META) {
++ BUG_ON(bfqq->meta_pending == 0);
++ bfqq->meta_pending--;
++ }
++ bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
++}
++
++static enum elv_merge bfq_merge(struct request_queue *q, struct request **req,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct request *__rq;
++
++ __rq = bfq_find_rq_fmerge(bfqd, bio);
++ if (__rq && elv_bio_merge_ok(__rq, bio)) {
++ *req = __rq;
++ return ELEVATOR_FRONT_MERGE;
++ }
++
++ return ELEVATOR_NO_MERGE;
++}
++
++static void bfq_merged_request(struct request_queue *q, struct request *req,
++ enum elv_merge type)
++{
++ if (type == ELEVATOR_FRONT_MERGE &&
++ rb_prev(&req->rb_node) &&
++ blk_rq_pos(req) <
++ blk_rq_pos(container_of(rb_prev(&req->rb_node),
++ struct request, rb_node))) {
++ struct bfq_queue *bfqq = RQ_BFQQ(req);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *prev, *next_rq;
++
++ /* Reposition request in its sort_list */
++ elv_rb_del(&bfqq->sort_list, req);
++ elv_rb_add(&bfqq->sort_list, req);
++ /* Choose next request to be served for bfqq */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
++ bfqd->last_position);
++ BUG_ON(!next_rq);
++ bfqq->next_rq = next_rq;
++ /*
++ * If next_rq changes, update both the queue's budget to
++ * fit the new request and the queue's position in its
++ * rq_pos_tree.
++ */
++ if (prev != bfqq->next_rq) {
++ bfq_updated_next_req(bfqd, bfqq);
++ bfq_pos_tree_add_move(bfqd, bfqq);
++ }
++ }
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static void bfq_bio_merged(struct request_queue *q, struct request *req,
++ struct bio *bio)
++{
++ bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf);
++}
++#endif
++
++static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
++
++ /*
++ * If next and rq belong to the same bfq_queue and next is older
++ * than rq, then reposition rq in the fifo (by substituting next
++ * with rq). Otherwise, if next and rq belong to different
++ * bfq_queues, never reposition rq: in fact, we would have to
++ * reposition it with respect to next's position in its own fifo,
++ * which would most certainly be too expensive with respect to
++ * the benefits.
++ */
++ if (bfqq == next_bfqq &&
++ !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
++ next->fifo_time < rq->fifo_time) {
++ list_del_init(&rq->queuelist);
++ list_replace_init(&next->queuelist, &rq->queuelist);
++ rq->fifo_time = next->fifo_time;
++ }
++
++ if (bfqq->next_rq == next)
++ bfqq->next_rq = rq;
++
++ bfq_remove_request(next);
++ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
++}
++
++/* Must be called with bfqq != NULL */
++static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
++{
++ BUG_ON(!bfqq);
++
++ if (bfq_bfqq_busy(bfqq)) {
++ bfqq->bfqd->wr_busy_queues--;
++ BUG_ON(bfqq->bfqd->wr_busy_queues < 0);
++ }
++ bfqq->wr_coeff = 1;
++ bfqq->wr_cur_max_time = 0;
++ bfqq->last_wr_start_finish = jiffies;
++ /*
++ * Trigger a weight change on the next invocation of
++ * __bfq_entity_update_weight_prio.
++ */
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "end_wr: wrais ending at %lu, rais_max_time %u",
++ bfqq->last_wr_start_finish,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d",
++ bfqq->bfqd->wr_busy_queues);
++}
++
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ if (bfqg->async_bfqq[i][j])
++ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
++ if (bfqg->async_idle_bfqq)
++ bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
++}
++
++static void bfq_end_wr(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ bfq_end_wr_async(bfqd);
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++}
++
++static sector_t bfq_io_struct_pos(void *io_struct, bool request)
++{
++ if (request)
++ return blk_rq_pos(io_struct);
++ else
++ return ((struct bio *)io_struct)->bi_iter.bi_sector;
++}
++
++static int bfq_rq_close_to_sector(void *io_struct, bool request,
++ sector_t sector)
++{
++ return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
++ BFQQ_CLOSE_THR;
++}
++
++static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ sector_t sector)
++{
++ struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
++ struct rb_node *parent, *node;
++ struct bfq_queue *__bfqq;
++
++ if (RB_EMPTY_ROOT(root))
++ return NULL;
++
++ /*
++ * First, if we find a request starting at the end of the last
++ * request, choose it.
++ */
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
++ if (__bfqq)
++ return __bfqq;
++
++ /*
++ * If the exact sector wasn't found, the parent of the NULL leaf
++ * will contain the closest sector (rq_pos_tree sorted by
++ * next_request position).
++ */
++ __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ if (blk_rq_pos(__bfqq->next_rq) < sector)
++ node = rb_next(&__bfqq->pos_node);
++ else
++ node = rb_prev(&__bfqq->pos_node);
++ if (!node)
++ return NULL;
++
++ __bfqq = rb_entry(node, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ return NULL;
++}
++
++static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
++ struct bfq_queue *cur_bfqq,
++ sector_t sector)
++{
++ struct bfq_queue *bfqq;
++
++ /*
++	 * We should notice whether some of the queues are cooperating,
++ * e.g., working closely on the same area of the device. In
++ * that case, we can group them together and: 1) don't waste
++ * time idling, and 2) serve the union of their requests in
++ * the best possible order for throughput.
++ */
++ bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
++ if (!bfqq || bfqq == cur_bfqq)
++ return NULL;
++
++ return bfqq;
++}
++
++static struct bfq_queue *
++bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ int process_refs, new_process_refs;
++ struct bfq_queue *__bfqq;
++
++ /*
++ * If there are no process references on the new_bfqq, then it is
++ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++ * may have dropped their last reference (not just their last process
++ * reference).
++ */
++ if (!bfqq_process_refs(new_bfqq))
++ return NULL;
++
++ /* Avoid a circular list and skip interim queue merges. */
++ while ((__bfqq = new_bfqq->new_bfqq)) {
++ if (__bfqq == bfqq)
++ return NULL;
++ new_bfqq = __bfqq;
++ }
++
++ process_refs = bfqq_process_refs(bfqq);
++ new_process_refs = bfqq_process_refs(new_bfqq);
++ /*
++ * If the process for the bfqq has gone away, there is no
++ * sense in merging the queues.
++ */
++ if (process_refs == 0 || new_process_refs == 0)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++ new_bfqq->pid);
++
++ /*
++ * Merging is just a redirection: the requests of the process
++ * owning one of the two queues are redirected to the other queue.
++ * The latter queue, in its turn, is set as shared if this is the
++ * first time that the requests of some process are redirected to
++ * it.
++ *
++ * We redirect bfqq to new_bfqq and not the opposite, because we
++ * are in the context of the process owning bfqq, hence we have
++ * the io_cq of this process. So we can immediately configure this
++ * io_cq to redirect the requests of the process to new_bfqq.
++ *
++ * NOTE, even if new_bfqq coincides with the in-service queue, the
++ * io_cq of new_bfqq is not available, because, if the in-service
++ * queue is shared, bfqd->in_service_bic may not point to the
++ * io_cq of the in-service queue.
++ * Redirecting the requests of the process owning bfqq to the
++ * currently in-service queue is in any case the best option, as
++ * we feed the in-service queue with new requests close to the
++ * last request served and, by doing so, hopefully increase the
++ * throughput.
++ */
++ bfqq->new_bfqq = new_bfqq;
++ new_bfqq->ref += process_refs;
++ return new_bfqq;
++}
++
++static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
++ struct bfq_queue *new_bfqq)
++{
++ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
++ (bfqq->ioprio_class != new_bfqq->ioprio_class))
++ return false;
++
++ /*
++ * If either of the queues has already been detected as seeky,
++ * then merging it with the other queue is unlikely to lead to
++ * sequential I/O.
++ */
++ if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
++ return false;
++
++ /*
++ * Interleaved I/O is known to be done by (some) applications
++ * only for reads, so it does not make sense to merge async
++ * queues.
++ */
++ if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
++ return false;
++
++ return true;
++}
++
++/*
++ * If this function returns true, then bfqq cannot be merged. The idea
++ * is that true cooperation happens very early after processes start
++ * to do I/O. Usually, late cooperations are just accidental false
++ * positives. In case bfqq is weight-raised, such false positives
++ * would evidently degrade latency guarantees for bfqq.
++ */
++static bool wr_from_too_long(struct bfq_queue *bfqq)
++{
++ return bfqq->wr_coeff > 1 &&
++ time_is_before_jiffies(bfqq->last_wr_start_finish +
++ msecs_to_jiffies(100));
++}
++
++/*
++ * Attempt to schedule a merge of bfqq with the currently in-service
++ * queue or with a close queue among the scheduled queues. Return
++ * NULL if no merge was scheduled, a pointer to the shared bfq_queue
++ * structure otherwise.
++ *
++ * The OOM queue is not allowed to participate in cooperation: in fact, since
++ * the requests temporarily redirected to the OOM queue could be redirected
++ * again to dedicated queues at any time, the state needed to correctly
++ * handle merging with the OOM queue would be quite complex and expensive
++ * to maintain. Besides, in such a critical condition as an out of memory,
++ * the benefits of queue merging may be little relevant, or even negligible.
++ *
++ * Weight-raised queues can be merged only if their weight-raising
++ * period has just started. In fact cooperating processes are usually
++ * started together. Thus, with this filter we avoid false positives
++ * that would jeopardize low-latency guarantees.
++ *
++ * WARNING: queue merging may impair fairness among non-weight raised
++ * queues, for at least two reasons: 1) the original weight of a
++ * merged queue may change during the merged state, 2) even being the
++ * weight the same, a merged queue may be bloated with many more
++ * requests than the ones produced by its originally-associated
++ * process.
++ */
++static struct bfq_queue *
++bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ void *io_struct, bool request)
++{
++ struct bfq_queue *in_service_bfqq, *new_bfqq;
++
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
++ if (io_struct && wr_from_too_long(bfqq) &&
++ likely(bfqq != &bfqd->oom_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have looked for coop, but bfq%d wr",
++ bfqq->pid);
++
++ if (!io_struct ||
++ wr_from_too_long(bfqq) ||
++ unlikely(bfqq == &bfqd->oom_bfqq))
++ return NULL;
++
++ /* If there is only one backlogged queue, don't search. */
++ if (bfqd->busy_queues == 1)
++ return NULL;
++
++ in_service_bfqq = bfqd->in_service_queue;
++
++ if (in_service_bfqq && in_service_bfqq != bfqq &&
++ bfqd->in_service_bic && wr_from_too_long(in_service_bfqq)
++ && likely(in_service_bfqq == &bfqd->oom_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have tried merge with in-service-queue, but wr");
++
++ if (!in_service_bfqq || in_service_bfqq == bfqq ||
++ !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) ||
++ unlikely(in_service_bfqq == &bfqd->oom_bfqq))
++ goto check_scheduled;
++
++ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ bfqq->entity.parent == in_service_bfqq->entity.parent &&
++ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
++ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
++ if (new_bfqq)
++ return new_bfqq;
++ }
++ /*
++ * Check whether there is a cooperator among currently scheduled
++ * queues. The only thing we need is that the bio/request is not
++ * NULL, as we need it to establish whether a cooperator exists.
++ */
++check_scheduled:
++ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
++ bfq_io_struct_pos(io_struct, request));
++
++ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent);
++
++ if (new_bfqq && wr_from_too_long(new_bfqq) &&
++ likely(new_bfqq != &bfqd->oom_bfqq) &&
++ bfq_may_be_close_cooperator(bfqq, new_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have merged with bfq%d, but wr",
++ new_bfqq->pid);
++
++ if (new_bfqq && !wr_from_too_long(new_bfqq) &&
++ likely(new_bfqq != &bfqd->oom_bfqq) &&
++ bfq_may_be_close_cooperator(bfqq, new_bfqq))
++ return bfq_setup_merge(bfqq, new_bfqq);
++
++ return NULL;
++}
++
++static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
++{
++ struct bfq_io_cq *bic = bfqq->bic;
++
++ /*
++ * If !bfqq->bic, the queue is already shared or its requests
++ * have already been redirected to a shared queue; both idle window
++ * and weight raising state have already been saved. Do nothing.
++ */
++ if (!bic)
++ return;
++
++ bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
++ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
++ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
++ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
++ bic->saved_wr_coeff = bfqq->wr_coeff;
++ bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
++ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
++ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++}
++
++static void bfq_get_bic_reference(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic has a non-NULL value, the bic to which it belongs
++ * is about to begin using a shared bfq_queue.
++ */
++ if (bfqq->bic)
++ atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
++}
++
++static void
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++ (unsigned long) new_bfqq->pid);
++ /* Save weight raising and idle window of the merged queues */
++ bfq_bfqq_save_state(bfqq);
++ bfq_bfqq_save_state(new_bfqq);
++ if (bfq_bfqq_IO_bound(bfqq))
++ bfq_mark_bfqq_IO_bound(new_bfqq);
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ /*
++ * If bfqq is weight-raised, then let new_bfqq inherit
++ * weight-raising. To reduce false positives, neglect the case
++ * where bfqq has just been created, but has not yet made it
++ * to be weight-raised (which may happen because EQM may merge
++ * bfqq even before bfq_add_request is executed for the first
++ * time for bfqq). Handling this case would however be very
++ * easy, thanks to the flag just_created.
++ */
++ if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
++ new_bfqq->wr_coeff = bfqq->wr_coeff;
++ new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
++ new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
++ new_bfqq->wr_start_at_switch_to_srt =
++ bfqq->wr_start_at_switch_to_srt;
++ if (bfq_bfqq_busy(new_bfqq)) {
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ }
++
++ new_bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, new_bfqq,
++ "wr start after merge with %d, rais_max_time %u",
++ bfqq->pid,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
++ bfqq->wr_coeff = 1;
++ bfqq->entity.prio_changed = 1;
++ if (bfq_bfqq_busy(bfqq)) {
++ bfqd->wr_busy_queues--;
++ BUG_ON(bfqd->wr_busy_queues < 0);
++ }
++
++ }
++
++ bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
++ bfqd->wr_busy_queues);
++
++ /*
++ * Grab a reference to the bic, to prevent it from being destroyed
++ * before being possibly touched by a bfq_split_bfqq().
++ */
++ bfq_get_bic_reference(bfqq);
++ bfq_get_bic_reference(new_bfqq);
++ /*
++ * Merge queues (that is, let bic redirect its requests to new_bfqq)
++ */
++ bic_set_bfqq(bic, new_bfqq, 1);
++ bfq_mark_bfqq_coop(new_bfqq);
++ /*
++ * new_bfqq now belongs to at least two bics (it is a shared queue):
++ * set new_bfqq->bic to NULL. bfqq either:
++ * - does not belong to any bic any more, and hence bfqq->bic must
++ * be set to NULL, or
++ * - is a queue whose owning bics have already been redirected to a
++ * different queue, hence the queue is destined to not belong to
++ * any bic soon and bfqq->bic is already NULL (therefore the next
++ * assignment causes no harm).
++ */
++ new_bfqq->bic = NULL;
++ bfqq->bic = NULL;
++ /* release process reference to bfqq */
++ bfq_put_queue(bfqq);
++}
++
++static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ bool is_sync = op_is_sync(bio->bi_opf);
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq, *new_bfqq;
++
++ /*
++ * Disallow merge of a sync bio into an async request.
++ */
++ if (is_sync && !rq_is_sync(rq))
++ return false;
++
++ /*
++ * Lookup the bfqq that this bio will be queued with. Allow
++ * merge only if rq is queued there.
++ * Queue lock is held here.
++ */
++ bic = bfq_bic_lookup(bfqd, current->io_context);
++ if (!bic)
++ return false;
++
++ bfqq = bic_to_bfqq(bic, is_sync);
++ /*
++ * We take advantage of this function to perform an early merge
++ * of the queues of possible cooperating processes.
++ */
++ if (bfqq) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++ if (new_bfqq) {
++ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
++ /*
++ * If we get here, the bio will be queued in the
++ * shared queue, i.e., new_bfqq, so use new_bfqq
++ * to decide whether bio and rq can be merged.
++ */
++ bfqq = new_bfqq;
++ }
++ }
++
++ return bfqq == RQ_BFQQ(rq);
++}
++
++static int bfq_allow_rq_merge(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ return RQ_BFQQ(rq) == RQ_BFQQ(next);
++}
++
++/*
++ * Set the maximum time for the in-service queue to consume its
++ * budget. This prevents seeky processes from lowering the throughput.
++ * In practice, a time-slice service scheme is used with seeky
++ * processes.
++ */
++static void bfq_set_budget_timeout(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ unsigned int timeout_coeff;
++
++ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
++ timeout_coeff = 1;
++ else
++ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
++
++ bfqd->last_budget_start = ktime_get();
++
++ bfqq->budget_timeout = jiffies +
++ bfqd->bfq_timeout * timeout_coeff;
++
++ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff));
++}
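++
++/*
++ * A small worked example of the timeout coefficient above (illustrative
++ * only; it assumes the usual BFQ invariant that a weight-raised queue has
++ * entity.weight == entity.orig_weight * wr_coeff):
++ *
++ *   queue raised with wr_coeff = 30 (interactive weight raising):
++ *       timeout_coeff = weight / orig_weight = 30
++ *       budget_timeout = jiffies + 30 * bfq_timeout
++ *
++ *   soft real-time queue (wr_cur_max_time == bfq_wr_rt_max_time),
++ *   or non-raised queue:
++ *       timeout_coeff = 1
++ *       budget_timeout = jiffies + bfq_timeout
++ */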
++
++static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ if (bfqq) {
++ bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
++ bfq_mark_bfqq_must_alloc(bfqq);
++ bfq_clear_bfqq_fifo_expire(bfqq);
++
++ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++ BUG_ON(bfqq == bfqd->in_service_queue);
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
++ bfqq->wr_coeff > 1 &&
++ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ time_is_before_jiffies(bfqq->budget_timeout)) {
++ /*
++ * For soft real-time queues, move the start
++ * of the weight-raising period forward by the
++ * time the queue has not received any
++ * service. Otherwise, a relatively long
++ * service delay is likely to cause the
++ * weight-raising period of the queue to end,
++ * because of the short duration of the
++ * weight-raising period of a soft real-time
++ * queue. It is worth noting that this move
++ * is not so dangerous for the other queues,
++ * because soft real-time queues are not
++ * greedy.
++ *
++ * To not add a further variable, we use the
++ * overloaded field budget_timeout to
++ * determine for how long the queue has not
++ * received service, i.e., how much time has
++ * elapsed since the queue expired. However,
++ * this is a little imprecise, because
++ * budget_timeout is set to jiffies if bfqq
++ * not only expires, but also remains with no
++ * request.
++ */
++ if (time_after(bfqq->budget_timeout,
++ bfqq->last_wr_start_finish))
++ bfqq->last_wr_start_finish +=
++ jiffies - bfqq->budget_timeout;
++ else
++ bfqq->last_wr_start_finish = jiffies;
++
++ if (time_is_after_jiffies(bfqq->last_wr_start_finish)) {
++ pr_crit(
++ "BFQ WARNING:last %lu budget %lu jiffies %lu",
++ bfqq->last_wr_start_finish,
++ bfqq->budget_timeout,
++ jiffies);
++ pr_crit("diff %lu", jiffies -
++ max_t(unsigned long,
++ bfqq->last_wr_start_finish,
++ bfqq->budget_timeout));
++ bfqq->last_wr_start_finish = jiffies;
++ }
++ }
++
++ bfq_set_budget_timeout(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_in_service_queue, cur-budget = %d",
++ bfqq->entity.budget);
++ } else
++ bfq_log(bfqd, "set_in_service_queue: NULL");
++
++ bfqd->in_service_queue = bfqq;
++}
++
++/*
++ * Get and set a new queue for service.
++ */
++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
++
++ __bfq_set_in_service_queue(bfqd, bfqq);
++ return bfqq;
++}
++
++static void bfq_arm_slice_timer(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
++ struct bfq_io_cq *bic;
++ u32 sl;
++
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Processes have exited, don't wait. */
++ bic = bfqd->in_service_bic;
++ if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0)
++ return;
++
++ bfq_mark_bfqq_wait_request(bfqq);
++
++ /*
++ * We don't want to idle for seeks, but we do want to allow
++ * fair distribution of slice time for a process doing back-to-back
++	 * seeks. So allow a little bit of time for it to submit a new rq.
++ *
++ * To prevent processes with (partly) seeky workloads from
++ * being too ill-treated, grant them a small fraction of the
++ * assigned budget before reducing the waiting time to
++ * BFQ_MIN_TT. This happened to help reduce latency.
++ */
++ sl = bfqd->bfq_slice_idle;
++ /*
++ * Unless the queue is being weight-raised or the scenario is
++ * asymmetric, grant only minimum idle time if the queue
++ * is seeky. A long idling is preserved for a weight-raised
++	 * queue, or, more generally, in an asymmetric scenario,
++ * because a long idling is needed for guaranteeing to a queue
++ * its reserved share of the throughput (in particular, it is
++ * needed if the queue has a higher weight than some other
++ * queue).
++ */
++ if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
++ bfq_symmetric_scenario(bfqd))
++ sl = min_t(u32, sl, BFQ_MIN_TT);
++
++ bfqd->last_idling_start = ktime_get();
++ hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
++ HRTIMER_MODE_REL);
++ bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
++ bfq_log(bfqd, "arm idle: %ld/%ld ms",
++ sl / NSEC_PER_MSEC, bfqd->bfq_slice_idle / NSEC_PER_MSEC);
++}
++
++/*
++ * In autotuning mode, max_budget is dynamically recomputed as the
++ * amount of sectors transferred in timeout at the estimated peak
++ * rate. This enables BFQ to utilize a full timeslice with a full
++ * budget, even if the in-service queue is served at peak rate. And
++ * this maximises throughput with sequential workloads.
++ */
++static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
++{
++ return (u64)bfqd->peak_rate * USEC_PER_MSEC *
++ jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
++}
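++
++/*
++ * Note on units (a restatement, with an illustrative example): peak_rate
++ * is kept in sectors/usec, left-shifted by BFQ_RATE_SHIFT, so the line
++ * above boils down to
++ *
++ *   max_budget = peak_rate * timeout        [in sectors]
++ *
++ * Assuming, purely for illustration, a device sustaining ~100 MiB/s
++ * (~204800 sectors/s, i.e. ~0.2048 sectors/usec) and a budget timeout of
++ * 125 ms, the autotuned budget would be about
++ * 0.2048 * 125000 ~= 25600 sectors, i.e. ~12.5 MiB per full timeslice.
++ */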
++
++/*
++ * Update parameters related to throughput and responsiveness, as a
++ * function of the estimated peak rate. See comments on
++ * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
++ */
++static void update_thr_responsiveness_params(struct bfq_data *bfqd)
++{
++ int dev_type = blk_queue_nonrot(bfqd->queue);
++
++ if (bfqd->bfq_user_max_budget == 0) {
++ bfqd->bfq_max_budget =
++ bfq_calc_max_budget(bfqd);
++ BUG_ON(bfqd->bfq_max_budget < 0);
++ bfq_log(bfqd, "new max_budget = %d",
++ bfqd->bfq_max_budget);
++ }
++
++ if (bfqd->device_speed == BFQ_BFQD_FAST &&
++ bfqd->peak_rate < device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_SLOW;
++ bfqd->RT_prod = R_slow[dev_type] *
++ T_slow[dev_type];
++ } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
++ bfqd->peak_rate > device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_FAST;
++ bfqd->RT_prod = R_fast[dev_type] *
++ T_fast[dev_type];
++ }
++
++ bfq_log(bfqd,
++"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec",
++ dev_type == 0 ? "ROT" : "NONROT",
++ bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
++ bfqd->device_speed == BFQ_BFQD_FAST ?
++ (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
++ (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
++ (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
++ BFQ_RATE_SHIFT);
++}
++
++static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq)
++{
++ if (rq != NULL) { /* new rq dispatch now, reset accordingly */
++		bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
++ bfqd->peak_rate_samples = 1;
++ bfqd->sequential_samples = 0;
++ bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
++ blk_rq_sectors(rq);
++ } else /* no new rq dispatched, just reset the number of samples */
++ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
++
++ bfq_log(bfqd,
++ "reset_rate_computation at end, sample %u/%u tot_sects %llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ bfqd->tot_sectors_dispatched);
++}
++
++static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
++{
++ u32 rate, weight, divisor;
++
++ /*
++ * For the convergence property to hold (see comments on
++ * bfq_update_peak_rate()) and for the assessment to be
++ * reliable, a minimum number of samples must be present, and
++ * a minimum amount of time must have elapsed. If not so, do
++ * not compute new rate. Just reset parameters, to get ready
++ * for a new evaluation attempt.
++ */
++ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
++ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) {
++ bfq_log(bfqd,
++ "update_rate_reset: only resetting, delta_first %lluus samples %d",
++ bfqd->delta_from_first>>10, bfqd->peak_rate_samples);
++ goto reset_computation;
++ }
++
++ /*
++ * If a new request completion has occurred after last
++ * dispatch, then, to approximate the rate at which requests
++ * have been served by the device, it is more precise to
++ * extend the observation interval to the last completion.
++ */
++ bfqd->delta_from_first =
++ max_t(u64, bfqd->delta_from_first,
++ bfqd->last_completion - bfqd->first_dispatch);
++
++ BUG_ON(bfqd->delta_from_first == 0);
++ /*
++ * Rate computed in sects/usec, and not sects/nsec, for
++ * precision issues.
++ */
++ rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
++ div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
++
++ bfq_log(bfqd,
++"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
++ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ rate > 20<<BFQ_RATE_SHIFT);
++
++ /*
++ * Peak rate not updated if:
++ * - the percentage of sequential dispatches is below 3/4 of the
++ * total, and rate is below the current estimated peak rate
++ * - rate is unreasonably high (> 20M sectors/sec)
++ */
++ if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
++ rate <= bfqd->peak_rate) ||
++ rate > 20<<BFQ_RATE_SHIFT) {
++ bfq_log(bfqd,
++ "update_rate_reset: goto reset, samples %u/%u rate/peak %llu/%llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ goto reset_computation;
++ } else {
++ bfq_log(bfqd,
++ "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ }
++
++ /*
++ * We have to update the peak rate, at last! To this purpose,
++ * we use a low-pass filter. We compute the smoothing constant
++ * of the filter as a function of the 'weight' of the new
++ * measured rate.
++ *
++ * As can be seen in next formulas, we define this weight as a
++ * quantity proportional to how sequential the workload is,
++ * and to how long the observation time interval is.
++ *
++ * The weight runs from 0 to 8. The maximum value of the
++ * weight, 8, yields the minimum value for the smoothing
++ * constant. At this minimum value for the smoothing constant,
++ * the measured rate contributes for half of the next value of
++ * the estimated peak rate.
++ *
++ * So, the first step is to compute the weight as a function
++ * of how sequential the workload is. Note that the weight
++ * cannot reach 9, because bfqd->sequential_samples cannot
++ * become equal to bfqd->peak_rate_samples, which, in its
++ * turn, holds true because bfqd->sequential_samples is not
++ * incremented for the first sample.
++ */
++ weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
++
++ /*
++ * Second step: further refine the weight as a function of the
++ * duration of the observation interval.
++ */
++ weight = min_t(u32, 8,
++ div_u64(weight * bfqd->delta_from_first,
++ BFQ_RATE_REF_INTERVAL));
++
++ /*
++ * Divisor ranging from 10, for minimum weight, to 2, for
++ * maximum weight.
++ */
++ divisor = 10 - weight;
++ BUG_ON(divisor == 0);
++
++ /*
++ * Finally, update peak rate:
++ *
++ * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
++ */
++ bfqd->peak_rate *= divisor-1;
++ bfqd->peak_rate /= divisor;
++ rate /= divisor; /* smoothing constant alpha = 1/divisor */
++
++ bfq_log(bfqd,
++ "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u",
++ divisor,
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT),
++ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT));
++
++ BUG_ON(bfqd->peak_rate == 0);
++ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
++
++ bfqd->peak_rate += rate;
++ update_thr_responsiveness_params(bfqd);
++ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
++
++reset_computation:
++ bfq_reset_rate_computation(bfqd, rq);
++}
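++
++/*
++ * Restating the filter above in one line (no new logic):
++ *
++ *   peak_rate = peak_rate * (divisor - 1) / divisor + rate / divisor
++ *
++ * with divisor = 10 - weight and weight in [0, 8]. Two extreme cases:
++ *
++ *   weight = 8 (long, fully sequential interval): divisor = 2, so the
++ *     new sample and the old estimate each contribute one half;
++ *   weight = 0 (short or mostly seeky interval): divisor = 10, so the
++ *     new sample contributes only one tenth of the next estimate.
++ */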
++
++/*
++ * Update the read/write peak rate (the main quantity used for
++ * auto-tuning, see update_thr_responsiveness_params()).
++ *
++ * It is not trivial to estimate the peak rate (correctly): because of
++ * the presence of sw and hw queues between the scheduler and the
++ * device components that finally serve I/O requests, it is hard to
++ * say exactly when a given dispatched request is served inside the
++ * device, and for how long. As a consequence, it is hard to know
++ * precisely at what rate a given set of requests is actually served
++ * by the device.
++ *
++ * On the opposite end, the dispatch time of any request is trivially
++ * available, and, from this piece of information, the "dispatch rate"
++ * of requests can be immediately computed. So, the idea in the next
++ * function is to use what is known, namely request dispatch times
++ * (plus, when useful, request completion times), to estimate what is
++ * unknown, namely in-device request service rate.
++ *
++ * The main issue is that, because of the above facts, the rate at
++ * which a certain set of requests is dispatched over a certain time
++ * interval can vary greatly with respect to the rate at which the
++ * same requests are then served. But, since the size of any
++ * intermediate queue is limited, and the service scheme is lossless
++ * (no request is silently dropped), the following obvious convergence
++ * property holds: the number of requests dispatched MUST become
++ * closer and closer to the number of requests completed as the
++ * observation interval grows. This is the key property used in
++ * the next function to estimate the peak service rate as a function
++ * of the observed dispatch rate. The function assumes to be invoked
++ * on every request dispatch.
++ */
++static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
++{
++ u64 now_ns = ktime_get_ns();
++
++ if (bfqd->peak_rate_samples == 0) { /* first dispatch */
++ bfq_log(bfqd,
++ "update_peak_rate: goto reset, samples %d",
++			bfqd->peak_rate_samples);
++ bfq_reset_rate_computation(bfqd, rq);
++ goto update_last_values; /* will add one sample */
++ }
++
++ /*
++ * Device idle for very long: the observation interval lasting
++ * up to this dispatch cannot be a valid observation interval
++ * for computing a new peak rate (similarly to the late-
++ * completion event in bfq_completed_request()). Go to
++ * update_rate_and_reset to have the following three steps
++ * taken:
++ * - close the observation interval at the last (previous)
++ * request dispatch or completion
++ * - compute rate, if possible, for that observation interval
++ * - start a new observation interval with this dispatch
++ */
++ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
++ bfqd->rq_in_driver == 0) {
++ bfq_log(bfqd,
++"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d",
++ (now_ns - bfqd->last_dispatch)>>10,
++			bfqd->peak_rate_samples);
++ goto update_rate_and_reset;
++ }
++
++ /* Update sampling information */
++ bfqd->peak_rate_samples++;
++
++ if ((bfqd->rq_in_driver > 0 ||
++ now_ns - bfqd->last_completion < BFQ_MIN_TT)
++ && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
++ bfqd->sequential_samples++;
++
++ bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
++
++ /* Reset max observed rq size every 32 dispatches */
++ if (likely(bfqd->peak_rate_samples % 32))
++ bfqd->last_rq_max_size =
++ max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
++ else
++ bfqd->last_rq_max_size = blk_rq_sectors(rq);
++
++ bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
++
++ bfq_log(bfqd,
++ "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ bfqd->tot_sectors_dispatched,
++ bfqd->delta_from_first>>10);
++
++ /* Target observation interval not yet reached, go on sampling */
++ if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
++ goto update_last_values;
++
++update_rate_and_reset:
++ bfq_update_rate_reset(bfqd, rq);
++update_last_values:
++ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
++ bfqd->last_dispatch = now_ns;
++
++ bfq_log(bfqd,
++ "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu",
++ (now_ns - bfqd->first_dispatch)>>10,
++ (unsigned long long) bfqd->last_position,
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ bfq_log(bfqd,
++ "update_peak_rate: samples at end %d", bfqd->peak_rate_samples);
++}
++
++/*
++ * Move request from internal lists to the dispatch list of the request queue
++ */
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ /*
++ * For consistency, the next instruction should have been executed
++ * after removing the request from the queue and dispatching it.
++ * We execute instead this instruction before bfq_remove_request()
++ * (and hence introduce a temporary inconsistency), for efficiency.
++	 * In fact, in a forced_dispatch, this prevents the two counters
++	 * related to bfqq->dispatched from being uselessly decremented if
++	 * bfqq is not in service, and then incremented again after
++	 * incrementing bfqq->dispatched.
++ */
++ bfqq->dispatched++;
++ bfq_update_peak_rate(q->elevator->elevator_data, rq);
++
++ bfq_remove_request(rq);
++ elv_dispatch_sort(q, rq);
++}
++
++static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ /*
++ * If this bfqq is shared between multiple processes, check
++ * to make sure that those processes are still issuing I/Os
++ * within the mean seek distance. If not, it may be time to
++ * break the queues apart again.
++ */
++ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
++ bfq_mark_bfqq_split_coop(bfqq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ if (bfqq->dispatched == 0)
++ /*
++ * Overloading budget_timeout field to store
++ * the time at which the queue remains with no
++ * backlog and no outstanding request; used by
++ * the weight-raising mechanism.
++ */
++ bfqq->budget_timeout = jiffies;
++
++ bfq_del_bfqq_busy(bfqd, bfqq, true);
++ } else {
++ bfq_requeue_bfqq(bfqd, bfqq);
++ /*
++ * Resort priority tree of potential close cooperators.
++ */
++ bfq_pos_tree_add_move(bfqd, bfqq);
++ }
++
++ /*
++ * All in-service entities must have been properly deactivated
++ * or requeued before executing the next function, which
++	 * resets all in-service entities as no more in service.
++ */
++ __bfq_bfqd_reset_in_service(bfqd);
++}
++
++/**
++ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
++ * @bfqd: device data.
++ * @bfqq: queue to update.
++ * @reason: reason for expiration.
++ *
++ * Handle the feedback on @bfqq budget at queue expiration.
++ * See the body for detailed comments.
++ */
++static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ enum bfqq_expiration reason)
++{
++ struct request *next_rq;
++ int budget, min_budget;
++
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ min_budget = bfq_min_budget(bfqd);
++
++ if (bfqq->wr_coeff == 1)
++ budget = bfqq->max_budget;
++ else /*
++ * Use a constant, low budget for weight-raised queues,
++ * to help achieve a low latency. Keep it slightly higher
++ * than the minimum possible budget, to cause a little
++ * bit fewer expirations.
++ */
++ budget = 2 * min_budget;
++
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
++ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
++ budget, bfq_min_budget(bfqd));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
++
++ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
++ switch (reason) {
++ /*
++ * Caveat: in all the following cases we trade latency
++ * for throughput.
++ */
++ case BFQ_BFQQ_TOO_IDLE:
++ /*
++ * This is the only case where we may reduce
++ * the budget: if there is no request of the
++ * process still waiting for completion, then
++ * we assume (tentatively) that the timer has
++ * expired because the batch of requests of
++ * the process could have been served with a
++			 * smaller budget. Hence, betting that the
++ * process will behave in the same way when it
++ * becomes backlogged again, we reduce its
++ * next budget. As long as we guess right,
++ * this budget cut reduces the latency
++ * experienced by the process.
++ *
++ * However, if there are still outstanding
++ * requests, then the process may have not yet
++ * issued its next request just because it is
++ * still waiting for the completion of some of
++ * the still outstanding ones. So in this
++ * subcase we do not reduce its budget, on the
++ * contrary we increase it to possibly boost
++ * the throughput, as discussed in the
++ * comments to the BUDGET_TIMEOUT case.
++ */
++ if (bfqq->dispatched > 0) /* still outstanding reqs */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ else {
++ if (budget > 5 * min_budget)
++ budget -= 4 * min_budget;
++ else
++ budget = min_budget;
++ }
++ break;
++ case BFQ_BFQQ_BUDGET_TIMEOUT:
++ /*
++ * We double the budget here because it gives
++ * the chance to boost the throughput if this
++ * is not a seeky process (and has bumped into
++ * this timeout because of, e.g., ZBR).
++ */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_BUDGET_EXHAUSTED:
++ /*
++ * The process still has backlog, and did not
++ * let either the budget timeout or the disk
++ * idling timeout expire. Hence it is not
++ * seeky, has a short thinktime and may be
++ * happy with a higher budget too. So
++ * definitely increase the budget of this good
++ * candidate to boost the disk throughput.
++ */
++ budget = min(budget * 4, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_NO_MORE_REQUESTS:
++ /*
++ * For queues that expire for this reason, it
++ * is particularly important to keep the
++ * budget close to the actual service they
++ * need. Doing so reduces the timestamp
++ * misalignment problem described in the
++ * comments in the body of
++ * __bfq_activate_entity. In fact, suppose
++ * that a queue systematically expires for
++ * BFQ_BFQQ_NO_MORE_REQUESTS and presents a
++ * new request in time to enjoy timestamp
++ * back-shifting. The larger the budget of the
++ * queue is with respect to the service the
++ * queue actually requests in each service
++ * slot, the more times the queue can be
++ * reactivated with the same virtual finish
++ * time. It follows that, even if this finish
++ * time is pushed to the system virtual time
++ * to reduce the consequent timestamp
++ * misalignment, the queue unjustly enjoys for
++ * many re-activations a lower finish time
++ * than all newly activated queues.
++ *
++ * The service needed by bfqq is measured
++ * quite precisely by bfqq->entity.service.
++ * Since bfqq does not enjoy device idling,
++ * bfqq->entity.service is equal to the number
++ * of sectors that the process associated with
++ * bfqq requested to read/write before waiting
++ * for request completions, or blocking for
++ * other reasons.
++ */
++ budget = max_t(int, bfqq->entity.service, min_budget);
++ break;
++ default:
++ return;
++ }
++ } else if (!bfq_bfqq_sync(bfqq))
++ /*
++ * Async queues get always the maximum possible
++ * budget, as for them we do not care about latency
++ * (in addition, their ability to dispatch is limited
++ * by the charging factor).
++ */
++ budget = bfqd->bfq_max_budget;
++
++ bfqq->max_budget = budget;
++
++ if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
++ !bfqd->bfq_user_max_budget)
++ bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
++
++ /*
++ * If there is still backlog, then assign a new budget, making
++ * sure that it is large enough for the next request. Since
++ * the finish time of bfqq must be kept in sync with the
++ * budget, be sure to call __bfq_bfqq_expire() *after* this
++ * update.
++ *
++ * If there is no backlog, then no need to update the budget;
++ * it will be updated on the arrival of a new request.
++ */
++ next_rq = bfqq->next_rq;
++ if (next_rq) {
++ BUG_ON(reason == BFQ_BFQQ_TOO_IDLE ||
++ reason == BFQ_BFQQ_NO_MORE_REQUESTS);
++ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ BUG_ON(!bfq_bfqq_busy(bfqq));
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
++ next_rq ? blk_rq_sectors(next_rq) : 0,
++ bfqq->entity.budget);
++}
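++
++/*
++ * Compact summary of the feedback rules implemented above (no new
++ * behaviour, just a restatement):
++ *
++ *   weight-raised queue            -> constant budget of 2 * min_budget
++ *   TOO_IDLE, no outstanding reqs  -> budget shrunk by 4 * min_budget
++ *                                     (never below min_budget)
++ *   TOO_IDLE, reqs outstanding     -> budget doubled
++ *   BUDGET_TIMEOUT                 -> budget doubled
++ *   BUDGET_EXHAUSTED               -> budget quadrupled
++ *   NO_MORE_REQUESTS               -> budget set to the service actually
++ *                                     received (never below min_budget)
++ *   async queue                    -> always bfq_max_budget
++ *
++ * Increases are bounded by bfqd->bfq_max_budget and, once enough budgets
++ * have been assigned and no user-set maximum is in force, the final value
++ * is capped at bfqd->bfq_max_budget as well.
++ */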
++
++/*
++ * Return true if the process associated with bfqq is "slow". The slow
++ * flag is used, in addition to the budget timeout, to reduce the
++ * amount of service provided to seeky processes, and thus reduce
++ * their chances to lower the throughput. More details in the comments
++ * on the function bfq_bfqq_expire().
++ *
++ * An important observation is in order: as discussed in the comments
++ * on the function bfq_update_peak_rate(), with devices with internal
++ * queues, it is hard if ever possible to know when and for how long
++ * an I/O request is processed by the device (apart from the trivial
++ * I/O pattern where a new request is dispatched only after the
++ * previous one has been completed). This makes it hard to evaluate
++ * the real rate at which the I/O requests of each bfq_queue are
++ * served. In fact, for an I/O scheduler like BFQ, serving a
++ * bfq_queue means just dispatching its requests during its service
++ * slot (i.e., until the budget of the queue is exhausted, or the
++ * queue remains idle, or, finally, a timeout fires). But, during the
++ * service slot of a bfq_queue, around 100 ms at most, the device may
++ * be even still processing requests of bfq_queues served in previous
++ * service slots. On the opposite end, the requests of the in-service
++ * bfq_queue may be completed after the service slot of the queue
++ * finishes.
++ *
++ * Anyway, unless more sophisticated solutions are used
++ * (where possible), the sum of the sizes of the requests dispatched
++ * during the service slot of a bfq_queue is probably the only
++ * approximation available for the service received by the bfq_queue
++ * during its service slot. And this sum is the quantity used in this
++ * function to evaluate the I/O speed of a process.
++ */
++static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ bool compensate, enum bfqq_expiration reason,
++ unsigned long *delta_ms)
++{
++ ktime_t delta_ktime;
++ u32 delta_usecs;
++ bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
++
++ if (!bfq_bfqq_sync(bfqq))
++ return false;
++
++ if (compensate)
++ delta_ktime = bfqd->last_idling_start;
++ else
++ delta_ktime = ktime_get();
++ delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
++ delta_usecs = ktime_to_us(delta_ktime);
++
++ /* don't use too short time intervals */
++ if (delta_usecs < 1000) {
++ if (blk_queue_nonrot(bfqd->queue))
++ /*
++ * give same worst-case guarantees as idling
++ * for seeky
++ */
++ *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
++ else /* charge at least one seek */
++ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
++
++ bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs);
++
++ return slow;
++ }
++
++ *delta_ms = delta_usecs / USEC_PER_MSEC;
++
++ /*
++ * Use only long (> 20ms) intervals to filter out excessive
++ * spikes in service rate estimation.
++ */
++ if (delta_usecs > 20000) {
++ /*
++ * Caveat for rotational devices: processes doing I/O
++ * in the slower disk zones tend to be slow(er) even
++ * if not seeky. In this respect, the estimated peak
++ * rate is likely to be an average over the disk
++ * surface. Accordingly, to not be too harsh with
++ * unlucky processes, a process is deemed slow only if
++ * its rate has been lower than half of the estimated
++ * peak rate.
++ */
++ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
++ bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d",
++ bfqq->entity.service, bfqd->bfq_max_budget);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
++
++ return slow;
++}
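++
++/*
++ * In short (restating the checks above): observation intervals shorter
++ * than 1 ms are considered too noisy, so the queue's seekiness is used as
++ * the slow flag and the reported duration is clamped to a minimum
++ * (BFQ_MIN_TT on non-rotational devices, one bfq_slice_idle otherwise);
++ * the service-based criterion, i.e. service received below half of
++ * bfq_max_budget, is applied only to intervals longer than 20 ms, while
++ * in between the seekiness flag again decides.
++ */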
++
++/*
++ * To be deemed as soft real-time, an application must meet two
++ * requirements. First, the application must not require an average
++ * bandwidth higher than the approximate bandwidth required to playback or
++ * record a compressed high-definition video.
++ * The next function is invoked on the completion of the last request of a
++ * batch, to compute the next-start time instant, soft_rt_next_start, such
++ * that, if the next request of the application does not arrive before
++ * soft_rt_next_start, then the above requirement on the bandwidth is met.
++ *
++ * The second requirement is that the request pattern of the application is
++ * isochronous, i.e., that, after issuing a request or a batch of requests,
++ * the application stops issuing new requests until all its pending requests
++ * have been completed. After that, the application may issue a new batch,
++ * and so on.
++ * For this reason the next function is invoked to compute
++ * soft_rt_next_start only for applications that meet this requirement,
++ * whereas soft_rt_next_start is set to infinity for applications that do
++ * not.
++ *
++ * Unfortunately, even a greedy application may happen to behave in an
++ * isochronous way if the CPU load is high. In fact, the application may
++ * stop issuing requests while the CPUs are busy serving other processes,
++ * then restart, then stop again for a while, and so on. In addition, if
++ * the disk achieves a low enough throughput with the request pattern
++ * issued by the application (e.g., because the request pattern is random
++ * and/or the device is slow), then the application may meet the above
++ * bandwidth requirement too. To prevent such a greedy application from
++ * being deemed soft real-time, a further rule is used in the computation of
++ * soft_rt_next_start: soft_rt_next_start must be higher than the current
++ * time plus the maximum time for which the arrival of a request is waited
++ * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
++ * This filters out greedy applications, as the latter issue instead their
++ * next request as soon as possible after the last one has been completed
++ * (in contrast, when a batch of requests is completed, a soft real-time
++ * application spends some time processing data).
++ *
++ * Unfortunately, the last filter may easily generate false positives if
++ * only bfqd->bfq_slice_idle is used as a reference time interval and one
++ * or both the following cases occur:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or higher
++ * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
++ * HZ=100.
++ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
++ * for a while, then suddenly 'jump' by several units to recover the lost
++ * increments. This seems to happen, e.g., inside virtual machines.
++ * To address this issue, we do not use as a reference time interval just
++ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
++ * particular we add the minimum number of jiffies for which the filter
++ * seems to be quite precise also in embedded systems and KVM/QEMU virtual
++ * machines.
++ */
++static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq,
++"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u",
++ bfqq->service_from_backlogged,
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate));
++
++ return max(bfqq->last_idle_bklogged +
++ HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
++}
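++
++/*
++ * Worked example for the formula above (figures assumed, not mandated
++ * by the code): with HZ = 100, bfq_wr_max_softrt_rate = 7000
++ * sectors/sec and service_from_backlogged = 2100 sectors, the first
++ * term of the max() allows the next batch no earlier than
++ * last_idle_bklogged + 100 * 2100 / 7000 = 30 jiffies (300 ms); the
++ * second term guarantees in any case at least bfq_slice_idle plus 4
++ * jiffies from now, which filters out greedy applications as explained
++ * above.
++ */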
++
++/*
++ * Return the farthest future time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_greatest_from_now(void)
++{
++ return jiffies + MAX_JIFFY_OFFSET;
++}
++
++/*
++ * Return the farthest past time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_smallest_from_now(void)
++{
++ return jiffies - MAX_JIFFY_OFFSET;
++}
++
++/**
++ * bfq_bfqq_expire - expire a queue.
++ * @bfqd: device owning the queue.
++ * @bfqq: the queue to expire.
++ * @compensate: if true, compensate for the time spent idling.
++ * @reason: the reason causing the expiration.
++ *
++ * If the process associated with bfqq does slow I/O (e.g., because it
++ * issues random requests), we charge bfqq with the time it has been
++ * in service instead of the service it has received (see
++ * bfq_bfqq_charge_time for details on how this goal is achieved). As
++ * a consequence, bfqq will typically get higher timestamps upon
++ * reactivation, and hence it will be rescheduled as if it had
++ * received more service than what it has actually received. In the
++ * end, bfqq receives less service in proportion to how slowly its
++ * associated process consumes its budgets (and hence how seriously it
++ * tends to lower the throughput). In addition, this time-charging
++ * strategy guarantees time fairness among slow processes. In
++ * contrast, if the process associated with bfqq is not slow, we
++ * charge bfqq exactly with the service it has received.
++ *
++ * Charging time to the first type of queues and the exact service to
++ * the other has the effect of using the WF2Q+ policy to schedule the
++ * former on a timeslice basis, without violating service domain
++ * guarantees among the latter.
++ */
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool compensate,
++ enum bfqq_expiration reason)
++{
++ bool slow;
++ unsigned long delta = 0;
++ struct bfq_entity *entity = &bfqq->entity;
++ int ref;
++
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ /*
++ * Check whether the process is slow (see bfq_bfqq_is_slow).
++ */
++ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
++
++ /*
++ * Increase service_from_backlogged before next statement,
++ * because the possible next invocation of
++ * bfq_bfqq_charge_time would likely inflate
++ * entity->service. In contrast, service_from_backlogged must
++ * contain real service, to enable the soft real-time
++ * heuristic to correctly compute the bandwidth consumed by
++ * bfqq.
++ */
++ bfqq->service_from_backlogged += entity->service;
++
++ /*
++ * As above explained, charge slow (typically seeky) and
++ * timed-out queues with the time and not the service
++ * received, to favor sequential workloads.
++ *
++ * Processes doing I/O in the slower disk zones will tend to
++ * be slow(er) even if not seeky. Therefore, since the
++ * estimated peak rate is actually an average over the disk
++ * surface, these processes may timeout just for bad luck. To
++ * avoid punishing them, do not charge time to processes that
++ * succeeded in consuming at least 2/3 of their budget. This
++ * allows BFQ to preserve enough elasticity to still perform
++ * bandwidth, and not time, distribution with processes that are
++ * only slightly unlucky or quasi-sequential.
++ */
++ if (bfqq->wr_coeff == 1 &&
++ (slow ||
++ (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++ bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
++ bfq_bfqq_charge_time(bfqd, bfqq, delta);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ if (reason == BFQ_BFQQ_TOO_IDLE &&
++ entity->service <= 2 * entity->budget / 10)
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ if (bfqd->low_latency && bfqq->wr_coeff == 1)
++ bfqq->last_wr_start_finish = jiffies;
++
++ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ /*
++ * If we get here, and there are no outstanding
++ * requests, then the request pattern is isochronous
++ * (see the comments on the function
++ * bfq_bfqq_softrt_next_start()). Thus we can compute
++ * soft_rt_next_start. If, instead, the queue still
++ * has outstanding requests, then we have to wait for
++ * the completion of all the outstanding requests to
++ * discover whether the request pattern is actually
++ * isochronous.
++ */
++ BUG_ON(bfqd->busy_queues < 1);
++ if (bfqq->dispatched == 0) {
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq, "new soft_rt_next %lu",
++ bfqq->soft_rt_next_start);
++ } else {
++ /*
++ * The application is still waiting for the
++ * completion of one or more requests:
++ * prevent it from possibly being incorrectly
++ * deemed as soft real-time by setting its
++ * soft_rt_next_start to infinity. In fact,
++ * without this assignment, the application
++ * would be incorrectly deemed as soft
++ * real-time if:
++ * 1) it issued a new request before the
++ * completion of all its in-flight
++ * requests, and
++ * 2) at that time, its soft_rt_next_start
++ * happened to be in the past.
++ */
++ bfqq->soft_rt_next_start =
++ bfq_greatest_from_now();
++ /*
++ * Schedule an update of soft_rt_next_start to when
++ * the task may be discovered to be isochronous.
++ */
++ bfq_mark_bfqq_softrt_update(bfqq);
++ }
++ }
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "expire (%d, slow %d, num_disp %d, idle_win %d, weight %d)",
++ reason, slow, bfqq->dispatched,
++ bfq_bfqq_idle_window(bfqq), entity->weight);
++
++ /*
++ * Increase, decrease or leave budget unchanged according to
++ * reason.
++ */
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
++ BUG_ON(bfqq->next_rq == NULL &&
++ bfqq->entity.budget < bfqq->entity.service);
++ ref = bfqq->ref;
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ BUG_ON(ref > 1 &&
++ !bfq_bfqq_busy(bfqq) && reason == BFQ_BFQQ_BUDGET_EXHAUSTED &&
++ !bfq_class_idle(bfqq));
++
++ /* mark bfqq as waiting for a request only if a bic still points to it */
++ if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
++ reason != BFQ_BFQQ_BUDGET_TIMEOUT &&
++ reason != BFQ_BFQQ_BUDGET_EXHAUSTED)
++ bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
++}
++
++/*
++ * Budget timeout is not implemented through a dedicated timer, but
++ * just checked on request arrivals and completions, as well as on
++ * idle timer expirations.
++ */
++static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
++{
++ return time_is_before_eq_jiffies(bfqq->budget_timeout);
++}
++
++/*
++ * If we expire a queue that is actively waiting (i.e., with the
++ * device idled) for the arrival of a new request, then we may incur
++ * the timestamp misalignment problem described in the body of the
++ * function __bfq_activate_entity. Hence we return true only if this
++ * condition does not hold, or if the queue is so slow that it only
++ * deserves to be kicked off, in order to preserve a high throughput.
++ */
++static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "may_budget_timeout: wait_request %d left %d timeout %d",
++ bfq_bfqq_wait_request(bfqq),
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
++ bfq_bfqq_budget_timeout(bfqq));
++
++ return (!bfq_bfqq_wait_request(bfqq) ||
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
++ &&
++ bfq_bfqq_budget_timeout(bfqq);
++}
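++
++/*
++ * In other words: a timed-out queue is expired if it is not actively
++ * waiting for a new request or if, despite waiting, it has consumed
++ * less than two thirds of its budget (i.e., at least a third of the
++ * budget is still left, which marks the queue as slow).
++ */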
++
++/*
++ * For a queue that becomes empty, device idling is allowed only if
++ * this function returns true for that queue. As a consequence, since
++ * device idling plays a critical role for both throughput boosting
++ * and service guarantees, the return value of this function plays a
++ * critical role as well.
++ *
++ * In a nutshell, this function returns true only if idling is
++ * beneficial for throughput or, even if detrimental for throughput,
++ * idling is however necessary to preserve service guarantees (low
++ * latency, desired throughput distribution, ...). In particular, on
++ * NCQ-capable devices, this function tries to return false, so as to
++ * help keep the drives' internal queues full, whenever this helps the
++ * device boost the throughput without causing any service-guarantee
++ * issue.
++ *
++ * In more detail, the return value of this function is obtained by,
++ * first, computing a number of boolean variables that take into
++ * account throughput and service-guarantee issues, and, then,
++ * combining these variables in a logical expression. Most of the
++ * issues taken into account are not trivial. We discuss these issues
++ * while introducing the variables.
++ */
++static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++ bool idling_boosts_thr, idling_boosts_thr_without_issues,
++ idling_needed_for_service_guarantees,
++ asymmetric_scenario;
++
++ if (bfqd->strict_guarantees)
++ return true;
++
++ /*
++ * The next variable takes into account the cases where idling
++ * boosts the throughput.
++ *
++ * The value of the variable is computed considering, first, that
++ * idling is virtually always beneficial for the throughput if:
++ * (a) the device is not NCQ-capable, or
++ * (b) regardless of the presence of NCQ, the device is rotational
++ * and the request pattern for bfqq is I/O-bound and sequential.
++ *
++ * Secondly, and in contrast to the above item (b), idling an
++ * NCQ-capable flash-based device would not boost the
++ * throughput even with sequential I/O; rather it would lower
++ * the throughput in proportion to how fast the device
++ * is. Accordingly, the next variable is true if any of the
++ * above conditions (a) and (b) is true, and, in particular,
++ * happens to be false if bfqd is an NCQ-capable flash-based
++ * device.
++ */
++ idling_boosts_thr = !bfqd->hw_tag ||
++ (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
++ bfq_bfqq_idle_window(bfqq));
++
++ /*
++ * The value of the next variable,
++ * idling_boosts_thr_without_issues, is equal to that of
++ * idling_boosts_thr, unless a special case holds. In this
++ * special case, described below, idling may cause problems to
++ * weight-raised queues.
++ *
++ * When the request pool is saturated (e.g., in the presence
++ * of write hogs), if the processes associated with
++ * non-weight-raised queues ask for requests at a lower rate,
++ * then processes associated with weight-raised queues have a
++ * higher probability to get a request from the pool
++ * immediately (or at least soon) when they need one. Thus
++ * they have a higher probability to actually get a fraction
++ * of the device throughput proportional to their high
++ * weight. This is especially true with NCQ-capable drives,
++ * which enqueue several requests in advance, and further
++ * reorder internally-queued requests.
++ *
++ * For this reason, we force to false the value of
++ * idling_boosts_thr_without_issues if there are weight-raised
++ * busy queues. In this case, and if bfqq is not weight-raised,
++ * this guarantees that the device is not idled for bfqq (if,
++ * instead, bfqq is weight-raised, then idling will be
++ * guaranteed by another variable, see below). Combined with
++ * the timestamping rules of BFQ (see [1] for details), this
++ * behavior causes bfqq, and hence any sync non-weight-raised
++ * queue, to get a lower number of requests served, and thus
++ * to ask for a lower number of requests from the request
++ * pool, before the busy weight-raised queues get served
++ * again. This often mitigates starvation problems in the
++ * presence of heavy write workloads and NCQ, thereby
++ * guaranteeing a higher application and system responsiveness
++ * in these hostile scenarios.
++ */
++ idling_boosts_thr_without_issues = idling_boosts_thr &&
++ bfqd->wr_busy_queues == 0;
++
++ /*
++ * There is then a case where idling must be performed not
++ * for throughput concerns, but to preserve service
++ * guarantees.
++ *
++ * To introduce this case, we can note that allowing the drive
++ * to enqueue more than one request at a time, and hence
++ * delegating de facto final scheduling decisions to the
++ * drive's internal scheduler, entails loss of control on the
++ * actual request service order. In particular, the critical
++ * situation is when requests from different processes happen
++ * to be present, at the same time, in the internal queue(s)
++ * of the drive. In such a situation, the drive, by deciding
++ * the service order of the internally-queued requests, does
++ * determine also the actual throughput distribution among
++ * these processes. But the drive typically has no notion or
++ * concern about per-process throughput distribution, and
++ * makes its decisions only on a per-request basis. Therefore,
++ * the service distribution enforced by the drive's internal
++ * scheduler is likely to coincide with the desired
++ * device-throughput distribution only in a completely
++ * symmetric scenario where:
++ * (i) each of these processes must get the same throughput as
++ * the others;
++ * (ii) all these processes have the same I/O pattern
++ * (either sequential or random).
++ * In fact, in such a scenario, the drive will tend to treat
++ * the requests of each of these processes in about the same
++ * way as the requests of the others, and thus to provide
++ * each of these processes with about the same throughput
++ * (which is exactly the desired throughput distribution). In
++ * contrast, in any asymmetric scenario, device idling is
++ * certainly needed to guarantee that bfqq receives its
++ * assigned fraction of the device throughput (see [1] for
++ * details).
++ *
++ * We address this issue by controlling, actually, only the
++ * symmetry sub-condition (i), i.e., provided that
++ * sub-condition (i) holds, idling is not performed,
++ * regardless of whether sub-condition (ii) holds. In other
++ * words, only if sub-condition (i) holds, then idling is
++ * allowed, and the device tends to be prevented from queueing
++ * many requests, possibly of several processes. The reason
++ * for not also controlling sub-condition (ii) is that we
++ * exploit preemption to preserve guarantees in case of
++ * symmetric scenarios, even if (ii) does not hold, as
++ * explained in the next two paragraphs.
++ *
++ * Even if a queue, say Q, is expired when it remains idle, Q
++ * can still preempt the new in-service queue if the next
++ * request of Q arrives soon (see the comments on
++ * bfq_bfqq_update_budg_for_activation). If all queues and
++ * groups have the same weight, this form of preemption,
++ * combined with the hole-recovery heuristic described in the
++ * comments on function bfq_bfqq_update_budg_for_activation,
++ * is enough to preserve a correct bandwidth distribution in
++ * the mid term, even without idling. In fact, even if not
++ * idling allows the internal queues of the device to contain
++ * many requests, and thus to reorder requests, we can rather
++ * safely assume that the internal scheduler still preserves a
++ * minimum of mid-term fairness. The motivation for using
++ * preemption instead of idling is that, by not idling,
++ * service guarantees are preserved without sacrificing
++ * throughput even minimally. In other words, both a high
++ * throughput and its desired distribution are obtained.
++ *
++ * More precisely, this preemption-based, idleless approach
++ * provides fairness in terms of IOPS, and not sectors per
++ * second. This can be seen with a simple example. Suppose
++ * that there are two queues with the same weight, but that
++ * the first queue receives requests of 8 sectors, while the
++ * second queue receives requests of 1024 sectors. In
++ * addition, suppose that each of the two queues contains at
++ * most one request at a time, which implies that each queue
++ * always remains idle after it is served. Finally, after
++ * remaining idle, each queue receives very quickly a new
++ * request. It follows that the two queues are served
++ * alternatively, preempting each other if needed. This
++ * implies that, although both queues have the same weight,
++ * the queue with large requests receives a service that is
++ * 1024/8 times as high as the service received by the other
++ * queue.
++ *
++ * On the other hand, device idling is performed, and thus
++ * pure sector-domain guarantees are provided, for the
++ * following queues, which are likely to need stronger
++ * throughput guarantees: weight-raised queues, and queues
++ * with a higher weight than other queues. When such queues
++ * are active, sub-condition (i) is false, which triggers
++ * device idling.
++ *
++ * According to the above considerations, the next variable is
++ * true (only) if sub-condition (i) holds. To compute the
++ * value of this variable, we not only use the return value of
++ * the function bfq_symmetric_scenario(), but also check
++ * whether bfqq is being weight-raised, because
++ * bfq_symmetric_scenario() does not also take into account
++ * weight-raised queues (see comments on
++ * bfq_weights_tree_add()).
++ *
++ * As a side note, it is worth considering that the above
++ * device-idling countermeasures may however fail in the
++ * following unlucky scenario: if idling is (correctly)
++ * disabled in a time period during which all symmetry
++ * sub-conditions hold, and hence the device is allowed to
++ * enqueue many requests, but at some later point in time some
++ * sub-condition ceases to hold, then it may become impossible
++ * to let requests be served in the desired order until all
++ * the requests already queued in the device have been served.
++ */
++ asymmetric_scenario = bfqq->wr_coeff > 1 ||
++ !bfq_symmetric_scenario(bfqd);
++
++ /*
++ * Finally, there is a case where maximizing throughput is the
++ * best choice even if it may cause unfairness toward
++ * bfqq. Such a case is when bfqq became active in a burst of
++ * queue activations. Queues that became active during a large
++ * burst benefit only from throughput, as discussed in the
++ * comments on bfq_handle_burst. Thus, if bfqq became active
++ * in a burst and not idling the device maximizes throughput,
++ * then the device must not be idled, because not idling the
++ * device provides bfqq and all other queues in the burst with
++ * maximum benefit. Combining this and the above case, we can
++ * now establish when idling is actually needed to preserve
++ * service guarantees.
++ */
++ idling_needed_for_service_guarantees =
++ asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
++
++ /*
++ * We have now all the components we need to compute the return
++ * value of the function, which is true only if both the following
++ * conditions hold:
++ * 1) bfqq is sync, because idling makes sense only for sync queues;
++ * 2) idling either boosts the throughput (without issues), or
++ * is necessary to preserve service guarantees.
++ */
++ bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d",
++ bfq_bfqq_sync(bfqq), idling_boosts_thr);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "may_idle: wr_busy %d boosts %d IO-bound %d guar %d",
++ bfqd->wr_busy_queues,
++ idling_boosts_thr_without_issues,
++ bfq_bfqq_IO_bound(bfqq),
++ idling_needed_for_service_guarantees);
++
++ return bfq_bfqq_sync(bfqq) &&
++ (idling_boosts_thr_without_issues ||
++ idling_needed_for_service_guarantees);
++}
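++
++/*
++ * Illustrative scenario (assumed hardware, not mandated by the code):
++ * on an NCQ-capable SSD (hw_tag set, non-rotational queue),
++ * idling_boosts_thr is false; if, in addition, no queue is
++ * weight-raised and the scenario is symmetric, bfq_bfqq_may_idle()
++ * returns false and the drive's internal queue is kept full. On a
++ * non-NCQ rotational disk, idling_boosts_thr is true and a sync queue
++ * is typically idled for, provided no other queue is being
++ * weight-raised.
++ */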
++
++/*
++ * If the in-service queue is empty but the function bfq_bfqq_may_idle
++ * returns true, then:
++ * 1) the queue must remain in service and cannot be expired, and
++ * 2) the device must be idled to wait for the possible arrival of a new
++ * request for the queue.
++ * See the comments on the function bfq_bfqq_may_idle for the reasons
++ * why performing device idling is the best choice to boost the throughput
++ * and preserve service guarantees when bfq_bfqq_may_idle itself
++ * returns true.
++ */
++static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
++ bfq_bfqq_may_idle(bfqq);
++}
++
++/*
++ * Select a queue for service. If we have a current queue in service,
++ * check whether to continue servicing it, or retrieve and set a new one.
++ */
++static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++ struct request *next_rq;
++ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++
++ bfqq = bfqd->in_service_queue;
++ if (!bfqq)
++ goto new_queue;
++
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++
++ if (bfq_may_expire_for_budg_timeout(bfqq) &&
++ !hrtimer_active(&bfqd->idle_slice_timer) &&
++ !bfq_bfqq_must_idle(bfqq))
++ goto expire;
++
++check_queue:
++ /*
++ * This loop is rarely executed more than once. Even when it
++ * happens, it is much more convenient to re-execute this loop
++ * than to return NULL and trigger a new dispatch to get a
++ * request served.
++ */
++ next_rq = bfqq->next_rq;
++ /*
++ * If bfqq has requests queued and it has enough budget left to
++ * serve them, keep the queue, otherwise expire it.
++ */
++ if (next_rq) {
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ if (bfq_serv_to_charge(next_rq, bfqq) >
++ bfq_bfqq_budget_left(bfqq)) {
++ /*
++ * Expire the queue for budget exhaustion,
++ * which makes sure that the next budget is
++ * enough to serve the next request, even if
++ * it comes from the fifo expired path.
++ */
++ reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
++ goto expire;
++ } else {
++ /*
++ * The idle timer may be pending because we may
++ * not disable disk idling even when a new request
++ * arrives.
++ */
++ if (bfq_bfqq_wait_request(bfqq)) {
++ BUG_ON(!hrtimer_active(&bfqd->idle_slice_timer));
++ /*
++ * If we get here, then: 1) at least one new
++ * request has arrived, but we have not
++ * disabled the timer because the request was
++ * too small; and 2) the block layer has then
++ * unplugged the device, causing this dispatch
++ * to be invoked.
++ *
++ * Since the device is unplugged, now the
++ * requests are probably large enough to
++ * provide a reasonable throughput.
++ * So we disable idling.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++ }
++ goto keep_queue;
++ }
++ }
++
++ /*
++ * No requests pending. However, if the in-service queue is idling
++ * for a new request, or has requests waiting for a completion and
++ * may idle after their completion, then keep it anyway.
++ */
++ if (hrtimer_active(&bfqd->idle_slice_timer) ||
++ (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
++ bfqq = NULL;
++ goto keep_queue;
++ }
++
++ reason = BFQ_BFQQ_NO_MORE_REQUESTS;
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, false, reason);
++new_queue:
++ bfqq = bfq_set_in_service_queue(bfqd);
++ if (bfqq) {
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
++ goto check_queue;
++ }
++keep_queue:
++ if (bfqq)
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
++ else
++ bfq_log(bfqd, "select_queue: no queue returned");
++
++ return bfqq;
++}
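++
++/*
++ * Descriptive summary of bfq_select_queue(): the in-service queue is
++ * kept if it still has requests it can afford with its remaining
++ * budget, or if it is (or may soon be) idling for a new request;
++ * otherwise it is expired for budget timeout, budget exhaustion or
++ * lack of further requests, and a new in-service queue is picked.
++ */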
++
++static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
++ BUG_ON(bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqq->wr_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
++ entity->orig_weight * bfqq->wr_coeff);
++ if (entity->prio_changed)
++ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
++
++ /*
++ * If the queue was activated in a burst, or too much
++ * time has elapsed since the beginning of this
++ * weight-raising period, then end weight raising.
++ */
++ if (bfq_bfqq_in_large_burst(bfqq))
++ bfq_bfqq_end_wr(bfqq);
++ else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time)) {
++ if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
++ time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
++ bfq_wr_duration(bfqd)))
++ bfq_bfqq_end_wr(bfqq);
++ else {
++ /* switch back to interactive wr */
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ bfqq->last_wr_start_finish =
++ bfqq->wr_start_at_switch_to_srt;
++ BUG_ON(time_is_after_jiffies(
++ bfqq->last_wr_start_finish));
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "back to interactive wr");
++ }
++ }
++ }
++ /*
++ * To improve latency (for this or other queues), immediately
++ * update weight both if it must be raised and if it must be
++ * lowered. Since the entity may be on some active tree here, and
++ * might have a pending change of its ioprio class, invoke the
++ * next function with the last parameter unset (see the
++ * comments on the function).
++ */
++ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
++ __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
++ entity, false);
++}
++
++/*
++ * Dispatch one request from bfqq, moving it to the request queue
++ * dispatch list.
++ */
++static int bfq_dispatch_request(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++ struct request *rq = bfqq->next_rq;
++ unsigned long service_to_charge;
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++ BUG_ON(!rq);
++ service_to_charge = bfq_serv_to_charge(rq, bfqq);
++
++ BUG_ON(service_to_charge > bfq_bfqq_budget_left(bfqq));
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ bfq_bfqq_served(bfqq, service_to_charge);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ bfq_dispatch_insert(bfqd->queue, rq);
++
++ /*
++ * If weight raising has to terminate for bfqq, then next
++ * function causes an immediate update of bfqq's weight,
++ * without waiting for next activation. As a consequence, on
++ * expiration, bfqq will be timestamped as if it had never been
++ * weight-raised during this service slot, even if it has
++ * received part or even most of the service as a
++ * weight-raised queue. This inflates bfqq's timestamps, which
++ * is beneficial, as bfqq is then more willing to leave the
++ * device immediately to possible other weight-raised queues.
++ */
++ bfq_update_wr_data(bfqd, bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "dispatched %u sec req (%llu), budg left %d",
++ blk_rq_sectors(rq),
++ (unsigned long long) blk_rq_pos(rq),
++ bfq_bfqq_budget_left(bfqq));
++
++ dispatched++;
++
++ if (!bfqd->in_service_bic) {
++ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
++ bfqd->in_service_bic = RQ_BIC(rq);
++ }
++
++ if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
++ goto expire;
++
++ return dispatched;
++
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, false, BFQ_BFQQ_BUDGET_EXHAUSTED);
++ return dispatched;
++}
++
++static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++
++ while (bfqq->next_rq) {
++ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
++ dispatched++;
++ }
++
++ BUG_ON(!list_empty(&bfqq->fifo));
++ return dispatched;
++}
++
++/*
++ * Drain our current requests.
++ * Used for barriers and when switching io schedulers on-the-fly.
++ */
++static int bfq_forced_dispatch(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq, *n;
++ struct bfq_service_tree *st;
++ int dispatched = 0;
++
++ bfqq = bfqd->in_service_queue;
++ if (bfqq)
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ /*
++ * Loop through classes, and be careful to leave the scheduler
++ * in a consistent state, as feedback mechanisms and vtime
++ * updates cannot be disabled during the process.
++ */
++ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
++ st = bfq_entity_service_tree(&bfqq->entity);
++
++ dispatched += __bfq_forced_dispatch_bfqq(bfqq);
++
++ bfqq->max_budget = bfq_max_budget(bfqd);
++ bfq_forget_idle(st);
++ }
++
++ BUG_ON(bfqd->busy_queues != 0);
++
++ return dispatched;
++}
++
++static int bfq_dispatch_requests(struct request_queue *q, int force)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq;
++
++ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++
++ if (bfqd->busy_queues == 0)
++ return 0;
++
++ if (unlikely(force))
++ return bfq_forced_dispatch(bfqd);
++
++ /*
++ * Force device to serve one request at a time if
++ * strict_guarantees is true. Forcing this service scheme is
++ * currently the ONLY way to guarantee that the request
++ * service order enforced by the scheduler is respected by a
++ * queueing device. Otherwise the device is free even to make
++ * some unlucky request wait for as long as the device
++ * wishes.
++ *
++ * Of course, serving one request at a time may cause loss of
++ * throughput.
++ */
++ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
++ return 0;
++
++ bfqq = bfq_select_queue(bfqd);
++ if (!bfqq)
++ return 0;
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ BUG_ON(bfq_bfqq_wait_request(bfqq));
++
++ if (!bfq_dispatch_request(bfqd, bfqq))
++ return 0;
++
++ bfq_log_bfqq(bfqd, bfqq, "dispatched %s request",
++ bfq_bfqq_sync(bfqq) ? "sync" : "async");
++
++ BUG_ON(bfqq->next_rq == NULL &&
++ bfqq->entity.budget < bfqq->entity.service);
++ return 1;
++}
++
++/*
++ * Task holds one reference to the queue, dropped when task exits. Each rq
++ * in-flight on this queue also holds a reference, dropped when rq is freed.
++ *
++ * Queue lock must be held here. Recall not to use bfqq after calling
++ * this function on it.
++ */
++static void bfq_put_queue(struct bfq_queue *bfqq)
++{
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ struct bfq_group *bfqg = bfqq_group(bfqq);
++#endif
++
++ BUG_ON(bfqq->ref <= 0);
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++ bfqq->ref--;
++ if (bfqq->ref)
++ return;
++
++ BUG_ON(rb_first(&bfqq->sort_list));
++ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
++ BUG_ON(bfqq->entity.tree);
++ BUG_ON(bfq_bfqq_busy(bfqq));
++
++ if (bfq_bfqq_sync(bfqq))
++ /*
++ * The fact that this queue is being destroyed does not
++ * invalidate the fact that this queue may have been
++ * activated during the current burst. As a consequence,
++ * although the queue does not exist anymore, and hence
++ * needs to be removed from the burst list if it is there,
++ * the burst size must not be decremented.
++ */
++ hlist_del_init(&bfqq->burst_list_node);
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
++
++ kmem_cache_free(bfq_pool, bfqq);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ bfqg_put(bfqg);
++#endif
++}
++
++static void bfq_put_cooperator(struct bfq_queue *bfqq)
++{
++ struct bfq_queue *__bfqq, *next;
++
++ /*
++ * If this queue was scheduled to merge with another queue, be
++ * sure to drop the reference taken on that queue (and others in
++ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
++ */
++ __bfqq = bfqq->new_bfqq;
++ while (__bfqq) {
++ if (__bfqq == bfqq)
++ break;
++ next = __bfqq->new_bfqq;
++ bfq_put_queue(__bfqq);
++ __bfqq = next;
++ }
++}
++
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ if (bfqq == bfqd->in_service_queue) {
++ __bfq_bfqq_expire(bfqd, bfqq);
++ bfq_schedule_dispatch(bfqd);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq); /* release process reference */
++}
++
++static void bfq_init_icq(struct io_cq *icq)
++{
++ icq_to_bic(icq)->ttime.last_end_request = ktime_get_ns() - (1ULL<<32);
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++
++ if (bic_to_bfqq(bic, false)) {
++ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, false));
++ bic_set_bfqq(bic, NULL, false);
++ }
++
++ if (bic_to_bfqq(bic, true)) {
++ /*
++ * If the bic is using a shared queue, put the reference
++ * taken on the io_context when the bic started using a
++ * shared bfq_queue.
++ */
++ if (bfq_bfqq_coop(bic_to_bfqq(bic, true)))
++ put_io_context(icq->ioc);
++ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, true));
++ bic_set_bfqq(bic, NULL, true);
++ }
++}
++
++/*
++ * Update the entity prio values; note that the new values will not
++ * be used until the next (re)activation.
++ */
++static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ struct task_struct *tsk = current;
++ int ioprio_class;
++
++ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ switch (ioprio_class) {
++ default:
++ dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
++ "bfq: bad prio class %d\n", ioprio_class);
++ case IOPRIO_CLASS_NONE:
++ /*
++ * No prio set, inherit CPU scheduling settings.
++ */
++ bfqq->new_ioprio = task_nice_ioprio(tsk);
++ bfqq->new_ioprio_class = task_nice_ioclass(tsk);
++ break;
++ case IOPRIO_CLASS_RT:
++ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
++ break;
++ case IOPRIO_CLASS_BE:
++ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
++ break;
++ case IOPRIO_CLASS_IDLE:
++ bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
++ bfqq->new_ioprio = 7;
++ bfq_clear_bfqq_idle_window(bfqq);
++ break;
++ }
++
++ if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
++ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
++ bfqq->new_ioprio);
++ BUG();
++ }
++
++ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "set_next_ioprio_data: bic_class %d prio %d class %d",
++ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class);
++}
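++
++/*
++ * Note (descriptive): the new_ioprio/new_ioprio_class values chosen
++ * above only take effect on the next (re)activation; new_ioprio is
++ * also translated by bfq_ioprio_to_weight() into the entity weight,
++ * with lower ioprio values (higher priority) mapping to larger
++ * weights.
++ */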
++
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_queue *bfqq;
++ unsigned long uninitialized_var(flags);
++ int ioprio = bic->icq.ioc->ioprio;
++
++ /*
++ * This condition may trigger on a newly created bic; be sure to
++ * drop the lock before returning.
++ */
++ if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
++ return;
++
++ bic->ioprio = ioprio;
++
++ bfqq = bic_to_bfqq(bic, false);
++ if (bfqq) {
++ /* release process reference on this queue */
++ bfq_put_queue(bfqq);
++ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
++ bic_set_bfqq(bic, bfqq, false);
++ bfq_log_bfqq(bfqd, bfqq,
++ "check_ioprio_change: bfqq %p %d",
++ bfqq, bfqq->ref);
++ }
++
++ bfqq = bic_to_bfqq(bic, true);
++ if (bfqq)
++ bfq_set_next_ioprio_data(bfqq, bic);
++}
++
++static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic, pid_t pid, int is_sync)
++{
++ RB_CLEAR_NODE(&bfqq->entity.rb_node);
++ INIT_LIST_HEAD(&bfqq->fifo);
++ INIT_HLIST_NODE(&bfqq->burst_list_node);
++ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
++
++ bfqq->ref = 0;
++ bfqq->bfqd = bfqd;
++
++ if (bic)
++ bfq_set_next_ioprio_data(bfqq, bic);
++
++ if (is_sync) {
++ if (!bfq_class_idle(bfqq))
++ bfq_mark_bfqq_idle_window(bfqq);
++ bfq_mark_bfqq_sync(bfqq);
++ bfq_mark_bfqq_just_created(bfqq);
++ } else
++ bfq_clear_bfqq_sync(bfqq);
++ bfq_mark_bfqq_IO_bound(bfqq);
++
++ /* Tentative initial value to trade off between thr and lat */
++ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
++ bfqq->pid = pid;
++
++ bfqq->wr_coeff = 1;
++ bfqq->last_wr_start_finish = jiffies;
++ bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
++ bfqq->budget_timeout = bfq_smallest_from_now();
++ bfqq->split_time = bfq_smallest_from_now();
++
++ /*
++ * Set to the value for which bfqq will not be deemed as
++ * soft rt when it becomes backlogged.
++ */
++ bfqq->soft_rt_next_start = bfq_greatest_from_now();
++
++ /* first request is almost certainly seeky */
++ bfqq->seek_history = 1;
++}
++
++static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int ioprio_class, int ioprio)
++{
++ switch (ioprio_class) {
++ case IOPRIO_CLASS_RT:
++ return &bfqg->async_bfqq[0][ioprio];
++ case IOPRIO_CLASS_NONE:
++ ioprio = IOPRIO_NORM;
++ /* fall through */
++ case IOPRIO_CLASS_BE:
++ return &bfqg->async_bfqq[1][ioprio];
++ case IOPRIO_CLASS_IDLE:
++ return &bfqg->async_idle_bfqq;
++ default:
++ BUG();
++ }
++}
++
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bio *bio, bool is_sync,
++ struct bfq_io_cq *bic)
++{
++ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ struct bfq_queue **async_bfqq = NULL;
++ struct bfq_queue *bfqq;
++ struct bfq_group *bfqg;
++
++ rcu_read_lock();
++
++ bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
++ if (!bfqg) {
++ bfqq = &bfqd->oom_bfqq;
++ goto out;
++ }
++
++ if (!is_sync) {
++ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
++ ioprio);
++ bfqq = *async_bfqq;
++ if (bfqq)
++ goto out;
++ }
++
++ bfqq = kmem_cache_alloc_node(bfq_pool,
++ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
++ bfqd->queue->node);
++
++ if (bfqq) {
++ bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
++ is_sync);
++ bfq_init_entity(&bfqq->entity, bfqg);
++ bfq_log_bfqq(bfqd, bfqq, "allocated");
++ } else {
++ bfqq = &bfqd->oom_bfqq;
++ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
++ goto out;
++ }
++
++ /*
++ * Pin the queue now that it's allocated, scheduler exit will
++ * prune it.
++ */
++ if (async_bfqq) {
++ bfqq->ref++; /*
++ * Extra group reference, w.r.t. sync
++ * queue. This extra reference is removed
++ * only if bfqq->bfqg disappears, to
++ * guarantee that this queue is not freed
++ * until its group goes away.
++ */
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfqq, bfqq->ref);
++ *async_bfqq = bfqq;
++ }
++
++out:
++ bfqq->ref++; /* get a process reference to this queue */
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
++ rcu_read_unlock();
++ return bfqq;
++}
++
++static void bfq_update_io_thinktime(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic)
++{
++ struct bfq_ttime *ttime = &bic->ttime;
++ u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request;
++
++ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle);
++
++ ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++ ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
++ ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
++ ttime->ttime_samples);
++}
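++
++/*
++ * Illustrative numbers for the decayed averages above (values
++ * assumed): starting from zero samples, one update yields
++ * (7 * 0 + 256) / 8 = 32 samples; with elapsed = 4 ms the decayed
++ * total becomes 256 * 4 ms / 8 = 128 ms, so the resulting mean think
++ * time is 128 ms / 32 = 4 ms, as expected for a single sample.
++ */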
++
++static void
++bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ bfqq->seek_history <<= 1;
++ bfqq->seek_history |=
++ get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
++ (!blk_queue_nonrot(bfqd->queue) ||
++ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
++}
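++
++/*
++ * Descriptive note: seek_history acts as a shift register of recent
++ * per-request seekiness samples. Each new request shifts in a 1 if its
++ * distance from the previous request exceeds BFQQ_SEEK_THR and, on
++ * non-rotational devices, only if the request is smaller than
++ * BFQQ_SECT_THR_NONROT; BFQQ_SEEKY(), defined earlier in this patch,
++ * then classifies the queue from the resulting bit pattern.
++ */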
++
++/*
++ * Disable idle window if the process thinks too long or seeks so much that
++ * it doesn't matter.
++ */
++static void bfq_update_idle_window(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ int enable_idle;
++
++ /* Don't idle for async or idle io prio class. */
++ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
++ return;
++
++ /* Idle window just restored, statistics are meaningless. */
++ if (time_is_after_eq_jiffies(bfqq->split_time +
++ bfqd->bfq_wr_min_idle_time))
++ return;
++
++ enable_idle = bfq_bfqq_idle_window(bfqq);
++
++ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
++ bfqd->bfq_slice_idle == 0 ||
++ (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
++ bfqq->wr_coeff == 1))
++ enable_idle = 0;
++ else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
++ if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
++ bfqq->wr_coeff == 1)
++ enable_idle = 0;
++ else
++ enable_idle = 1;
++ }
++ bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
++ enable_idle);
++
++ if (enable_idle)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++}
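++
++/*
++ * Descriptive example for the logic above: a non-weight-raised sync
++ * queue whose mean think time exceeds bfq_slice_idle, or that is seeky
++ * on a queueing (hw_tag) device, gets its idle window disabled;
++ * weight-raised queues are exempt from these two checks, so their
++ * latency is not penalized.
++ */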
++
++/*
++ * Called when a new fs request (rq) is added to bfqq. Check if there's
++ * something we should do about it.
++ */
++static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ struct bfq_io_cq *bic = RQ_BIC(rq);
++
++ if (rq->cmd_flags & REQ_META)
++ bfqq->meta_pending++;
++
++ bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_io_seektime(bfqd, bfqq, rq);
++ if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
++ !BFQQ_SEEKY(bfqq))
++ bfq_update_idle_window(bfqd, bfqq, bic);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "rq_enqueued: idle_window=%d (seeky %d)",
++ bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq));
++
++ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
++
++ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
++ bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
++ blk_rq_sectors(rq) < 32;
++ bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
++
++ /*
++ * There is just this request queued: if the request
++ * is small and the queue is not to be expired, then
++ * just exit.
++ *
++ * In this way, if the device is being idled to wait
++ * for a new request from the in-service queue, we
++ * avoid unplugging the device and committing the
++ * device to serve just a small request. On the
++ * contrary, we wait for the block layer to decide
++ * when to unplug the device: hopefully, new requests
++ * will be merged to this one quickly, then the device
++ * will be unplugged and larger requests will be
++ * dispatched.
++ */
++ if (small_req && !budget_timeout)
++ return;
++
++ /*
++ * A large enough request arrived, or the queue is to
++ * be expired: in both cases disk idling is to be
++ * stopped, so clear wait_request flag and reset
++ * timer.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++
++ /*
++ * The queue is not empty, because a new request just
++ * arrived. Hence we can safely expire the queue, in
++ * case of budget timeout, without risking that the
++ * timestamps of the queue are not updated correctly.
++ * See [1] for more details.
++ */
++ if (budget_timeout)
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_BUDGET_TIMEOUT);
++
++ /*
++ * Let the request rip immediately, or let a new queue be
++ * selected if bfqq has just been expired.
++ */
++ __blk_run_queue(bfqd->queue);
++ }
++}
++
++static void bfq_insert_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ /*
++ * An unplug may trigger a requeue of a request from the device
++ * driver: make sure we are in process context while trying to
++ * merge two bfq_queues.
++ */
++ if (!in_interrupt()) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
++ if (new_bfqq) {
++ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
++ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
++ /*
++ * Release the request's reference to the old bfqq
++ * and make sure one is taken to the shared queue.
++ */
++ new_bfqq->allocated[rq_data_dir(rq)]++;
++ bfqq->allocated[rq_data_dir(rq)]--;
++ new_bfqq->ref++;
++ bfq_clear_bfqq_just_created(bfqq);
++ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
++ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
++ bfqq, new_bfqq);
++ /*
++ * rq is about to be enqueued into new_bfqq,
++ * release rq reference on bfqq
++ */
++ bfq_put_queue(bfqq);
++ rq->elv.priv[1] = new_bfqq;
++ bfqq = new_bfqq;
++ }
++ }
++
++ bfq_add_request(rq);
++
++ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
++ list_add_tail(&rq->queuelist, &bfqq->fifo);
++
++ bfq_rq_enqueued(bfqd, bfqq, rq);
++}
++
++static void bfq_update_hw_tag(struct bfq_data *bfqd)
++{
++ bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
++ bfqd->rq_in_driver);
++
++ if (bfqd->hw_tag == 1)
++ return;
++
++ /*
++ * This sample is valid if the number of outstanding requests
++ * is large enough to allow a queueing behavior. Note that the
++ * sum is not exact, as it's not taking into account deactivated
++ * requests.
++ */
++ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
++ return;
++
++ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
++ return;
++
++ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
++ bfqd->max_rq_in_driver = 0;
++ bfqd->hw_tag_samples = 0;
++}
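++
++/*
++ * Descriptive note: hw_tag is inferred, not configured. A verdict is
++ * emitted only after BFQ_HW_QUEUE_SAMPLES valid samples, and hw_tag is
++ * set to 1 only if the device was observed to keep more than
++ * BFQ_HW_QUEUE_THRESHOLD requests in flight at once. The counters are
++ * then reset: a verdict of 0 may be revised on later completions,
++ * while a verdict of 1 is final.
++ */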
++
++static void bfq_completed_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ u64 now_ns;
++ u32 delta_us;
++
++ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left",
++ blk_rq_sectors(rq));
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++ bfq_update_hw_tag(bfqd);
++
++ BUG_ON(!bfqd->rq_in_driver);
++ BUG_ON(!bfqq->dispatched);
++ bfqd->rq_in_driver--;
++ bfqq->dispatched--;
++ bfqg_stats_update_completion(bfqq_group(bfqq),
++ rq_start_time_ns(rq),
++ rq_io_start_time_ns(rq),
++ rq->cmd_flags);
++
++ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++ /*
++ * Set budget_timeout (which we overload to store the
++ * time at which the queue remains with no backlog and
++ * no outstanding request; used by the weight-raising
++ * mechanism).
++ */
++ bfqq->budget_timeout = jiffies;
++
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ }
++
++ now_ns = ktime_get_ns();
++
++ RQ_BIC(rq)->ttime.last_end_request = now_ns;
++
++ /*
++ * Using us instead of ns, to get a reasonable precision in
++ * computing rate in next check.
++ */
++ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
++
++ bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
++ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
++ (USEC_PER_SEC*
++ (u64)((bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us))
++ >>BFQ_RATE_SHIFT,
++ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT);
++
++ /*
++ * If the request took rather long to complete, and, according
++ * to the maximum request size recorded, this completion latency
++ * implies that the request was certainly served at a very low
++ * rate (less than 1M sectors/sec), then the whole observation
++ * interval that lasts up to this time instant cannot be a
++ * valid time interval for computing a new peak rate. Invoke
++ * bfq_update_rate_reset to have the following three steps
++ * taken:
++ * - close the observation interval at the last (previous)
++ * request dispatch or completion
++ * - compute rate, if possible, for that observation interval
++ * - reset to zero samples, which will trigger a proper
++ * re-initialization of the observation interval on next
++ * dispatch
++ */
++ if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
++ (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
++ 1UL<<(BFQ_RATE_SHIFT - 10))
++ bfq_update_rate_reset(bfqd, NULL);
++ bfqd->last_completion = now_ns;
++
++ /*
++ * If we are waiting to discover whether the request pattern
++ * of the task associated with the queue is actually
++ * isochronous, and both requisites for this condition to hold
++ * are now satisfied, then compute soft_rt_next_start (see the
++ * comments on the function bfq_bfqq_softrt_next_start()). We
++ * schedule this delayed check when bfqq expires, if it still
++ * has in-flight requests.
++ */
++ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list))
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++
++ /*
++ * If this is the in-service queue, check if it needs to be expired,
++ * or if we want to idle in case it has no pending requests.
++ */
++ if (bfqd->in_service_queue == bfqq) {
++ if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
++ bfq_arm_slice_timer(bfqd);
++ goto out;
++ } else if (bfq_may_expire_for_budg_timeout(bfqq))
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_BUDGET_TIMEOUT);
++ else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
++ (bfqq->dispatched == 0 ||
++ !bfq_bfqq_may_idle(bfqq)))
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_NO_MORE_REQUESTS);
++ }
++
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++
++out:
++ return;
++}
++
++static int __bfq_may_queue(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
++ bfq_clear_bfqq_must_alloc(bfqq);
++ return ELV_MQUEUE_MUST;
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++static int bfq_may_queue(struct request_queue *q, unsigned int op)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ /*
++ * Don't force setup of a queue from here, as a call to may_queue
++ * does not necessarily imply that a request actually will be
++ * queued. So just look up a possibly existing queue, or return
++ * 'may queue' if that fails.
++ */
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (!bic)
++ return ELV_MQUEUE_MAY;
++
++ bfqq = bic_to_bfqq(bic, op_is_sync(op));
++ if (bfqq)
++ return __bfq_may_queue(bfqq);
++
++ return ELV_MQUEUE_MAY;
++}
++
++/*
++ * Queue lock held here.
++ */
++static void bfq_put_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ if (bfqq) {
++ const int rw = rq_data_dir(rq);
++
++ BUG_ON(!bfqq->allocated[rw]);
++ bfqq->allocated[rw]--;
++
++ rq->elv.priv[0] = NULL;
++ rq->elv.priv[1] = NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++ bfqq, bfqq->ref);
++ bfq_put_queue(bfqq);
++ }
++}
++
++/*
++ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
++ * was the last process referring to that bfqq.
++ */
++static struct bfq_queue *
++bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++
++ put_io_context(bic->icq.ioc);
++
++ if (bfqq_process_refs(bfqq) == 1) {
++ bfqq->pid = current->pid;
++ bfq_clear_bfqq_coop(bfqq);
++ bfq_clear_bfqq_split_coop(bfqq);
++ return bfqq;
++ }
++
++ bic_set_bfqq(bic, NULL, 1);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++ return NULL;
++}
++
++/*
++ * Allocate bfq data structures associated with this request.
++ */
++static int bfq_set_request(struct request_queue *q, struct request *rq,
++ struct bio *bio, gfp_t gfp_mask)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++ const int rw = rq_data_dir(rq);
++ const int is_sync = rq_is_sync(rq);
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ bool bfqq_already_existing = false, split = false;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++
++ if (!bic)
++ goto queue_fail;
++
++ bfq_check_ioprio_change(bic, bio);
++
++ bfq_bic_update_cgroup(bic, bio);
++
++new_queue:
++ bfqq = bic_to_bfqq(bic, is_sync);
++ if (!bfqq || bfqq == &bfqd->oom_bfqq) {
++ if (bfqq)
++ bfq_put_queue(bfqq);
++ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
++ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
++
++ bic_set_bfqq(bic, bfqq, is_sync);
++ if (split && is_sync) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: was_in_list %d "
++ "was_in_large_burst %d "
++ "large burst in progress %d",
++ bic->was_in_burst_list,
++ bic->saved_in_large_burst,
++ bfqd->large_burst);
++
++ if ((bic->was_in_burst_list && bfqd->large_burst) ||
++ bic->saved_in_large_burst) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: marking in "
++ "large burst");
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ } else {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: clearing in "
++ "large burst");
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ if (bic->was_in_burst_list)
++ hlist_add_head(&bfqq->burst_list_node,
++ &bfqd->burst_list);
++ }
++ bfqq->split_time = jiffies;
++ }
++ } else {
++ /* If the queue was seeky for too long, break it apart. */
++ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
++
++ /* Update bic before losing reference to bfqq */
++ if (bfq_bfqq_in_large_burst(bfqq))
++ bic->saved_in_large_burst = true;
++
++ bfqq = bfq_split_bfqq(bic, bfqq);
++ split = true;
++ if (!bfqq)
++ goto new_queue;
++ else
++ bfqq_already_existing = true;
++ }
++ }
++
++ bfqq->allocated[rw]++;
++ bfqq->ref++;
++ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref);
++
++ rq->elv.priv[0] = bic;
++ rq->elv.priv[1] = bfqq;
++
++ /*
++ * If a bfq_queue has only one process reference, it is owned
++ * by only one bfq_io_cq: we can set the bic field of the
++ * bfq_queue to the address of that structure. Also, if the
++ * queue has just been split, mark a flag so that the
++ * information is available to the other scheduler hooks.
++ */
++ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
++ bfqq->bic = bic;
++ if (split) {
++ /*
++ * If the queue has just been split from a shared
++ * queue, restore the idle window and the possible
++ * weight raising period.
++ */
++ bfq_bfqq_resume_state(bfqq, bfqd, bic,
++ bfqq_already_existing);
++ }
++ }
++
++ if (unlikely(bfq_bfqq_just_created(bfqq)))
++ bfq_handle_burst(bfqd, bfqq);
++
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 0;
++
++queue_fail:
++ bfq_schedule_dispatch(bfqd);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 1;
++}
++
++static void bfq_kick_queue(struct work_struct *work)
++{
++ struct bfq_data *bfqd =
++ container_of(work, struct bfq_data, unplug_work);
++ struct request_queue *q = bfqd->queue;
++
++ spin_lock_irq(q->queue_lock);
++ __blk_run_queue(q);
++ spin_unlock_irq(q->queue_lock);
++}
++
++/*
++ * Handler of the expiration of the timer running if the in-service queue
++ * is idling inside its time slice.
++ */
++static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
++{
++ struct bfq_data *bfqd = container_of(timer, struct bfq_data,
++ idle_slice_timer);
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ enum bfqq_expiration reason;
++
++ spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++
++ bfqq = bfqd->in_service_queue;
++ /*
++ * Theoretical race here: the in-service queue can be NULL or
++ * different from the queue that was idling if the timer handler
++ * spins on the queue_lock and a new request arrives for the
++ * current queue and there is a full dispatch cycle that changes
++ * the in-service queue. This can hardly happen, but in the worst
++ * case we just expire a queue too early.
++ */
++ if (bfqq) {
++ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++ bfq_clear_bfqq_wait_request(bfqq);
++
++ if (bfq_bfqq_budget_timeout(bfqq))
++ /*
++ * Also here the queue can be safely expired
++ * for budget timeout without wasting
++ * guarantees
++ */
++ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++ /*
++ * The queue may not be empty upon timer expiration,
++ * because we may not disable the timer when the
++ * first request of the in-service queue arrives
++ * during disk idling.
++ */
++ reason = BFQ_BFQQ_TOO_IDLE;
++ else
++ goto schedule_dispatch;
++
++ bfq_bfqq_expire(bfqd, bfqq, true, reason);
++ }
++
++schedule_dispatch:
++ bfq_schedule_dispatch(bfqd);
++
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
++ return HRTIMER_NORESTART;
++}
++
++static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
++{
++ hrtimer_cancel(&bfqd->idle_slice_timer);
++ cancel_work_sync(&bfqd->unplug_work);
++}
++
++static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
++ struct bfq_queue **bfqq_ptr)
++{
++ struct bfq_group *root_group = bfqd->root_group;
++ struct bfq_queue *bfqq = *bfqq_ptr;
++
++ bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ if (bfqq) {
++ bfq_bfqq_move(bfqd, bfqq, root_group);
++ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfqq, bfqq->ref);
++ bfq_put_queue(bfqq);
++ *bfqq_ptr = NULL;
++ }
++}
++
++/*
++ * Release all the bfqg references to its async queues. If we are
++ * deallocating the group these queues may still contain requests, so
++ * we reparent them to the root cgroup (i.e., the only one that will
++ * exist for sure until all the requests on a device are gone).
++ */
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
++
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
++}
++
++static void bfq_exit_queue(struct elevator_queue *e)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ struct request_queue *q = bfqd->queue;
++ struct bfq_queue *bfqq, *n;
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ spin_lock_irq(q->queue_lock);
++
++ BUG_ON(bfqd->in_service_queue);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
++ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
++
++ spin_unlock_irq(q->queue_lock);
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
++#else
++ bfq_put_async_queues(bfqd, bfqd->root_group);
++ kfree(bfqd->root_group);
++#endif
++
++ kfree(bfqd);
++}
++
++static void bfq_init_root_group(struct bfq_group *root_group,
++ struct bfq_data *bfqd)
++{
++ int i;
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ root_group->entity.parent = NULL;
++ root_group->my_entity = NULL;
++ root_group->bfqd = bfqd;
++#endif
++ root_group->rq_pos_tree = RB_ROOT;
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++ root_group->sched_data.bfq_class_idle_last_service = jiffies;
++}
++
++static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
++{
++ struct bfq_data *bfqd;
++ struct elevator_queue *eq;
++
++ eq = elevator_alloc(q, e);
++ if (!eq)
++ return -ENOMEM;
++
++ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
++ if (!bfqd) {
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++ eq->elevator_data = bfqd;
++
++ /*
++ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
++ * Grab a permanent reference to it, so that the normal code flow
++ * will not attempt to free it.
++ */
++ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
++ bfqd->oom_bfqq.ref++;
++ bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
++ bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
++ bfqd->oom_bfqq.entity.new_weight =
++ bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
++
++	/* oom_bfqq does not participate in bursts */
++ bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
++ /*
++ * Trigger weight initialization, according to ioprio, at the
++ * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
++ * class won't be changed any more.
++ */
++ bfqd->oom_bfqq.entity.prio_changed = 1;
++
++ bfqd->queue = q;
++
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
++
++ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
++ if (!bfqd->root_group)
++ goto out_free;
++ bfq_init_root_group(bfqd->root_group, bfqd);
++ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
++
++ hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
++ HRTIMER_MODE_REL);
++ bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
++
++ bfqd->queue_weights_tree = RB_ROOT;
++ bfqd->group_weights_tree = RB_ROOT;
++
++ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
++
++ INIT_LIST_HEAD(&bfqd->active_list);
++ INIT_LIST_HEAD(&bfqd->idle_list);
++ INIT_HLIST_HEAD(&bfqd->burst_list);
++
++ bfqd->hw_tag = -1;
++
++ bfqd->bfq_max_budget = bfq_default_max_budget;
++
++ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
++ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
++ bfqd->bfq_back_max = bfq_back_max;
++ bfqd->bfq_back_penalty = bfq_back_penalty;
++ bfqd->bfq_slice_idle = bfq_slice_idle;
++ bfqd->bfq_timeout = bfq_timeout;
++
++ bfqd->bfq_requests_within_timer = 120;
++
++ bfqd->bfq_large_burst_thresh = 8;
++ bfqd->bfq_burst_interval = msecs_to_jiffies(180);
++
++ bfqd->low_latency = true;
++
++ /*
++ * Trade-off between responsiveness and fairness.
++ */
++ bfqd->bfq_wr_coeff = 30;
++ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
++ bfqd->bfq_wr_max_time = 0;
++ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
++ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
++ bfqd->bfq_wr_max_softrt_rate = 7000; /*
++ * Approximate rate required
++ * to playback or record a
++ * high-definition compressed
++ * video.
++ */
++ bfqd->wr_busy_queues = 0;
++
++ /*
++ * Begin by assuming, optimistically, that the device is a
++ * high-speed one, and that its peak rate is equal to 2/3 of
++ * the highest reference rate.
++ */
++ bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
++ T_fast[blk_queue_nonrot(bfqd->queue)];
++ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
++ bfqd->device_speed = BFQ_BFQD_FAST;
++
++ return 0;
++
++out_free:
++ kfree(bfqd);
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++}
++
++static void bfq_slab_kill(void)
++{
++ kmem_cache_destroy(bfq_pool);
++}
++
++static int __init bfq_slab_setup(void)
++{
++ bfq_pool = KMEM_CACHE(bfq_queue, 0);
++ if (!bfq_pool)
++ return -ENOMEM;
++ return 0;
++}
++
++static ssize_t bfq_var_show(unsigned int var, char *page)
++{
++ return sprintf(page, "%u\n", var);
++}
++
++static ssize_t bfq_var_store(unsigned long *var, const char *page,
++ size_t count)
++{
++ unsigned long new_val;
++ int ret = kstrtoul(page, 10, &new_val);
++
++ if (ret == 0)
++ *var = new_val;
++
++ return count;
++}
++
++static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++
++ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
++ jiffies_to_msecs(bfqd->bfq_wr_max_time) :
++ jiffies_to_msecs(bfq_wr_duration(bfqd)));
++}
++
++static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_queue *bfqq;
++ struct bfq_data *bfqd = e->elevator_data;
++ ssize_t num_char = 0;
++
++ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
++ bfqd->queued);
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ num_char += sprintf(page + num_char, "Active:\n");
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, nr_queued %d %d, ",
++ bfqq->pid,
++ bfqq->entity.weight,
++ bfqq->queued[0],
++ bfqq->queued[1]);
++ num_char += sprintf(page + num_char,
++ "dur %d/%u\n",
++ jiffies_to_msecs(
++ jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ num_char += sprintf(page + num_char, "Idle:\n");
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ jiffies_to_msecs(jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++
++ return num_char;
++}
++
++#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ u64 __data = __VAR; \
++ if (__CONV == 1) \
++ __data = jiffies_to_msecs(__data); \
++ else if (__CONV == 2) \
++ __data = div_u64(__data, NSEC_PER_MSEC); \
++ return bfq_var_show(__data, (page)); \
++}
++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
++SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
++SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
++SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
++SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
++SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
++SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
++SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
++SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
++SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
++SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
++ 1);
++SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
++#undef SHOW_FUNCTION
++
++#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ u64 __data = __VAR; \
++ __data = div_u64(__data, NSEC_PER_USEC); \
++ return bfq_var_show(__data, (page)); \
++}
++USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
++#undef USEC_SHOW_FUNCTION
++
++#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
++static ssize_t \
++__FUNC(struct elevator_queue *e, const char *page, size_t count) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ if (__CONV == 1) \
++ *(__PTR) = msecs_to_jiffies(__data); \
++ else if (__CONV == 2) \
++ *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
++ else \
++ *(__PTR) = __data; \
++ return ret; \
++}
++STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
++ INT_MAX, 2);
++STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
++ INT_MAX, 2);
++STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
++STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
++ INT_MAX, 0);
++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
++STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
++ 1);
++STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
++ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
++ INT_MAX, 0);
++#undef STORE_FUNCTION
++
++#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
++static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ *(__PTR) = (u64)__data * NSEC_PER_USEC; \
++ return ret; \
++}
++USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
++ UINT_MAX);
++#undef USEC_STORE_FUNCTION
++
++/* do nothing for the moment */
++static ssize_t bfq_weights_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ return count;
++}
++
++static ssize_t bfq_max_budget_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data == 0)
++ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
++ else {
++ if (__data > INT_MAX)
++ __data = INT_MAX;
++ bfqd->bfq_max_budget = __data;
++ }
++
++ bfqd->bfq_user_max_budget = __data;
++
++ return ret;
++}
++
++/*
++ * Leaving this name to preserve name compatibility with cfq
++ * parameters, but this timeout is used for both sync and async.
++ */
++static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data < 1)
++ __data = 1;
++ else if (__data > INT_MAX)
++ __data = INT_MAX;
++
++ bfqd->bfq_timeout = msecs_to_jiffies(__data);
++ if (bfqd->bfq_user_max_budget == 0)
++ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
++
++ return ret;
++}
++
++static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (!bfqd->strict_guarantees && __data == 1
++ && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
++ bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
++
++ bfqd->strict_guarantees = __data;
++
++ return ret;
++}
++
++static ssize_t bfq_low_latency_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (__data == 0 && bfqd->low_latency != 0)
++ bfq_end_wr(bfqd);
++ bfqd->low_latency = __data;
++
++ return ret;
++}
++
++#define BFQ_ATTR(name) \
++ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
++
++static struct elv_fs_entry bfq_attrs[] = {
++ BFQ_ATTR(fifo_expire_sync),
++ BFQ_ATTR(fifo_expire_async),
++ BFQ_ATTR(back_seek_max),
++ BFQ_ATTR(back_seek_penalty),
++ BFQ_ATTR(slice_idle),
++ BFQ_ATTR(slice_idle_us),
++ BFQ_ATTR(max_budget),
++ BFQ_ATTR(timeout_sync),
++ BFQ_ATTR(strict_guarantees),
++ BFQ_ATTR(low_latency),
++ BFQ_ATTR(wr_coeff),
++ BFQ_ATTR(wr_max_time),
++ BFQ_ATTR(wr_rt_max_time),
++ BFQ_ATTR(wr_min_idle_time),
++ BFQ_ATTR(wr_min_inter_arr_async),
++ BFQ_ATTR(wr_max_softrt_rate),
++ BFQ_ATTR(weights),
++ __ATTR_NULL
++};
++
++static struct elevator_type iosched_bfq = {
++ .ops.sq = {
++ .elevator_merge_fn = bfq_merge,
++ .elevator_merged_fn = bfq_merged_request,
++ .elevator_merge_req_fn = bfq_merged_requests,
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ .elevator_bio_merged_fn = bfq_bio_merged,
++#endif
++ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge,
++ .elevator_allow_rq_merge_fn = bfq_allow_rq_merge,
++ .elevator_dispatch_fn = bfq_dispatch_requests,
++ .elevator_add_req_fn = bfq_insert_request,
++ .elevator_activate_req_fn = bfq_activate_request,
++ .elevator_deactivate_req_fn = bfq_deactivate_request,
++ .elevator_completed_req_fn = bfq_completed_request,
++ .elevator_former_req_fn = elv_rb_former_request,
++ .elevator_latter_req_fn = elv_rb_latter_request,
++ .elevator_init_icq_fn = bfq_init_icq,
++ .elevator_exit_icq_fn = bfq_exit_icq,
++ .elevator_set_req_fn = bfq_set_request,
++ .elevator_put_req_fn = bfq_put_request,
++ .elevator_may_queue_fn = bfq_may_queue,
++ .elevator_init_fn = bfq_init_queue,
++ .elevator_exit_fn = bfq_exit_queue,
++ },
++ .icq_size = sizeof(struct bfq_io_cq),
++ .icq_align = __alignof__(struct bfq_io_cq),
++ .elevator_attrs = bfq_attrs,
++ .elevator_name = "bfq-sq",
++ .elevator_owner = THIS_MODULE,
++};
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static struct blkcg_policy blkcg_policy_bfq = {
++ .dfl_cftypes = bfq_blkg_files,
++ .legacy_cftypes = bfq_blkcg_legacy_files,
++
++ .cpd_alloc_fn = bfq_cpd_alloc,
++ .cpd_init_fn = bfq_cpd_init,
++ .cpd_bind_fn = bfq_cpd_init,
++ .cpd_free_fn = bfq_cpd_free,
++
++ .pd_alloc_fn = bfq_pd_alloc,
++ .pd_init_fn = bfq_pd_init,
++ .pd_offline_fn = bfq_pd_offline,
++ .pd_free_fn = bfq_pd_free,
++ .pd_reset_stats_fn = bfq_pd_reset_stats,
++};
++#endif
++
++static int __init bfq_init(void)
++{
++ int ret;
++ char msg[60] = "BFQ I/O-scheduler: v8r12";
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ ret = blkcg_policy_register(&blkcg_policy_bfq);
++ if (ret)
++ return ret;
++#endif
++
++ ret = -ENOMEM;
++ if (bfq_slab_setup())
++ goto err_pol_unreg;
++
++ /*
++ * Times to load large popular applications for the typical
++ * systems installed on the reference devices (see the
++ * comments before the definitions of the next two
++ * arrays). Actually, we use slightly slower values, as the
++ * estimated peak rate tends to be smaller than the actual
++ * peak rate. The reason for this last fact is that estimates
++ * are computed over much shorter time intervals than the long
++ * intervals typically used for benchmarking. Why? First, to
++ * adapt more quickly to variations. Second, because an I/O
++ * scheduler cannot rely on a peak-rate-evaluation workload to
++ * be run for a long time.
++ */
++ T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
++ T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
++ T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
++ T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */
++
++ /*
++ * Thresholds that determine the switch between speed classes
++ * (see the comments before the definition of the array
++ * device_speed_thresh). These thresholds are biased towards
++ * transitions to the fast class. This is safer than the
++ * opposite bias. In fact, a wrong transition to the slow
++ * class results in short weight-raising periods, because the
++	 * speed of the device then tends to be higher than the
++ * reference peak rate. On the opposite end, a wrong
++ * transition to the fast class tends to increase
++	 * weight-raising periods, for the opposite reason.
++ */
++ device_speed_thresh[0] = (4 * R_slow[0]) / 3;
++ device_speed_thresh[1] = (4 * R_slow[1]) / 3;
++
++ ret = elv_register(&iosched_bfq);
++ if (ret)
++ goto err_pol_unreg;
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ strcat(msg, " (with cgroups support)");
++#endif
++ pr_info("%s", msg);
++
++ return 0;
++
++err_pol_unreg:
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_policy_unregister(&blkcg_policy_bfq);
++#endif
++ return ret;
++}
++
++static void __exit bfq_exit(void)
++{
++ elv_unregister(&iosched_bfq);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_policy_unregister(&blkcg_policy_bfq);
++#endif
++ bfq_slab_kill();
++}
++
++module_init(bfq_init);
++module_exit(bfq_exit);
++
++MODULE_AUTHOR("Arianna Avanzini, Fabio Checconi, Paolo Valente");
++MODULE_LICENSE("GPL");
+diff --git a/block/bfq.h b/block/bfq.h
+new file mode 100644
+index 000000000000..f5751ea59d98
+--- /dev/null
++++ b/block/bfq.h
+@@ -0,0 +1,948 @@
++/*
++ * BFQ v8r12 for 4.11.0: data structures and common functions prototypes.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
++ */
++
++#ifndef _BFQ_H
++#define _BFQ_H
++
++#include <linux/blktrace_api.h>
++#include <linux/hrtimer.h>
++#include <linux/blk-cgroup.h>
++
++#define BFQ_IOPRIO_CLASSES 3
++#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
++
++#define BFQ_MIN_WEIGHT 1
++#define BFQ_MAX_WEIGHT 1000
++#define BFQ_WEIGHT_CONVERSION_COEFF 10
++
++#define BFQ_DEFAULT_QUEUE_IOPRIO 4
++
++#define BFQ_WEIGHT_LEGACY_DFL 100
++#define BFQ_DEFAULT_GRP_IOPRIO 0
++#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
++
++/*
++ * Soft real-time applications are far more latency-sensitive than
++ * interactive ones. Over-raise the weight of the former to privilege
++ * them over the latter.
++ */
++#define BFQ_SOFTRT_WEIGHT_FACTOR 100
++
++struct bfq_entity;
++
++/**
++ * struct bfq_service_tree - per ioprio_class service tree.
++ *
++ * Each service tree represents a B-WF2Q+ scheduler on its own. Each
++ * ioprio_class has its own independent scheduler, and so its own
++ * bfq_service_tree. All the fields are protected by the queue lock
++ * of the containing bfqd.
++ */
++struct bfq_service_tree {
++ /* tree for active entities (i.e., those backlogged) */
++ struct rb_root active;
++ /* tree for idle entities (i.e., not backlogged, with V <= F_i)*/
++ struct rb_root idle;
++
++ struct bfq_entity *first_idle; /* idle entity with minimum F_i */
++ struct bfq_entity *last_idle; /* idle entity with maximum F_i */
++
++ u64 vtime; /* scheduler virtual time */
++ /* scheduler weight sum; active and idle entities contribute to it */
++ unsigned long wsum;
++};
++
++/**
++ * struct bfq_sched_data - multi-class scheduler.
++ *
++ * bfq_sched_data is the basic scheduler queue. It supports three
++ * ioprio_classes, and can be used either as a toplevel queue or as an
++ * intermediate queue on a hierarchical setup. @next_in_service
++ * points to the active entity of the sched_data service trees that
++ * will be scheduled next. It is used to reduce the number of steps
++ * needed for each hierarchical-schedule update.
++ *
++ * The supported ioprio_classes are the same as in CFQ, in descending
++ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
++ * Requests from higher priority queues are served before all the
++ * requests from lower priority queues; among queues of the same
++ * class, requests are served according to B-WF2Q+.
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_sched_data {
++ struct bfq_entity *in_service_entity; /* entity in service */
++ /* head-of-the-line entity in the scheduler (see comments above) */
++ struct bfq_entity *next_in_service;
++ /* array of service trees, one per ioprio_class */
++ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
++ /* last time CLASS_IDLE was served */
++ unsigned long bfq_class_idle_last_service;
++
++};
++
++/**
++ * struct bfq_weight_counter - counter of the number of all active entities
++ * with a given weight.
++ */
++struct bfq_weight_counter {
++ unsigned int weight; /* weight of the entities this counter refers to */
++ unsigned int num_active; /* nr of active entities with this weight */
++ /*
++ * Weights tree member (see bfq_data's @queue_weights_tree and
++ * @group_weights_tree)
++ */
++ struct rb_node weights_node;
++};
++
++/**
++ * struct bfq_entity - schedulable entity.
++ *
++ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
++ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
++ * entity belongs to the sched_data of the parent group in the cgroup
++ * hierarchy. Non-leaf entities have also their own sched_data, stored
++ * in @my_sched_data.
++ *
++ * Each entity stores independently its priority values; this would
++ * allow different weights on different devices, but this
++ * functionality is not exported to userspace for now. Priorities and
++ * weights are updated lazily, first storing the new values into the
++ * new_* fields, then setting the @prio_changed flag. As soon as
++ * there is a transition in the entity state that allows the priority
++ * update to take place the effective and the requested priority
++ * values are synchronized.
++ *
++ * Unless cgroups are used, the weight value is calculated from the
++ * ioprio to export the same interface as CFQ. When dealing with
++ * ``well-behaved'' queues (i.e., queues that do not spend too much
++ * time to consume their budget and have true sequential behavior, and
++ * when there are no external factors breaking anticipation) the
++ * relative weights at each level of the cgroups hierarchy should be
++ * guaranteed. All the fields are protected by the queue lock of the
++ * containing bfqd.
++ */
++struct bfq_entity {
++ struct rb_node rb_node; /* service_tree member */
++ /* pointer to the weight counter associated with this entity */
++ struct bfq_weight_counter *weight_counter;
++
++ /*
++ * Flag, true if the entity is on a tree (either the active or
++ * the idle one of its service_tree) or is in service.
++ */
++ bool on_st;
++
++ u64 finish; /* B-WF2Q+ finish timestamp (aka F_i) */
++ u64 start; /* B-WF2Q+ start timestamp (aka S_i) */
++
++ /* tree the entity is enqueued into; %NULL if not on a tree */
++ struct rb_root *tree;
++
++ /*
++ * minimum start time of the (active) subtree rooted at this
++ * entity; used for O(log N) lookups into active trees
++ */
++ u64 min_start;
++
++ /* amount of service received during the last service slot */
++ int service;
++
++ /* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
++ int budget;
++
++ unsigned int weight; /* weight of the queue */
++ unsigned int new_weight; /* next weight if a change is in progress */
++
++ /* original weight, used to implement weight boosting */
++ unsigned int orig_weight;
++
++ /* parent entity, for hierarchical scheduling */
++ struct bfq_entity *parent;
++
++ /*
++ * For non-leaf nodes in the hierarchy, the associated
++ * scheduler queue, %NULL on leaf nodes.
++ */
++ struct bfq_sched_data *my_sched_data;
++ /* the scheduler queue this entity belongs to */
++ struct bfq_sched_data *sched_data;
++
++ /* flag, set to request a weight, ioprio or ioprio_class change */
++ int prio_changed;
++};
++
++struct bfq_group;
++
++/**
++ * struct bfq_queue - leaf schedulable entity.
++ *
++ * A bfq_queue is a leaf request queue; it can be associated with one
++ * or more io_contexts, if it is async or shared between cooperating
++ * processes. @cgroup holds a reference to the cgroup, to be sure that it
++ * does not disappear while a bfqq still references it (mostly to avoid
++ * races between request issuing and task migration followed by cgroup
++ * destruction).
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_queue {
++ /* reference counter */
++ int ref;
++ /* parent bfq_data */
++ struct bfq_data *bfqd;
++
++ /* current ioprio and ioprio class */
++ unsigned short ioprio, ioprio_class;
++ /* next ioprio and ioprio class if a change is in progress */
++ unsigned short new_ioprio, new_ioprio_class;
++
++ /*
++ * Shared bfq_queue if queue is cooperating with one or more
++ * other queues.
++ */
++ struct bfq_queue *new_bfqq;
++ /* request-position tree member (see bfq_group's @rq_pos_tree) */
++ struct rb_node pos_node;
++ /* request-position tree root (see bfq_group's @rq_pos_tree) */
++ struct rb_root *pos_root;
++
++ /* sorted list of pending requests */
++ struct rb_root sort_list;
++ /* if fifo isn't expired, next request to serve */
++ struct request *next_rq;
++ /* number of sync and async requests queued */
++ int queued[2];
++ /* number of sync and async requests currently allocated */
++ int allocated[2];
++ /* number of pending metadata requests */
++ int meta_pending;
++ /* fifo list of requests in sort_list */
++ struct list_head fifo;
++
++ /* entity representing this queue in the scheduler */
++ struct bfq_entity entity;
++
++ /* maximum budget allowed from the feedback mechanism */
++ int max_budget;
++ /* budget expiration (in jiffies) */
++ unsigned long budget_timeout;
++
++ /* number of requests on the dispatch list or inside driver */
++ int dispatched;
++
++ unsigned int flags; /* status flags.*/
++
++ /* node for active/idle bfqq list inside parent bfqd */
++ struct list_head bfqq_list;
++
++ /* bit vector: a 1 for each seeky requests in history */
++ u32 seek_history;
++
++ /* node for the device's burst list */
++ struct hlist_node burst_list_node;
++
++ /* position of the last request enqueued */
++ sector_t last_request_pos;
++
++ /* Number of consecutive pairs of request completion and
++ * arrival, such that the queue becomes idle after the
++ * completion, but the next request arrives within an idle
++ * time slice; used only if the queue's IO_bound flag has been
++ * cleared.
++ */
++ unsigned int requests_within_timer;
++
++ /* pid of the process owning the queue, used for logging purposes */
++ pid_t pid;
++
++ /*
++ * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL
++ * if the queue is shared.
++ */
++ struct bfq_io_cq *bic;
++
++ /* current maximum weight-raising time for this queue */
++ unsigned long wr_cur_max_time;
++ /*
++ * Minimum time instant such that, only if a new request is
++ * enqueued after this time instant in an idle @bfq_queue with
++ * no outstanding requests, then the task associated with the
++	 * queue is deemed as soft real-time (see the comments on
++ * the function bfq_bfqq_softrt_next_start())
++ */
++ unsigned long soft_rt_next_start;
++ /*
++ * Start time of the current weight-raising period if
++ * the @bfq-queue is being weight-raised, otherwise
++ * finish time of the last weight-raising period.
++ */
++ unsigned long last_wr_start_finish;
++ /* factor by which the weight of this queue is multiplied */
++ unsigned int wr_coeff;
++ /*
++ * Time of the last transition of the @bfq_queue from idle to
++ * backlogged.
++ */
++ unsigned long last_idle_bklogged;
++ /*
++ * Cumulative service received from the @bfq_queue since the
++ * last transition from idle to backlogged.
++ */
++ unsigned long service_from_backlogged;
++ /*
++ * Value of wr start time when switching to soft rt
++ */
++ unsigned long wr_start_at_switch_to_srt;
++
++ unsigned long split_time; /* time of last split */
++};
++
++/**
++ * struct bfq_ttime - per process thinktime stats.
++ */
++struct bfq_ttime {
++ u64 last_end_request; /* completion time of last request */
++
++ u64 ttime_total; /* total process thinktime */
++ unsigned long ttime_samples; /* number of thinktime samples */
++ u64 ttime_mean; /* average process thinktime */
++
++};
++
++/**
++ * struct bfq_io_cq - per (request_queue, io_context) structure.
++ */
++struct bfq_io_cq {
++ /* associated io_cq structure */
++ struct io_cq icq; /* must be the first member */
++ /* array of two process queues, the sync and the async */
++ struct bfq_queue *bfqq[2];
++ /* associated @bfq_ttime struct */
++ struct bfq_ttime ttime;
++ /* per (request_queue, blkcg) ioprio */
++ int ioprio;
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ uint64_t blkcg_serial_nr; /* the current blkcg serial */
++#endif
++
++ /*
++ * Snapshot of the idle window before merging; taken to
++ * remember this value while the queue is merged, so as to be
++ * able to restore it in case of split.
++ */
++ bool saved_idle_window;
++ /*
++ * Same purpose as the previous two fields for the I/O bound
++ * classification of a queue.
++ */
++ bool saved_IO_bound;
++
++ /*
++ * Same purpose as the previous fields for the value of the
++ * field keeping the queue's belonging to a large burst
++ */
++ bool saved_in_large_burst;
++ /*
++ * True if the queue belonged to a burst list before its merge
++ * with another cooperating queue.
++ */
++ bool was_in_burst_list;
++
++ /*
++ * Similar to previous fields: save wr information.
++ */
++ unsigned long saved_wr_coeff;
++ unsigned long saved_last_wr_start_finish;
++ unsigned long saved_wr_start_at_switch_to_srt;
++ unsigned int saved_wr_cur_max_time;
++};
++
++enum bfq_device_speed {
++ BFQ_BFQD_FAST,
++ BFQ_BFQD_SLOW,
++};
++
++/**
++ * struct bfq_data - per-device data structure.
++ *
++ * All the fields are protected by the @queue lock.
++ */
++struct bfq_data {
++ /* request queue for the device */
++ struct request_queue *queue;
++
++ /* root bfq_group for the device */
++ struct bfq_group *root_group;
++
++ /*
++ * rbtree of weight counters of @bfq_queues, sorted by
++ * weight. Used to keep track of whether all @bfq_queues have
++ * the same weight. The tree contains one counter for each
++ * distinct weight associated to some active and not
++ * weight-raised @bfq_queue (see the comments to the functions
++ * bfq_weights_tree_[add|remove] for further details).
++ */
++ struct rb_root queue_weights_tree;
++ /*
++ * rbtree of non-queue @bfq_entity weight counters, sorted by
++ * weight. Used to keep track of whether all @bfq_groups have
++ * the same weight. The tree contains one counter for each
++ * distinct weight associated to some active @bfq_group (see
++ * the comments to the functions bfq_weights_tree_[add|remove]
++ * for further details).
++ */
++ struct rb_root group_weights_tree;
++
++ /*
++ * Number of bfq_queues containing requests (including the
++ * queue in service, even if it is idling).
++ */
++ int busy_queues;
++ /* number of weight-raised busy @bfq_queues */
++ int wr_busy_queues;
++ /* number of queued requests */
++ int queued;
++ /* number of requests dispatched and waiting for completion */
++ int rq_in_driver;
++
++ /*
++ * Maximum number of requests in driver in the last
++ * @hw_tag_samples completed requests.
++ */
++ int max_rq_in_driver;
++ /* number of samples used to calculate hw_tag */
++ int hw_tag_samples;
++ /* flag set to one if the driver is showing a queueing behavior */
++ int hw_tag;
++
++ /* number of budgets assigned */
++ int budgets_assigned;
++
++ /*
++ * Timer set when idling (waiting) for the next request from
++ * the queue in service.
++ */
++ struct hrtimer idle_slice_timer;
++ /* delayed work to restart dispatching on the request queue */
++ struct work_struct unplug_work;
++
++ /* bfq_queue in service */
++ struct bfq_queue *in_service_queue;
++ /* bfq_io_cq (bic) associated with the @in_service_queue */
++ struct bfq_io_cq *in_service_bic;
++
++ /* on-disk position of the last served request */
++ sector_t last_position;
++
++ /* time of last request completion (ns) */
++ u64 last_completion;
++
++ /* time of first rq dispatch in current observation interval (ns) */
++ u64 first_dispatch;
++ /* time of last rq dispatch in current observation interval (ns) */
++ u64 last_dispatch;
++
++ /* beginning of the last budget */
++ ktime_t last_budget_start;
++ /* beginning of the last idle slice */
++ ktime_t last_idling_start;
++
++ /* number of samples in current observation interval */
++ int peak_rate_samples;
++ /* num of samples of seq dispatches in current observation interval */
++ u32 sequential_samples;
++ /* total num of sectors transferred in current observation interval */
++ u64 tot_sectors_dispatched;
++ /* max rq size seen during current observation interval (sectors) */
++ u32 last_rq_max_size;
++ /* time elapsed from first dispatch in current observ. interval (us) */
++ u64 delta_from_first;
++ /* current estimate of device peak rate */
++ u32 peak_rate;
++
++ /* maximum budget allotted to a bfq_queue before rescheduling */
++ int bfq_max_budget;
++
++ /* list of all the bfq_queues active on the device */
++ struct list_head active_list;
++ /* list of all the bfq_queues idle on the device */
++ struct list_head idle_list;
++
++ /*
++ * Timeout for async/sync requests; when it fires, requests
++ * are served in fifo order.
++ */
++ u64 bfq_fifo_expire[2];
++ /* weight of backward seeks wrt forward ones */
++ unsigned int bfq_back_penalty;
++ /* maximum allowed backward seek */
++ unsigned int bfq_back_max;
++ /* maximum idling time */
++ u32 bfq_slice_idle;
++
++ /* user-configured max budget value (0 for auto-tuning) */
++ int bfq_user_max_budget;
++ /*
++ * Timeout for bfq_queues to consume their budget; used to
++	 * prevent seeky queues from imposing long latencies on
++ * sequential or quasi-sequential ones (this also implies that
++ * seeky queues cannot receive guarantees in the service
++ * domain; after a timeout they are charged for the time they
++ * have been in service, to preserve fairness among them, but
++ * without service-domain guarantees).
++ */
++ unsigned int bfq_timeout;
++
++ /*
++ * Number of consecutive requests that must be issued within
++ * the idle time slice to set again idling to a queue which
++ * was marked as non-I/O-bound (see the definition of the
++ * IO_bound flag for further details).
++ */
++ unsigned int bfq_requests_within_timer;
++
++ /*
++ * Force device idling whenever needed to provide accurate
++ * service guarantees, without caring about throughput
++ * issues. CAVEAT: this may even increase latencies, in case
++ * of useless idling for processes that did stop doing I/O.
++ */
++ bool strict_guarantees;
++
++ /*
++ * Last time at which a queue entered the current burst of
++ * queues being activated shortly after each other; for more
++ * details about this and the following parameters related to
++ * a burst of activations, see the comments on the function
++ * bfq_handle_burst.
++ */
++ unsigned long last_ins_in_burst;
++ /*
++ * Reference time interval used to decide whether a queue has
++ * been activated shortly after @last_ins_in_burst.
++ */
++ unsigned long bfq_burst_interval;
++ /* number of queues in the current burst of queue activations */
++ int burst_size;
++
++ /* common parent entity for the queues in the burst */
++ struct bfq_entity *burst_parent_entity;
++ /* Maximum burst size above which the current queue-activation
++ * burst is deemed as 'large'.
++ */
++ unsigned long bfq_large_burst_thresh;
++ /* true if a large queue-activation burst is in progress */
++ bool large_burst;
++ /*
++ * Head of the burst list (as for the above fields, more
++ * details in the comments on the function bfq_handle_burst).
++ */
++ struct hlist_head burst_list;
++
++ /* if set to true, low-latency heuristics are enabled */
++ bool low_latency;
++ /*
++ * Maximum factor by which the weight of a weight-raised queue
++ * is multiplied.
++ */
++ unsigned int bfq_wr_coeff;
++ /* maximum duration of a weight-raising period (jiffies) */
++ unsigned int bfq_wr_max_time;
++
++ /* Maximum weight-raising duration for soft real-time processes */
++ unsigned int bfq_wr_rt_max_time;
++ /*
++ * Minimum idle period after which weight-raising may be
++ * reactivated for a queue (in jiffies).
++ */
++ unsigned int bfq_wr_min_idle_time;
++ /*
++ * Minimum period between request arrivals after which
++ * weight-raising may be reactivated for an already busy async
++ * queue (in jiffies).
++ */
++ unsigned long bfq_wr_min_inter_arr_async;
++
++ /* Max service-rate for a soft real-time queue, in sectors/sec */
++ unsigned int bfq_wr_max_softrt_rate;
++ /*
++ * Cached value of the product R*T, used for computing the
++ * maximum duration of weight raising automatically.
++ */
++ u64 RT_prod;
++ /* device-speed class for the low-latency heuristic */
++ enum bfq_device_speed device_speed;
++
++ /* fallback dummy bfqq for extreme OOM conditions */
++ struct bfq_queue oom_bfqq;
++};
++
++enum bfqq_state_flags {
++ BFQ_BFQQ_FLAG_just_created = 0, /* queue just allocated */
++ BFQ_BFQQ_FLAG_busy, /* has requests or is in service */
++ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */
++ BFQ_BFQQ_FLAG_non_blocking_wait_rq, /*
++ * waiting for a request
++ * without idling the device
++ */
++ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
++ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
++ BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */
++ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
++ BFQ_BFQQ_FLAG_IO_bound, /*
++ * bfqq has timed-out at least once
++ * having consumed at most 2/10 of
++ * its budget
++ */
++ BFQ_BFQQ_FLAG_in_large_burst, /*
++ * bfqq activated in a large burst,
++ * see comments to bfq_handle_burst.
++ */
++ BFQ_BFQQ_FLAG_softrt_update, /*
++ * may need softrt-next-start
++ * update
++ */
++ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
++ BFQ_BFQQ_FLAG_split_coop /* shared bfqq will be split */
++};
++
++#define BFQ_BFQQ_FNS(name) \
++static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
++{ \
++ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \
++}
++
++BFQ_BFQQ_FNS(just_created);
++BFQ_BFQQ_FNS(busy);
++BFQ_BFQQ_FNS(wait_request);
++BFQ_BFQQ_FNS(non_blocking_wait_rq);
++BFQ_BFQQ_FNS(must_alloc);
++BFQ_BFQQ_FNS(fifo_expire);
++BFQ_BFQQ_FNS(idle_window);
++BFQ_BFQQ_FNS(sync);
++BFQ_BFQQ_FNS(IO_bound);
++BFQ_BFQQ_FNS(in_large_burst);
++BFQ_BFQQ_FNS(coop);
++BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(softrt_update);
++#undef BFQ_BFQQ_FNS
++
++/* Logging facilities. */
++#ifdef CONFIG_BFQ_REDIRECT_TO_CONSOLE
++
++static const char *checked_dev_name(const struct device *dev)
++{
++ static const char nodev[] = "nodev";
++
++ if (dev)
++ return dev_name(dev);
++
++ return nodev;
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ assert_spin_locked((bfqd)->queue->queue_lock); \
++ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
++ pr_crit("%s bfq%d%c %s " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ __pbuf, ##args); \
++} while (0)
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
++ pr_crit("%s %s " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ __pbuf, ##args); \
++} while (0)
++
++#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ pr_crit("%s bfq%d%c " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ ##args)
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
++
++#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++#define bfq_log(bfqd, fmt, args...) \
++ pr_crit("%s bfq " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ ##args)
++
++#else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ assert_spin_locked((bfqd)->queue->queue_lock); \
++ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
++ (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ __pbuf, ##args); \
++} while (0)
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
++ blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
++} while (0)
++
++#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ ##args)
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
++
++#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++
++#define bfq_log(bfqd, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++#endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++
++/* Expiration reasons. */
++enum bfqq_expiration {
++ BFQ_BFQQ_TOO_IDLE = 0, /*
++ * queue has been idling for
++ * too long
++ */
++ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */
++ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */
++ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */
++ BFQ_BFQQ_PREEMPTED /* preemption in progress */
++};
++
++
++struct bfqg_stats {
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ /* number of ios merged */
++ struct blkg_rwstat merged;
++ /* total time spent on device in ns, may not be accurate w/ queueing */
++ struct blkg_rwstat service_time;
++ /* total time spent waiting in scheduler queue in ns */
++ struct blkg_rwstat wait_time;
++ /* number of IOs queued up */
++ struct blkg_rwstat queued;
++ /* total disk time and nr sectors dispatched by this group */
++ struct blkg_stat time;
++ /* sum of number of ios queued across all samples */
++ struct blkg_stat avg_queue_size_sum;
++ /* count of samples taken for average */
++ struct blkg_stat avg_queue_size_samples;
++ /* how many times this group has been removed from service tree */
++ struct blkg_stat dequeue;
++ /* total time spent waiting for it to be assigned a timeslice. */
++ struct blkg_stat group_wait_time;
++ /* time spent idling for this blkcg_gq */
++ struct blkg_stat idle_time;
++ /* total time with empty current active q with other requests queued */
++ struct blkg_stat empty_time;
++ /* fields after this shouldn't be cleared on stat reset */
++ uint64_t start_group_wait_time;
++ uint64_t start_idle_time;
++ uint64_t start_empty_time;
++ uint16_t flags;
++#endif
++};
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++/*
++ * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
++ *
++ * @ps: @blkcg_policy_storage that this structure inherits
++ * @weight: weight of the bfq_group
++ */
++struct bfq_group_data {
++ /* must be the first member */
++ struct blkcg_policy_data pd;
++
++ unsigned int weight;
++};
++
++/**
++ * struct bfq_group - per (device, cgroup) data structure.
++ * @entity: schedulable entity to insert into the parent group sched_data.
++ * @sched_data: own sched_data, to contain child entities (they may be
++ * both bfq_queues and bfq_groups).
++ * @bfqd: the bfq_data for the device this group acts upon.
++ * @async_bfqq: array of async queues for all the tasks belonging to
++ * the group, one queue per ioprio value per ioprio_class,
++ * except for the idle class that has only one queue.
++ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
++ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
++ * to avoid too many special cases during group creation/
++ * migration.
++ * @active_entities: number of active entities belonging to the group;
++ * unused for the root group. Used to know whether there
++ * are groups with more than one active @bfq_entity
++ * (see the comments to the function
++ * bfq_bfqq_may_idle()).
++ * @rq_pos_tree: rbtree sorted by next_request position, used when
++ * determining if two or more queues have interleaving
++ * requests (see bfq_find_close_cooperator()).
++ *
++ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
++ * there is a set of bfq_groups, each one collecting the lower-level
++ * entities belonging to the group that are acting on the same device.
++ *
++ * Locking works as follows:
++ * o @bfqd is protected by the queue lock, RCU is used to access it
++ * from the readers.
++ * o All the other fields are protected by the @bfqd queue lock.
++ */
++struct bfq_group {
++ /* must be the first member */
++ struct blkg_policy_data pd;
++
++ struct bfq_entity entity;
++ struct bfq_sched_data sched_data;
++
++ void *bfqd;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct bfq_entity *my_entity;
++
++ int active_entities;
++
++ struct rb_root rq_pos_tree;
++
++ struct bfqg_stats stats;
++};
++
++#else
++struct bfq_group {
++ struct bfq_sched_data sched_data;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct rb_root rq_pos_tree;
++};
++#endif
++
++static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
++
++static unsigned int bfq_class_idx(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ return bfqq ? bfqq->ioprio_class - 1 :
++ BFQ_DEFAULT_GRP_CLASS - 1;
++}
++
++static struct bfq_service_tree *
++bfq_entity_service_tree(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sched_data = entity->sched_data;
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ unsigned int idx = bfq_class_idx(entity);
++
++ BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
++ BUG_ON(sched_data == NULL);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "entity_service_tree %p %d",
++ sched_data->service_tree + idx, idx);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "entity_service_tree %p %d",
++ sched_data->service_tree + idx, idx);
++ }
++#endif
++ return sched_data->service_tree + idx;
++}
++
++static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
++{
++ return bic->bfqq[is_sync];
++}
++
++static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq,
++ bool is_sync)
++{
++ bic->bfqq[is_sync] = bfqq;
++}
++
++static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
++{
++ return bic->icq.q->elevator->elevator_data;
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++
++static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *group_entity = bfqq->entity.parent;
++
++ if (!group_entity)
++ group_entity = &bfqq->bfqd->root_group->entity;
++
++ return container_of(group_entity, struct bfq_group, entity);
++}
++
++#else
++
++static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
++{
++ return bfqq->bfqd->root_group;
++}
++
++#endif
++
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio);
++static void bfq_put_queue(struct bfq_queue *bfqq);
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bio *bio, bool is_sync,
++ struct bfq_io_cq *bic);
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
++#endif
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
++
++#endif /* _BFQ_H */
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 8da66379f7ea..bf000c58644b 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -54,7 +54,7 @@ struct blk_stat_callback;
+ * Maximum number of blkcg policies allowed to be registered concurrently.
+ * Defined here to simplify include dependency.
+ */
+-#define BLKCG_MAX_POLS 3
++#define BLKCG_MAX_POLS 4
+
+ typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+
+
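+As a quick illustration of the initialization arithmetic above:
+bfq_init_queue() starts from an optimistic peak-rate guess of 2/3 of
+the fast reference rate, and bfq_init() biases the speed-class switch
+thresholds to 4/3 of the slow reference rates. The stand-alone sketch
+below merely replays that arithmetic with made-up numbers; the real
+R_slow[]/R_fast[] tables, and the code that later compares the measured
+peak rate against device_speed_thresh[], live in parts of the patch not
+quoted here.
+
+#include <stdio.h>
+
+int main(void)
+{
+	/* hypothetical reference rates, arbitrary units -- not BFQ's tables */
+	unsigned int r_slow[2] = { 1000, 10700 };	/* [rot, non-rot] */
+	unsigned int r_fast[2] = { 14000, 33000 };
+	int nonrot = 1;					/* pretend SSD */
+
+	/* optimistic initial estimate, as in bfq_init_queue() */
+	unsigned int peak_rate = r_fast[nonrot] * 2 / 3;
+	/* switch threshold biased towards the fast class, as in bfq_init() */
+	unsigned int thresh = (4 * r_slow[nonrot]) / 3;
+
+	printf("peak_rate guess = %u, fast/slow threshold = %u\n",
+	       peak_rate, thresh);
+	return 0;
+}
+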
+From 9916fed6c89c61a2b26053be04501784570bbec8 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 20 Jul 2017 10:46:39 +0200
+Subject: [PATCH 02/51] Add extra checks related to entity scheduling
+
+- extra checks related to ioprio-class changes
+- specific check on st->idle in __bfq_requeue_entity
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-sched.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index ac8991bca9fa..5ddf9af4261e 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -812,6 +812,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
+ }
+ #endif
+
++ BUG_ON(entity->tree && update_class_too);
+ BUG_ON(old_st->wsum < entity->weight);
+ old_st->wsum -= entity->weight;
+
+@@ -883,8 +884,10 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
+
+ new_st->wsum += entity->weight;
+
+- if (new_st != old_st)
++ if (new_st != old_st) {
++ BUG_ON(!update_class_too);
+ entity->start = new_st->vtime;
++ }
+ }
+
+ return new_st;
+@@ -993,6 +996,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ * tree, then it is safe to invoke next function with the last
+ * parameter set (see the comments on the function).
+ */
++ BUG_ON(entity->tree);
+ st = __bfq_entity_update_weight_prio(st, entity, true);
+ bfq_calc_finish(entity, entity->budget);
+
+@@ -1113,9 +1117,11 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
+ * check for that.
+ */
+ bfq_idle_extract(st, entity);
++ BUG_ON(entity->tree);
+ entity->start = bfq_gt(min_vstart, entity->finish) ?
+ min_vstart : entity->finish;
+ } else {
++ BUG_ON(entity->tree);
+ /*
+ * The finish time of the entity may be invalid, and
+ * it is in the past for sure, otherwise the queue
+@@ -1203,6 +1209,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
+ */
+ bfq_calc_finish(entity, entity->service);
+ entity->start = entity->finish;
++ BUG_ON(entity->tree && entity->tree == &st->idle);
+ BUG_ON(entity->tree && entity->tree != &st->active);
+ /*
+ * In addition, if the entity had more than one child
+
+From 8f5b2c25dcbe31dda524e85b921b3aa1fe11d111 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 21 Jul 2017 12:08:57 +0200
+Subject: [PATCH 03/51] block, bfq: reset in_service_entity if it becomes idle
+
+BFQ implements hierarchical scheduling by representing each group of
+queues with a generic parent entity. For each parent entity, BFQ
+maintains an in_service_entity pointer: if one of the child entities
+happens to be in service, in_service_entity points to it. The
+resetting of these pointers happens only on queue expirations: when
+the in-service queue is expired, i.e., stops being the queue in
+service, BFQ resets all in_service_entity pointers along the
+parent-entity path from this queue to the root entity.
+
+Functions handling the scheduling of entities assume, naturally, that
+in-service entities are active, i.e., have pending I/O requests (or,
+as a special case, even if they have no pending requests, they are
+expected to receive a new request very soon, with the scheduler idling
+the storage device while waiting for such an event). Unfortunately,
+the above resetting scheme of the in_service_entity pointers may cause
+this assumption to be violated. For example, the in-service queue may
+happen to remain without requests because of a request merge. In this
+case the queue does become idle, and all related data structures are
+updated accordingly. But in_service_entity still points to the queue
+in the parent entity. This inconsistency may even propagate to
+higher-level parent entities, if they happen to become idle as well,
+as a consequence of the leaf queue becoming idle. For this queue and
+parent entities, scheduling functions have an undefined behaviour,
+and, as reported, may easily lead to kernel crashes or hangs.
+
+This commit addresses this issue by simply resetting the
+in_service_entity field also when it is detected to point to an entity
+becoming idle (regardless of why the entity becomes idle).
+
+Reported-by: Laurentiu Nicola <lnicola@dend.ro>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Laurentiu Nicola <lnicola@dend.ro>
+---
+ block/bfq-sched.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 5ddf9af4261e..a07a06eb5c72 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1336,8 +1336,10 @@ static bool __bfq_deactivate_entity(struct bfq_entity *entity,
+
+ BUG_ON(is_in_service && entity->tree && entity->tree != &st->active);
+
+- if (is_in_service)
++ if (is_in_service) {
+ bfq_calc_finish(entity, entity->service);
++ sd->in_service_entity = NULL;
++ }
+
+ if (entity->tree == &st->active)
+ bfq_active_extract(st, entity);
+
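+The reset added above can be pictured with a tiny toy model (made-up
+types, not BFQ's actual structures): a parent scheduler node keeps a
+pointer to the child currently in service, and deactivating that child
+must also clear the pointer, precisely because the scheduling code
+assumes an in-service child is still active.
+
+struct toy_entity {
+	int active;				/* has pending work */
+};
+
+struct toy_sched_data {
+	struct toy_entity *in_service;		/* child in service, if any */
+};
+
+void toy_deactivate(struct toy_sched_data *sd, struct toy_entity *e)
+{
+	e->active = 0;
+	/* the gist of the fix: never leave a dangling in-service pointer */
+	if (sd->in_service == e)
+		sd->in_service = NULL;
+}
+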
+From 600ea668e2d340c95724bcf981d88812d6900342 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 28 Jul 2017 21:09:51 +0200
+Subject: [PATCH 04/51] block, bfq: consider also in_service_entity to state
+ whether an entity is active
+
+Groups of BFQ queues are represented by generic entities in BFQ. When
+a queue belonging to a parent entity is deactivated, the parent entity
+may need to be deactivated too, in case the deactivated queue was the
+only active queue for the parent entity. This deactivation may need to
+be propagated upwards if the entity belongs, in its turn, to a further
+higher-level entity, and so on. In particular, the upward propagation
+of deactivation stops at the first parent entity that remains active
+even if one of its child entities has been deactivated.
+
+To decide whether the last non-deactivation condition holds for a
+parent entity, BFQ checks whether the field next_in_service is still
+not NULL for the parent entity, after the deactivation of one of its
+child entities. If it is not NULL, then there are certainly other active
+entities in the parent entity, and deactivations can stop.
+
+Unfortunately, this check misses a corner case: if in_service_entity
+is not NULL, then next_in_service may happen to be NULL, although the
+parent entity is evidently active. This happens if: 1) the entity
+pointed by in_service_entity is the only active entity in the parent
+entity, and 2) according to the definition of next_in_service, the
+in_service_entity cannot be considered as next_in_service. See the
+comments on the definition of next_in_service for details on this
+second point.
+
+Hitting the above corner case causes crashes.
+
+To address this issue, this commit:
+1) Extends the above check, which currently considers only
+next_in_service, so that both next_in_service and in_service_entity
+are checked (if either of them is not NULL, then no further
+deactivation is performed)
+2) Improves the (important) comments on how next_in_service is defined
+and updated; in particular it fixes a few rather obscure paragraphs
+
+Reported-by: Eric Wheeler <bfq-sched@lists.ewheeler.net>
+Reported-by: Rick Yiu <rick_yiu@htc.com>
+Reported-by: Tom X Nguyen <tom81094@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Eric Wheeler <bfq-sched@lists.ewheeler.net>
+Tested-by: Rick Yiu <rick_yiu@htc.com>
+Tested-by: Laurentiu Nicola <lnicola@dend.ro>
+Tested-by: Tom X Nguyen <tom81094@gmail.com>
+---
+ block/bfq-sched.c | 140 ++++++++++++++++++++++++++++++------------------------
+ block/bfq.h | 23 +++++++--
+ 2 files changed, 95 insertions(+), 68 deletions(-)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index a07a06eb5c72..5c0f9290a79c 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -196,21 +196,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
+
+ /*
+ * This function tells whether entity stops being a candidate for next
+- * service, according to the following logic.
++ * service, according to the restrictive definition of the field
++ * next_in_service. In particular, this function is invoked for an
++ * entity that is about to be set in service.
+ *
+- * This function is invoked for an entity that is about to be set in
+- * service. If such an entity is a queue, then the entity is no longer
+- * a candidate for next service (i.e, a candidate entity to serve
+- * after the in-service entity is expired). The function then returns
+- * true.
++ * If entity is a queue, then the entity is no longer a candidate for
++ * next service according to the that definition, because entity is
++ * about to become the in-service queue. This function then returns
++ * true if entity is a queue.
+ *
+- * In contrast, the entity could stil be a candidate for next service
+- * if it is not a queue, and has more than one child. In fact, even if
+- * one of its children is about to be set in service, other children
+- * may still be the next to serve. As a consequence, a non-queue
+- * entity is not a candidate for next-service only if it has only one
+- * child. And only if this condition holds, then the function returns
+- * true for a non-queue entity.
++ * In contrast, entity could still be a candidate for next service if
++ * it is not a queue, and has more than one active child. In fact,
++ * even if one of its children is about to be set in service, other
++ * active children may still be the next to serve, for the parent
++ * entity, even according to the above definition. As a consequence, a
++ * non-queue entity is not a candidate for next-service only if it has
++ * only one active child. And only if this condition holds, then this
++ * function returns true for a non-queue entity.
+ */
+ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
+ {
+@@ -223,6 +225,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
+
+ BUG_ON(bfqg == ((struct bfq_data *)(bfqg->bfqd))->root_group);
+ BUG_ON(bfqg->active_entities == 0);
++ /*
++ * The field active_entities does not always contain the
++ * actual number of active children entities: it happens to
++ * not account for the in-service entity in case the latter is
++ * removed from its active tree (which may get done after
++ * invoking the function bfq_no_longer_next_in_service in
++ * bfq_get_next_queue). Fortunately, here, i.e., while
++ * bfq_no_longer_next_in_service is not yet completed in
++ * bfq_get_next_queue, bfq_active_extract has not yet been
++ * invoked, and thus active_entities still coincides with the
++ * actual number of active entities.
++ */
+ if (bfqg->active_entities == 1)
+ return true;
+
+@@ -1089,7 +1103,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ * one of its children receives a new request.
+ *
+ * Basically, this function updates the timestamps of entity and
+- * inserts entity into its active tree, ater possible extracting it
++ * inserts entity into its active tree, after possibly extracting it
+ * from its idle tree.
+ */
+ static void __bfq_activate_entity(struct bfq_entity *entity,
+@@ -1213,7 +1227,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
+ BUG_ON(entity->tree && entity->tree != &st->active);
+ /*
+ * In addition, if the entity had more than one child
+- * when set in service, then was not extracted from
++ * when set in service, then it was not extracted from
+ * the active tree. This implies that the position of
+ * the entity in the active tree may need to be
+ * changed now, because we have just updated the start
+@@ -1221,9 +1235,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
+ * time in a moment (the requeueing is then, more
+ * precisely, a repositioning in this case). To
+ * implement this repositioning, we: 1) dequeue the
+- * entity here, 2) update the finish time and
+- * requeue the entity according to the new
+- * timestamps below.
++ * entity here, 2) update the finish time and requeue
++ * the entity according to the new timestamps below.
+ */
+ if (entity->tree)
+ bfq_active_extract(st, entity);
+@@ -1270,9 +1283,9 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
+
+
+ /**
+- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
+- * and activate, requeue or reposition all ancestors
+- * for which such an update becomes necessary.
++ * bfq_activate_requeue_entity - activate or requeue an entity representing a bfq_queue,
++ * and activate, requeue or reposition all ancestors
++ * for which such an update becomes necessary.
+ * @entity: the entity to activate.
+ * @non_blocking_wait_rq: true if this entity was waiting for a request
+ * @requeue: true if this is a requeue, which implies that bfqq is
+@@ -1308,9 +1321,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
+ * @ins_into_idle_tree: if false, the entity will not be put into the
+ * idle tree.
+ *
+- * Deactivates an entity, independently from its previous state. Must
++ * Deactivates an entity, independently of its previous state. Must
+ * be invoked only if entity is on a service tree. Extracts the entity
+- * from that tree, and if necessary and allowed, puts it on the idle
++ * from that tree, and if necessary and allowed, puts it into the idle
+ * tree.
+ */
+ static bool __bfq_deactivate_entity(struct bfq_entity *entity,
+@@ -1359,7 +1372,7 @@ static bool __bfq_deactivate_entity(struct bfq_entity *entity,
+ /**
+ * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
+ * @entity: the entity to deactivate.
+- * @ins_into_idle_tree: true if the entity can be put on the idle tree
++ * @ins_into_idle_tree: true if the entity can be put into the idle tree
+ */
+ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ bool ins_into_idle_tree,
+@@ -1406,16 +1419,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ */
+ bfq_update_next_in_service(sd, NULL);
+
+- if (sd->next_in_service) {
++ if (sd->next_in_service || sd->in_service_entity) {
+ /*
+- * The parent entity is still backlogged,
+- * because next_in_service is not NULL. So, no
+- * further upwards deactivation must be
+- * performed. Yet, next_in_service has
+- * changed. Then the schedule does need to be
+- * updated upwards.
++ * The parent entity is still active, because
++ * either next_in_service or in_service_entity
++ * is not NULL. So, no further upwards
++ * deactivation must be performed. Yet,
++ * next_in_service has changed. Then the
++ * schedule does need to be updated upwards.
++ *
++ * NOTE If in_service_entity is not NULL, then
++ * next_in_service may happen to be NULL,
++ * although the parent entity is evidently
++ * active. This happens if 1) the entity
++ * pointed by in_service_entity is the only
++ * active entity in the parent entity, and 2)
++ * according to the definition of
++ * next_in_service, the in_service_entity
++ * cannot be considered as
++ * next_in_service. See the comments on the
++ * definition of next_in_service for details.
+ */
+ BUG_ON(sd->next_in_service == entity);
++ BUG_ON(sd->in_service_entity == entity);
+ break;
+ }
+
+@@ -1806,45 +1832,33 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+
+ /*
+ * If entity is no longer a candidate for next
+- * service, then we extract it from its active tree,
+- * for the following reason. To further boost the
+- * throughput in some special case, BFQ needs to know
+- * which is the next candidate entity to serve, while
+- * there is already an entity in service. In this
+- * respect, to make it easy to compute/update the next
+- * candidate entity to serve after the current
+- * candidate has been set in service, there is a case
+- * where it is necessary to extract the current
+- * candidate from its service tree. Such a case is
+- * when the entity just set in service cannot be also
+- * a candidate for next service. Details about when
+- * this conditions holds are reported in the comments
+- * on the function bfq_no_longer_next_in_service()
+- * invoked below.
++ * service, then it must be extracted from its active
++ * tree, so as to make sure that it won't be
++ * considered when computing next_in_service. See the
++ * comments on the function
++ * bfq_no_longer_next_in_service() for details.
+ */
+ if (bfq_no_longer_next_in_service(entity))
+ bfq_active_extract(bfq_entity_service_tree(entity),
+ entity);
+
+ /*
+- * For the same reason why we may have just extracted
+- * entity from its active tree, we may need to update
+- * next_in_service for the sched_data of entity too,
+- * regardless of whether entity has been extracted.
+- * In fact, even if entity has not been extracted, a
+- * descendant entity may get extracted. Such an event
+- * would cause a change in next_in_service for the
+- * level of the descendant entity, and thus possibly
+- * back to upper levels.
++ * Even if entity is not to be extracted according to
++ * the above check, a descendant entity may get
++ * extracted in one of the next iterations of this
++ * loop. Such an event could cause a change in
++ * next_in_service for the level of the descendant
++ * entity, and thus possibly back to this level.
+ *
+- * We cannot perform the resulting needed update
+- * before the end of this loop, because, to know which
+- * is the correct next-to-serve candidate entity for
+- * each level, we need first to find the leaf entity
+- * to set in service. In fact, only after we know
+- * which is the next-to-serve leaf entity, we can
+- * discover whether the parent entity of the leaf
+- * entity becomes the next-to-serve, and so on.
++ * However, we cannot perform the resulting needed
++ * update of next_in_service for this level before the
++ * end of the whole loop, because, to know which is
++ * the correct next-to-serve candidate entity for each
++ * level, we need first to find the leaf entity to set
++ * in service. In fact, only after we know which is
++ * the next-to-serve leaf entity, we can discover
++ * whether the parent entity of the leaf entity
++ * becomes the next-to-serve, and so on.
+ */
+
+ /* Log some information */
+diff --git a/block/bfq.h b/block/bfq.h
+index f5751ea59d98..ebd9688b9f61 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -68,17 +68,30 @@ struct bfq_service_tree {
+ *
+ * bfq_sched_data is the basic scheduler queue. It supports three
+ * ioprio_classes, and can be used either as a toplevel queue or as an
+- * intermediate queue on a hierarchical setup. @next_in_service
+- * points to the active entity of the sched_data service trees that
+- * will be scheduled next. It is used to reduce the number of steps
+- * needed for each hierarchical-schedule update.
++ * intermediate queue in a hierarchical setup.
+ *
+ * The supported ioprio_classes are the same as in CFQ, in descending
+ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
+ * Requests from higher priority queues are served before all the
+ * requests from lower priority queues; among requests of the same
+ * queue requests are served according to B-WF2Q+.
+- * All the fields are protected by the queue lock of the containing bfqd.
++ *
++ * The schedule is implemented by the service trees, plus the field
++ * @next_in_service, which points to the entity on the active trees
++ * that will be served next, if 1) no changes in the schedule occurs
++ * before the current in-service entity is expired, 2) the in-service
++ * queue becomes idle when it expires, and 3) if the entity pointed by
++ * in_service_entity is not a queue, then the in-service child entity
++ * of the entity pointed by in_service_entity becomes idle on
++ * expiration. This peculiar definition allows for the following
++ * optimization, not yet exploited: while a given entity is still in
++ * service, we already know which is the best candidate for next
++ * service among the other active entities in the same parent
++ * entity. We can then quickly compare the timestamps of the
++ * in-service entity with those of such best candidate.
++ *
++ * All the fields are protected by the queue lock of the containing
++ * bfqd.
+ */
+ struct bfq_sched_data {
+ struct bfq_entity *in_service_entity; /* entity in service */
+
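The corner case handled above, in_service_entity set while
next_in_service is NULL, can be sketched with a few standalone
lines. The toy_* names below are hypothetical stand-ins for
bfq_sched_data, written only to show the shape of the check; in the
kernel the test lives in bfq_deactivate_entity(), as the hunk above
shows.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_sched_data {
	const char *name;
	void *in_service_entity;
	void *next_in_service;
	struct toy_sched_data *parent;
};

/* A node still counts as active if either field is set. */
static bool toy_sd_active(const struct toy_sched_data *sd)
{
	return sd->next_in_service != NULL || sd->in_service_entity != NULL;
}

/* Propagate a deactivation upwards, stopping at the first active ancestor. */
static void toy_deactivate_upwards(struct toy_sched_data *sd)
{
	for (; sd; sd = sd->parent) {
		if (toy_sd_active(sd)) {
			printf("%s is still active, stop here\n", sd->name);
			return;
		}
		printf("%s became idle, keep propagating\n", sd->name);
	}
}

int main(void)
{
	int queue = 0; /* stands in for the only active child queue */
	struct toy_sched_data root  = { "root", NULL, NULL, NULL };
	struct toy_sched_data group = { "group", &queue, NULL, &root };

	root.in_service_entity = &group; /* group holds the in-service queue */

	/*
	 * group has in_service_entity set but next_in_service == NULL:
	 * checking next_in_service alone would wrongly continue the
	 * deactivation; the combined check stops it at group.
	 */
	toy_deactivate_upwards(&group);
	return 0;
}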
+From 6b5effd10bc6711a862e7cbd7cd2dd0146defa01 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 4 May 2017 10:53:43 +0200
+Subject: [PATCH 05/51] block, bfq: improve and refactor throughput-boosting
+ logic
+
+When a queue associated with a process remains empty, there are cases
+where throughput gets boosted if the device is idled to await the
+arrival of a new I/O request for that queue. Currently, BFQ assumes
+that one of these cases is when the device has no internal queueing
+(regardless of the properties of the I/O being served). Unfortunately,
+this condition has proved to be too general. So, this commit refines it
+as "the device has no internal queueing and is rotational".
+
+This refinement provides a significant throughput boost with random
+I/O, on flash-based storage without internal queueing. For example, on
+a HiKey board, throughput increases by up to 125%, growing, e.g., from
+6.9MB/s to 15.6MB/s with two or three random readers in parallel.
+
+This commit also refactors the code related to device idling, for the
+following reason. Finding the change that provides the above large
+improvement has been slightly more difficult than it had to be,
+because the logic that decides whether to idle the device is still
+scattered across three functions. Almost all of the logic is in the
+function bfq_bfqq_may_idle, but (1) part of the decision is made in
+bfq_update_idle_window, and (2) the function bfq_bfqq_must_idle may
+switch off idling regardless of the output of bfq_bfqq_may_idle. In
+addition, both bfq_update_idle_window and bfq_bfqq_must_idle make
+their decisions as a function of parameters that are used, for similar
+purposes, also in bfq_bfqq_may_idle. This commit addresses this issue
+by moving all the logic into bfq_bfqq_may_idle.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+---
+ block/bfq-sq-iosched.c | 141 +++++++++++++++++++++++++++----------------------
+ block/bfq.h | 12 ++---
+ 2 files changed, 83 insertions(+), 70 deletions(-)
+
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 65e7c7e77f3c..30d019fc67e0 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -684,10 +684,10 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ unsigned int old_wr_coeff;
+ bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
+
+- if (bic->saved_idle_window)
+- bfq_mark_bfqq_idle_window(bfqq);
++ if (bic->saved_has_short_ttime)
++ bfq_mark_bfqq_has_short_ttime(bfqq);
+ else
+- bfq_clear_bfqq_idle_window(bfqq);
++ bfq_clear_bfqq_has_short_ttime(bfqq);
+
+ if (bic->saved_IO_bound)
+ bfq_mark_bfqq_IO_bound(bfqq);
+@@ -2047,7 +2047,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ if (!bic)
+ return;
+
+- bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
++ bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
+@@ -3214,9 +3214,9 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ }
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "expire (%d, slow %d, num_disp %d, idle_win %d, weight %d)",
++ "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)",
+ reason, slow, bfqq->dispatched,
+- bfq_bfqq_idle_window(bfqq), entity->weight);
++ bfq_bfqq_has_short_ttime(bfqq), entity->weight);
+
+ /*
+ * Increase, decrease or leave budget unchanged according to
+@@ -3298,7 +3298,10 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
+ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ {
+ struct bfq_data *bfqd = bfqq->bfqd;
+- bool idling_boosts_thr, idling_boosts_thr_without_issues,
++ bool rot_without_queueing =
++ !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
++ bfqq_sequential_and_IO_bound,
++ idling_boosts_thr, idling_boosts_thr_without_issues,
+ idling_needed_for_service_guarantees,
+ asymmetric_scenario;
+
+@@ -3306,27 +3309,44 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ return true;
+
+ /*
++ * Idling is performed only if slice_idle > 0. In addition, we
++ * do not idle if
++ * (a) bfqq is async
++ * (b) bfqq is in the idle io prio class: in this case we do
++ * not idle because we want to minimize the bandwidth that
++ * queues in this class can steal from higher-priority queues
++ */
++ if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
++ bfq_class_idle(bfqq))
++ return false;
++
++ bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
++ bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
++ /*
+ * The next variable takes into account the cases where idling
+ * boosts the throughput.
+ *
+ * The value of the variable is computed considering, first, that
+ * idling is virtually always beneficial for the throughput if:
+- * (a) the device is not NCQ-capable, or
+- * (b) regardless of the presence of NCQ, the device is rotational
+- * and the request pattern for bfqq is I/O-bound and sequential.
++ * (a) the device is not NCQ-capable and rotational, or
++ * (b) regardless of the presence of NCQ, the device is rotational and
++ * the request pattern for bfqq is I/O-bound and sequential, or
++ * (c) regardless of whether it is rotational, the device is
++ * not NCQ-capable and the request pattern for bfqq is
++ * I/O-bound and sequential.
+ *
+ * Secondly, and in contrast to the above item (b), idling an
+ * NCQ-capable flash-based device would not boost the
+ * throughput even with sequential I/O; rather it would lower
+ * the throughput in proportion to how fast the device
+ * is. Accordingly, the next variable is true if any of the
+- * above conditions (a) and (b) is true, and, in particular,
+- * happens to be false if bfqd is an NCQ-capable flash-based
+- * device.
++ * above conditions (a), (b) or (c) is true, and, in
++ * particular, happens to be false if bfqd is an NCQ-capable
++ * flash-based device.
+ */
+- idling_boosts_thr = !bfqd->hw_tag ||
+- (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
+- bfq_bfqq_idle_window(bfqq));
++ idling_boosts_thr = rot_without_queueing ||
++ ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
++ bfqq_sequential_and_IO_bound);
+
+ /*
+ * The value of the next variable,
+@@ -3497,12 +3517,10 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
+
+ /*
+- * We have now all the components we need to compute the return
+- * value of the function, which is true only if both the following
+- * conditions hold:
+- * 1) bfqq is sync, because idling make sense only for sync queues;
+- * 2) idling either boosts the throughput (without issues), or
+- * is necessary to preserve service guarantees.
++ * We have now all the components we need to compute the
++ * return value of the function, which is true only if idling
++ * either boosts the throughput (without issues), or is
++ * necessary to preserve service guarantees.
+ */
+ bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d",
+ bfq_bfqq_sync(bfqq), idling_boosts_thr);
+@@ -3514,9 +3532,8 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ bfq_bfqq_IO_bound(bfqq),
+ idling_needed_for_service_guarantees);
+
+- return bfq_bfqq_sync(bfqq) &&
+- (idling_boosts_thr_without_issues ||
+- idling_needed_for_service_guarantees);
++ return idling_boosts_thr_without_issues ||
++ idling_needed_for_service_guarantees;
+ }
+
+ /*
+@@ -3532,10 +3549,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ */
+ static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
+ {
+- struct bfq_data *bfqd = bfqq->bfqd;
+-
+- return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
+- bfq_bfqq_may_idle(bfqq);
++ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
+ }
+
+ /*
+@@ -3994,7 +4008,6 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
+ case IOPRIO_CLASS_IDLE:
+ bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
+ bfqq->new_ioprio = 7;
+- bfq_clear_bfqq_idle_window(bfqq);
+ break;
+ }
+
+@@ -4058,8 +4071,14 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_set_next_ioprio_data(bfqq, bic);
+
+ if (is_sync) {
++ /*
++ * No need to mark as has_short_ttime if in
++ * idle_class, because no device idling is performed
++ * for queues in idle class
++ */
+ if (!bfq_class_idle(bfqq))
+- bfq_mark_bfqq_idle_window(bfqq);
++ /* tentatively mark as has_short_ttime */
++ bfq_mark_bfqq_has_short_ttime(bfqq);
+ bfq_mark_bfqq_sync(bfqq);
+ bfq_mark_bfqq_just_created(bfqq);
+ } else
+@@ -4195,18 +4214,19 @@ bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
+ }
+
+-/*
+- * Disable idle window if the process thinks too long or seeks so much that
+- * it doesn't matter.
+- */
+-static void bfq_update_idle_window(struct bfq_data *bfqd,
+- struct bfq_queue *bfqq,
+- struct bfq_io_cq *bic)
++static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
+ {
+- int enable_idle;
++ bool has_short_ttime = true;
+
+- /* Don't idle for async or idle io prio class. */
+- if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
++ /*
++ * No need to update has_short_ttime if bfqq is async or in
++ * idle io prio class, or if bfq_slice_idle is zero, because
++ * no device idling is performed for bfqq in this case.
++ */
++ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
++ bfqd->bfq_slice_idle == 0)
+ return;
+
+ /* Idle window just restored, statistics are meaningless. */
+@@ -4214,27 +4234,22 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
+ bfqd->bfq_wr_min_idle_time))
+ return;
+
+- enable_idle = bfq_bfqq_idle_window(bfqq);
+-
++ /* Think time is infinite if no process is linked to
++ * bfqq. Otherwise check average think time to
++ * decide whether to mark as has_short_ttime
++ */
+ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
+- bfqd->bfq_slice_idle == 0 ||
+- (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
+- bfqq->wr_coeff == 1))
+- enable_idle = 0;
+- else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
+- if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
+- bfqq->wr_coeff == 1)
+- enable_idle = 0;
+- else
+- enable_idle = 1;
+- }
+- bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
+- enable_idle);
++ (bfq_sample_valid(bic->ttime.ttime_samples) &&
++ bic->ttime.ttime_mean > bfqd->bfq_slice_idle))
++ has_short_ttime = false;
++
++ bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
++ has_short_ttime);
+
+- if (enable_idle)
+- bfq_mark_bfqq_idle_window(bfqq);
++ if (has_short_ttime)
++ bfq_mark_bfqq_has_short_ttime(bfqq);
+ else
+- bfq_clear_bfqq_idle_window(bfqq);
++ bfq_clear_bfqq_has_short_ttime(bfqq);
+ }
+
+ /*
+@@ -4250,14 +4265,12 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfqq->meta_pending++;
+
+ bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_has_short_ttime(bfqd, bfqq, bic);
+ bfq_update_io_seektime(bfqd, bfqq, rq);
+- if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
+- !BFQQ_SEEKY(bfqq))
+- bfq_update_idle_window(bfqd, bfqq, bic);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "rq_enqueued: idle_window=%d (seeky %d)",
+- bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq));
++ "rq_enqueued: has_short_ttime=%d (seeky %d)",
++ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
+
+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+
+diff --git a/block/bfq.h b/block/bfq.h
+index ebd9688b9f61..34fc4697fd89 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -349,11 +349,11 @@ struct bfq_io_cq {
+ #endif
+
+ /*
+- * Snapshot of the idle window before merging; taken to
+- * remember this value while the queue is merged, so as to be
+- * able to restore it in case of split.
++ * Snapshot of the has_short_time flag before merging; taken
++ * to remember its value while the queue is merged, so as to
++ * be able to restore it in case of split.
+ */
+- bool saved_idle_window;
++ bool saved_has_short_ttime;
+ /*
+ * Same purpose as the previous two fields for the I/O bound
+ * classification of a queue.
+@@ -610,7 +610,7 @@ enum bfqq_state_flags {
+ */
+ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
+ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
+- BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */
++ BFQ_BFQQ_FLAG_has_short_ttime, /* queue has a short think time */
+ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
+ BFQ_BFQQ_FLAG_IO_bound, /*
+ * bfqq has timed-out at least once
+@@ -649,7 +649,7 @@ BFQ_BFQQ_FNS(wait_request);
+ BFQ_BFQQ_FNS(non_blocking_wait_rq);
+ BFQ_BFQQ_FNS(must_alloc);
+ BFQ_BFQQ_FNS(fifo_expire);
+-BFQ_BFQQ_FNS(idle_window);
++BFQ_BFQQ_FNS(has_short_ttime);
+ BFQ_BFQQ_FNS(sync);
+ BFQ_BFQQ_FNS(IO_bound);
+ BFQ_BFQQ_FNS(in_large_burst);
+
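The refined "idling boosts throughput" condition above can be reduced
to a small pure function for illustration. The helper below and its
boolean parameters are hypothetical, not kernel code; in the patch
the same expression is computed inside bfq_bfqq_may_idle() from bfqd
and bfqq state.

#include <stdbool.h>
#include <stdio.h>

/*
 * (a) rotational and without internal queueing, or
 * (b)/(c) either rotational or without internal queueing,
 *         provided the queue's I/O is sequential and I/O-bound.
 */
static bool idling_boosts_thr(bool rotational, bool has_internal_queueing,
			      bool seq_and_io_bound)
{
	bool rot_without_queueing = rotational && !has_internal_queueing;

	return rot_without_queueing ||
	       ((rotational || !has_internal_queueing) && seq_and_io_bound);
}

int main(void)
{
	/* NCQ-capable SSD, random I/O: idling does not pay off -> 0 */
	printf("%d\n", idling_boosts_thr(false, true, false));
	/* flash without internal queueing, sequential I/O-bound: case (c) -> 1 */
	printf("%d\n", idling_boosts_thr(false, false, true));
	/* rotational disk without NCQ: idling virtually always pays off -> 1 */
	printf("%d\n", idling_boosts_thr(true, false, false));
	return 0;
}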
+From b5e746fa99d961a5642cffb27c19a77e8b638007 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 19 Dec 2016 16:59:33 +0100
+Subject: [PATCH 06/51] FIRST BFQ-MQ COMMIT: Copy bfq-sq-iosched.c as
+ bfq-mq-iosched.c
+
+This commit introduces bfq-mq-iosched.c, the main source file that
+will contain the code of bfq for blk-mq. I tentatively name this
+version of bfq bfq-mq.
+
+For the moment, the file bfq-mq-iosched.c is just a copy of
+bfq-sq-iosched.c, i.e., of the main source file of bfq for blk.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 5392 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 5392 insertions(+)
+ create mode 100644 block/bfq-mq-iosched.c
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+new file mode 100644
+index 000000000000..30d019fc67e0
+--- /dev/null
++++ b/block/bfq-mq-iosched.c
+@@ -0,0 +1,5392 @@
++/*
++ * Budget Fair Queueing (BFQ) I/O scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ *
++ * BFQ is a proportional-share I/O scheduler, with some extra
++ * low-latency capabilities. BFQ also supports full hierarchical
++ * scheduling through cgroups. Next paragraphs provide an introduction
++ * on BFQ inner workings. Details on BFQ benefits and usage can be
++ * found in Documentation/block/bfq-iosched.txt.
++ *
++ * BFQ is a proportional-share storage-I/O scheduling algorithm based
++ * on the slice-by-slice service scheme of CFQ. But BFQ assigns
++ * budgets, measured in number of sectors, to processes instead of
++ * time slices. The device is not granted to the in-service process
++ * for a given time slice, but until it has exhausted its assigned
++ * budget. This change from the time to the service domain enables BFQ
++ * to distribute the device throughput among processes as desired,
++ * without any distortion due to throughput fluctuations, or to device
++ * internal queueing. BFQ uses an ad hoc internal scheduler, called
++ * B-WF2Q+, to schedule processes according to their budgets. More
++ * precisely, BFQ schedules queues associated with processes. Thanks to
++ * the accurate policy of B-WF2Q+, BFQ can afford to assign high
++ * budgets to I/O-bound processes issuing sequential requests (to
++ * boost the throughput), and yet guarantee a low latency to
++ * interactive and soft real-time applications.
++ *
++ * NOTE: if the main or only goal, with a given device, is to achieve
++ * the maximum-possible throughput at all times, then do switch off
++ * all low-latency heuristics for that device, by setting low_latency
++ * to 0.
++ *
++ * BFQ is described in [1], where also a reference to the initial, more
++ * theoretical paper on BFQ can be found. The interested reader can find
++ * in the latter paper full details on the main algorithm, as well as
++ * formulas of the guarantees and formal proofs of all the properties.
++ * With respect to the version of BFQ presented in these papers, this
++ * implementation adds a few more heuristics, such as the one that
++ * guarantees a low latency to soft real-time applications, and a
++ * hierarchical extension based on H-WF2Q+.
++ *
++ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
++ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
++ * complexity derives from the one introduced with EEVDF in [3].
++ *
++ * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
++ * Scheduler", Proceedings of the First Workshop on Mobile System
++ * Technologies (MST-2015), May 2015.
++ * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
++ *
++ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
++ *
++ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
++ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
++ * Oct 1997.
++ *
++ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
++ *
++ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
++ * First: A Flexible and Accurate Mechanism for Proportional Share
++ * Resource Allocation,'' technical report.
++ *
++ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/cgroup.h>
++#include <linux/elevator.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/ioprio.h>
++#include "blk.h"
++#include "bfq.h"
++
++/* Expiration time of sync (0) and async (1) requests, in ns. */
++static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
++
++/* Maximum backwards seek, in KiB. */
++static const int bfq_back_max = (16 * 1024);
++
++/* Penalty of a backwards seek, in number of sectors. */
++static const int bfq_back_penalty = 2;
++
++/* Idling period duration, in ns. */
++static u32 bfq_slice_idle = (NSEC_PER_SEC / 125);
++
++/* Minimum number of assigned budgets for which stats are safe to compute. */
++static const int bfq_stats_min_budgets = 194;
++
++/* Default maximum budget values, in sectors and number of requests. */
++static const int bfq_default_max_budget = (16 * 1024);
++
++/*
++ * Async to sync throughput distribution is controlled as follows:
++ * when an async request is served, the entity is charged the number
++ * of sectors of the request, multiplied by the factor below
++ */
++static const int bfq_async_charge_factor = 10;
++
++/* Default timeout values, in jiffies, approximating CFQ defaults. */
++static const int bfq_timeout = (HZ / 8);
++
++static struct kmem_cache *bfq_pool;
++
++/* Below this threshold (in ns), we consider thinktime immediate. */
++#define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
++
++/* hw_tag detection: parallel requests threshold and min samples needed. */
++#define BFQ_HW_QUEUE_THRESHOLD 4
++#define BFQ_HW_QUEUE_SAMPLES 32
++
++#define BFQQ_SEEK_THR (sector_t)(8 * 100)
++#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
++#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
++#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
++
++/* Min number of samples required to perform peak-rate update */
++#define BFQ_RATE_MIN_SAMPLES 32
++/* Min observation time interval required to perform a peak-rate update (ns) */
++#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC)
++/* Target observation time interval for a peak-rate update (ns) */
++#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
++
++/* Shift used for peak rate fixed precision calculations. */
++#define BFQ_RATE_SHIFT 16
++
++/*
++ * By default, BFQ computes the duration of the weight raising for
++ * interactive applications automatically, using the following formula:
++ * duration = (R / r) * T, where r is the peak rate of the device, and
++ * R and T are two reference parameters.
++ * In particular, R is the peak rate of the reference device (see below),
++ * and T is a reference time: given the systems that are likely to be
++ * installed on the reference device according to its speed class, T is
++ * about the maximum time needed, under BFQ and while reading two files in
++ * parallel, to load typical large applications on these systems.
++ * In practice, the slower/faster the device at hand is, the more/less it
++ * takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to interactive
++ * applications.
++ *
++ * BFQ uses four different reference pairs (R, T), depending on:
++ * . whether the device is rotational or non-rotational;
++ * . whether the device is slow, such as old or portable HDDs, as well as
++ * SD cards, or fast, such as newer HDDs and SSDs.
++ *
++ * The device's speed class is dynamically (re)detected in
++ * bfq_update_peak_rate() every time the estimated peak rate is updated.
++ *
++ * In the following definitions, R_slow[0]/R_fast[0] and
++ * T_slow[0]/T_fast[0] are the reference values for a slow/fast
++ * rotational device, whereas R_slow[1]/R_fast[1] and
++ * T_slow[1]/T_fast[1] are the reference values for a slow/fast
++ * non-rotational device. Finally, device_speed_thresh are the
++ * thresholds used to switch between speed classes. The reference
++ * rates are not the actual peak rates of the devices used as a
++ * reference, but slightly lower values. The reason for using these
++ * slightly lower values is that the peak-rate estimator tends to
++ * yield slightly lower values than the actual peak rate (it can yield
++ * the actual peak rate only if there is only one process doing I/O,
++ * and the process does sequential I/O).
++ *
++ * Both the reference peak rates and the thresholds are measured in
++ * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
++ */
++static int R_slow[2] = {1000, 10700};
++static int R_fast[2] = {14000, 33000};
++/*
++ * To improve readability, a conversion function is used to initialize the
++ * following arrays, which entails that they can be initialized only in a
++ * function.
++ */
++static int T_slow[2];
++static int T_fast[2];
++static int device_speed_thresh[2];
++
++#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
++ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
++
++#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
++
++static void bfq_schedule_dispatch(struct bfq_data *bfqd);
++
++#include "bfq-ioc.c"
++#include "bfq-sched.c"
++#include "bfq-cgroup-included.c"
++
++#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples) ((samples) > 80)
++
++/*
++ * Scheduler run of queue, if there are requests pending and no one in the
++ * driver that will restart queueing.
++ */
++static void bfq_schedule_dispatch(struct bfq_data *bfqd)
++{
++ if (bfqd->queued != 0) {
++ bfq_log(bfqd, "schedule dispatch");
++ kblockd_schedule_work(&bfqd->unplug_work);
++ }
++}
++
++/*
++ * Lifted from AS - choose which of rq1 and rq2 is best served now.
++ * We choose the request that is closest to the head right now. Distance
++ * behind the head is penalized and only allowed to a certain extent.
++ */
++static struct request *bfq_choose_req(struct bfq_data *bfqd,
++ struct request *rq1,
++ struct request *rq2,
++ sector_t last)
++{
++ sector_t s1, s2, d1 = 0, d2 = 0;
++ unsigned long back_max;
++#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
++#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
++ unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
++
++ if (!rq1 || rq1 == rq2)
++ return rq2;
++ if (!rq2)
++ return rq1;
++
++ if (rq_is_sync(rq1) && !rq_is_sync(rq2))
++ return rq1;
++ else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
++ return rq2;
++ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
++ return rq1;
++ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
++ return rq2;
++
++ s1 = blk_rq_pos(rq1);
++ s2 = blk_rq_pos(rq2);
++
++ /*
++ * By definition, 1KiB is 2 sectors.
++ */
++ back_max = bfqd->bfq_back_max * 2;
++
++ /*
++ * Strict one way elevator _except_ in the case where we allow
++ * short backward seeks which are biased as twice the cost of a
++ * similar forward seek.
++ */
++ if (s1 >= last)
++ d1 = s1 - last;
++ else if (s1 + back_max >= last)
++ d1 = (last - s1) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ1_WRAP;
++
++ if (s2 >= last)
++ d2 = s2 - last;
++ else if (s2 + back_max >= last)
++ d2 = (last - s2) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ2_WRAP;
++
++ /* Found required data */
++
++ /*
++ * By doing switch() on the bit mask "wrap" we avoid having to
++ * check two variables for all permutations: --> faster!
++ */
++ switch (wrap) {
++ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
++ if (d1 < d2)
++ return rq1;
++ else if (d2 < d1)
++ return rq2;
++
++ if (s1 >= s2)
++ return rq1;
++ else
++ return rq2;
++
++ case BFQ_RQ2_WRAP:
++ return rq1;
++ case BFQ_RQ1_WRAP:
++ return rq2;
++ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
++ default:
++ /*
++ * Since both rqs are wrapped,
++ * start with the one that's further behind head
++ * (--> only *one* back seek required),
++ * since back seek takes more time than forward.
++ */
++ if (s1 <= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++}
++
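/*
 * Worked example for bfq_choose_req() above, with illustrative
 * numbers only: the default bfq_back_max of 16 * 1024 KiB gives
 * back_max = 32768 sectors, and bfq_back_penalty is 2.  A request
 * sitting 1000 sectors behind the head therefore gets a distance of
 * 1000 * 2 = 2000, so it is preferred over a forward request more
 * than 2000 sectors ahead of the head, but loses to any forward
 * request closer than that.
 */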
++static struct bfq_queue *
++bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
++ sector_t sector, struct rb_node **ret_parent,
++ struct rb_node ***rb_link)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *bfqq = NULL;
++
++ parent = NULL;
++ p = &root->rb_node;
++ while (*p) {
++ struct rb_node **n;
++
++ parent = *p;
++ bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++
++ /*
++ * Sort strictly based on sector. Smallest to the left,
++ * largest to the right.
++ */
++ if (sector > blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_right;
++ else if (sector < blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_left;
++ else
++ break;
++ p = n;
++ bfqq = NULL;
++ }
++
++ *ret_parent = parent;
++ if (rb_link)
++ *rb_link = p;
++
++ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ (unsigned long long) sector,
++ bfqq ? bfqq->pid : 0);
++
++ return bfqq;
++}
++
++static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *__bfqq;
++
++ if (bfqq->pos_root) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++
++ if (bfq_class_idle(bfqq))
++ return;
++ if (!bfqq->next_rq)
++ return;
++
++ bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
++ blk_rq_pos(bfqq->next_rq), &parent, &p);
++ if (!__bfqq) {
++ rb_link_node(&bfqq->pos_node, parent, p);
++ rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
++ } else
++ bfqq->pos_root = NULL;
++}
++
++/*
++ * Tell whether there are active queues or groups with differentiated weights.
++ */
++static bool bfq_differentiated_weights(struct bfq_data *bfqd)
++{
++ /*
++ * For weights to differ, at least one of the trees must contain
++ * at least two nodes.
++ */
++ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
++ (bfqd->queue_weights_tree.rb_node->rb_left ||
++ bfqd->queue_weights_tree.rb_node->rb_right)
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ ) ||
++ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
++ (bfqd->group_weights_tree.rb_node->rb_left ||
++ bfqd->group_weights_tree.rb_node->rb_right)
++#endif
++ );
++}
++
++/*
++ * The following function returns true if every queue must receive the
++ * same share of the throughput (this condition is used when deciding
++ * whether idling may be disabled, see the comments in the function
++ * bfq_bfqq_may_idle()).
++ *
++ * Such a scenario occurs when:
++ * 1) all active queues have the same weight,
++ * 2) all active groups at the same level in the groups tree have the same
++ * weight,
++ * 3) all active groups at the same level in the groups tree have the same
++ * number of children.
++ *
++ * Unfortunately, keeping the necessary state for evaluating exactly the
++ * above symmetry conditions would be quite complex and time-consuming.
++ * Therefore this function evaluates, instead, the following stronger
++ * sub-conditions, for which it is much easier to maintain the needed
++ * state:
++ * 1) all active queues have the same weight,
++ * 2) all active groups have the same weight,
++ * 3) all active groups have at most one active child each.
++ * In particular, the last two conditions are always true if hierarchical
++ * support and the cgroups interface are not enabled, thus no state needs
++ * to be maintained in this case.
++ */
++static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
++{
++ return !bfq_differentiated_weights(bfqd);
++}
++
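/*
 * Illustrative example for bfq_symmetric_scenario(), with
 * hypothetical weights: if three active queues all have weight 100,
 * queue_weights_tree contains a single counter node (weight 100,
 * num_active 3), its root has no children, bfq_differentiated_weights()
 * returns false and the scenario is deemed symmetric.  If one queue
 * is re-weighted to 200, the tree gains a second node, the scenario
 * becomes asymmetric, and idling may be needed to preserve service
 * guarantees (see bfq_bfqq_may_idle()).
 */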
++/*
++ * If the weight-counter tree passed as input contains no counter for
++ * the weight of the input entity, then add that counter; otherwise just
++ * increment the existing counter.
++ *
++ * Note that weight-counter trees contain few nodes in mostly symmetric
++ * scenarios. For example, if all queues have the same weight, then the
++ * weight-counter tree for the queues may contain at most one node.
++ * This holds even if low_latency is on, because weight-raised queues
++ * are not inserted in the tree.
++ * In most scenarios, the rate at which nodes are created/destroyed
++ * should be low too.
++ */
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ struct rb_node **new = &(root->rb_node), *parent = NULL;
++
++ /*
++ * Do not insert if the entity is already associated with a
++ * counter, which happens if:
++ * 1) the entity is associated with a queue,
++ * 2) a request arrival has caused the queue to become both
++ * non-weight-raised, and hence change its weight, and
++ * backlogged; in this respect, each of the two events
++ * causes an invocation of this function,
++ * 3) this is the invocation of this function caused by the
++ * second event. This second invocation is actually useless,
++ * and we handle this fact by exiting immediately. More
++ * efficient or clearer solutions might possibly be adopted.
++ */
++ if (entity->weight_counter)
++ return;
++
++ while (*new) {
++ struct bfq_weight_counter *__counter = container_of(*new,
++ struct bfq_weight_counter,
++ weights_node);
++ parent = *new;
++
++ if (entity->weight == __counter->weight) {
++ entity->weight_counter = __counter;
++ goto inc_counter;
++ }
++ if (entity->weight < __counter->weight)
++ new = &((*new)->rb_left);
++ else
++ new = &((*new)->rb_right);
++ }
++
++ entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
++ GFP_ATOMIC);
++
++ /*
++ * In the unlucky event of an allocation failure, we just
++ * exit. This will cause the weight of entity to not be
++ * considered in bfq_differentiated_weights, which, in its
++ * turn, causes the scenario to be deemed wrongly symmetric in
++ * case entity's weight would have been the only weight making
++ * the scenario asymmetric. On the bright side, no unbalance
++ * will however occur when entity becomes inactive again (the
++ * invocation of this function is triggered by an activation
++ * of entity). In fact, bfq_weights_tree_remove does nothing
++ * if !entity->weight_counter.
++ */
++ if (unlikely(!entity->weight_counter))
++ return;
++
++ entity->weight_counter->weight = entity->weight;
++ rb_link_node(&entity->weight_counter->weights_node, parent, new);
++ rb_insert_color(&entity->weight_counter->weights_node, root);
++
++inc_counter:
++ entity->weight_counter->num_active++;
++}
++
++/*
++ * Decrement the weight counter associated with the entity, and, if the
++ * counter reaches 0, remove the counter from the tree.
++ * See the comments to the function bfq_weights_tree_add() for considerations
++ * about overhead.
++ */
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ if (!entity->weight_counter)
++ return;
++
++ BUG_ON(RB_EMPTY_ROOT(root));
++ BUG_ON(entity->weight_counter->weight != entity->weight);
++
++ BUG_ON(!entity->weight_counter->num_active);
++ entity->weight_counter->num_active--;
++ if (entity->weight_counter->num_active > 0)
++ goto reset_entity_pointer;
++
++ rb_erase(&entity->weight_counter->weights_node, root);
++ kfree(entity->weight_counter);
++
++reset_entity_pointer:
++ entity->weight_counter = NULL;
++}
++
++/*
++ * Return expired entry, or NULL to just start from scratch in rbtree.
++ */
++static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct request *rq;
++
++ if (bfq_bfqq_fifo_expire(bfqq))
++ return NULL;
++
++ bfq_mark_bfqq_fifo_expire(bfqq);
++
++ rq = rq_entry_fifo(bfqq->fifo.next);
++
++ if (rq == last || ktime_get_ns() < rq->fifo_time)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
++ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
++ return rq;
++}
++
++static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct rb_node *rbnext = rb_next(&last->rb_node);
++ struct rb_node *rbprev = rb_prev(&last->rb_node);
++ struct request *next, *prev = NULL;
++
++ BUG_ON(list_empty(&bfqq->fifo));
++
++ /* Follow expired path, else get first next available. */
++ next = bfq_check_fifo(bfqq, last);
++ if (next) {
++ BUG_ON(next == last);
++ return next;
++ }
++
++ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
++
++ if (rbprev)
++ prev = rb_entry_rq(rbprev);
++
++ if (rbnext)
++ next = rb_entry_rq(rbnext);
++ else {
++ rbnext = rb_first(&bfqq->sort_list);
++ if (rbnext && rbnext != &last->rb_node)
++ next = rb_entry_rq(rbnext);
++ }
++
++ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
++}
++
++/* see the definition of bfq_async_charge_factor for details */
++static unsigned long bfq_serv_to_charge(struct request *rq,
++ struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
++ return blk_rq_sectors(rq);
++
++ /*
++ * If there are no weight-raised queues, then amplify service
++ * by just the async charge factor; otherwise amplify service
++ * by twice the async charge factor, to further reduce latency
++ * for weight-raised queues.
++ */
++ if (bfqq->bfqd->wr_busy_queues == 0)
++ return blk_rq_sectors(rq) * bfq_async_charge_factor;
++
++ return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
++}
++
++/**
++ * bfq_updated_next_req - update the queue after a new next_rq selection.
++ * @bfqd: the device data the queue belongs to.
++ * @bfqq: the queue to update.
++ *
++ * If the first request of a queue changes we make sure that the queue
++ * has enough budget to serve at least its first request (if the
++ * request has grown). We do this because if the queue has not enough
++ * budget for its first request, it has to go through two dispatch
++ * rounds to actually get it dispatched.
++ */
++static void bfq_updated_next_req(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ struct request *next_rq = bfqq->next_rq;
++ unsigned long new_budget;
++
++ if (!next_rq)
++ return;
++
++ if (bfqq == bfqd->in_service_queue)
++ /*
++ * In order not to break guarantees, budgets cannot be
++ * changed after an entity has been selected.
++ */
++ return;
++
++ BUG_ON(entity->tree != &st->active);
++ BUG_ON(entity == entity->sched_data->in_service_entity);
++
++ new_budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ if (entity->budget != new_budget) {
++ entity->budget = new_budget;
++ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++ new_budget);
++ bfq_requeue_bfqq(bfqd, bfqq);
++ }
++}
++
++static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
++{
++ u64 dur;
++
++ if (bfqd->bfq_wr_max_time > 0)
++ return bfqd->bfq_wr_max_time;
++
++ dur = bfqd->RT_prod;
++ do_div(dur, bfqd->peak_rate);
++
++ /*
++ * Limit duration between 3 and 13 seconds. Tests show that
++ * higher values than 13 seconds often yield the opposite of
++ * the desired result, i.e., worsen responsiveness by letting
++ * non-interactive and non-soft-real-time applications
++ * preserve weight raising for a too long time interval.
++ *
++ * On the other end, lower values than 3 seconds make it
++ * difficult for most interactive tasks to complete their jobs
++ * before weight-raising finishes.
++ */
++ if (dur > msecs_to_jiffies(13000))
++ dur = msecs_to_jiffies(13000);
++ else if (dur < msecs_to_jiffies(3000))
++ dur = msecs_to_jiffies(3000);
++
++ return dur;
++}
++
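/*
 * Worked example for the duration = (R / r) * T rule implemented in
 * bfq_wr_duration() above, with purely illustrative numbers: for a
 * reference pair with T = 4 s, a device whose estimated peak rate r
 * is half the reference rate R gets dur = (R / r) * T = 2 * 4 s = 8 s
 * of interactive weight raising, while a device as fast as the
 * reference one gets 4 s; any result outside [3 s, 13 s] is then
 * clamped to that window by the checks above.
 */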
++static void
++bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
++ struct bfq_io_cq *bic, bool bfq_already_existing)
++{
++ unsigned int old_wr_coeff;
++ bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
++
++ if (bic->saved_has_short_ttime)
++ bfq_mark_bfqq_has_short_ttime(bfqq);
++ else
++ bfq_clear_bfqq_has_short_ttime(bfqq);
++
++ if (bic->saved_IO_bound)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ else
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ if (unlikely(busy))
++ old_wr_coeff = bfqq->wr_coeff;
++
++ bfqq->wr_coeff = bic->saved_wr_coeff;
++ bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
++ BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt));
++ bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
++ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
++ time_is_before_jiffies(bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time))) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "resume state: switching off wr (%lu + %lu < %lu)",
++ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
++ jiffies);
++
++ bfqq->wr_coeff = 1;
++ }
++
++ /* make sure weight will be updated, however we got here */
++ bfqq->entity.prio_changed = 1;
++
++ if (likely(!busy))
++ return;
++
++ if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) {
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ } else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) {
++ bfqd->wr_busy_queues--;
++ BUG_ON(bfqd->wr_busy_queues < 0);
++ }
++}
++
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++ int process_refs, io_refs;
++
++ lockdep_assert_held(bfqq->bfqd->queue->queue_lock);
++
++ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st;
++ BUG_ON(process_refs < 0);
++ return process_refs;
++}
++
++/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
++static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_queue *item;
++ struct hlist_node *n;
++
++ hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
++ hlist_del_init(&item->burst_list_node);
++ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++ bfqd->burst_size = 1;
++ bfqd->burst_parent_entity = bfqq->entity.parent;
++}
++
++/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
++static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ /* Increment burst size to take into account also bfqq */
++ bfqd->burst_size++;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size);
++
++ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh);
++
++ if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
++ struct bfq_queue *pos, *bfqq_item;
++ struct hlist_node *n;
++
++ /*
++ * Enough queues have been activated shortly after each
++ * other to consider this burst as large.
++ */
++ bfqd->large_burst = true;
++ bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started");
++
++ /*
++ * We can now mark all queues in the burst list as
++ * belonging to a large burst.
++ */
++ hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
++ burst_list_node) {
++ bfq_mark_bfqq_in_large_burst(bfqq_item);
++ bfq_log_bfqq(bfqd, bfqq_item, "marked in large burst");
++ }
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ bfq_log_bfqq(bfqd, bfqq, "marked in large burst");
++
++ /*
++ * From now on, and until the current burst finishes, any
++ * new queue being activated shortly after the last queue
++ * was inserted in the burst can be immediately marked as
++ * belonging to a large burst. So the burst list is not
++ * needed any more. Remove it.
++ */
++ hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
++ burst_list_node)
++ hlist_del_init(&pos->burst_list_node);
++ } else /*
++ * Burst not yet large: add bfqq to the burst list. Do
++ * not increment the ref counter for bfqq, because bfqq
++ * is removed from the burst list before freeing bfqq
++ * in put_queue.
++ */
++ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++}
++
++/*
++ * If many queues belonging to the same group happen to be created
++ * shortly after each other, then the processes associated with these
++ * queues have typically a common goal. In particular, bursts of queue
++ * creations are usually caused by services or applications that spawn
++ * many parallel threads/processes. Examples are systemd during boot,
++ * or git grep. To help these processes get their job done as soon as
++ * possible, it is usually better to not grant either weight-raising
++ * or device idling to their queues.
++ *
++ * In this comment we describe, firstly, the reasons why this fact
++ * holds, and, secondly, the next function, which implements the main
++ * steps needed to properly mark these queues so that they can then be
++ * treated in a different way.
++ *
++ * The above services or applications benefit mostly from a high
++ * throughput: the quicker the requests of the activated queues are
++ * cumulatively served, the sooner the target job of these queues gets
++ * completed. As a consequence, weight-raising any of these queues,
++ * which also implies idling the device for it, is almost always
++ * counterproductive. In most cases it just lowers throughput.
++ *
++ * On the other hand, a burst of queue creations may be caused also by
++ * the start of an application that does not consist of a lot of
++ * parallel I/O-bound threads. In fact, with a complex application,
++ * several short processes may need to be executed to start-up the
++ * application. In this respect, to start an application as quickly as
++ * possible, the best thing to do is in any case to privilege the I/O
++ * related to the application with respect to all other
++ * I/O. Therefore, the best strategy to start as quickly as possible
++ * an application that causes a burst of queue creations is to
++ * weight-raise all the queues created during the burst. This is the
++ * exact opposite of the best strategy for the other type of bursts.
++ *
++ * In the end, to take the best action for each of the two cases, the
++ * two types of bursts need to be distinguished. Fortunately, this
++ * seems relatively easy, by looking at the sizes of the bursts. In
++ * particular, we found a threshold such that only bursts with a
++ * larger size than that threshold are apparently caused by
++ * services or commands such as systemd or git grep. For brevity,
++ * hereafter we call just 'large' these bursts. BFQ *does not*
++ * weight-raise queues whose creation occurs in a large burst. In
++ * addition, for each of these queues BFQ performs or does not perform
++ * idling depending on which choice boosts the throughput more. The
++ * exact choice depends on the device and request pattern at
++ * hand.
++ *
++ * Unfortunately, false positives may occur while an interactive task
++ * is starting (e.g., an application is being started). The
++ * consequence is that the queues associated with the task do not
++ * enjoy weight raising as expected. Fortunately these false positives
++ * are very rare. They typically occur if some service happens to
++ * start doing I/O exactly when the interactive task starts.
++ *
++ * Turning back to the next function, it implements all the steps
++ * needed to detect the occurrence of a large burst and to properly
++ * mark all the queues belonging to it (so that they can then be
++ * treated in a different way). This goal is achieved by maintaining a
++ * "burst list" that holds, temporarily, the queues that belong to the
++ * burst in progress. The list is then used to mark these queues as
++ * belonging to a large burst if the burst does become large. The main
++ * steps are the following.
++ *
++ * . when the very first queue is created, the queue is inserted into the
++ * list (as it could be the first queue in a possible burst)
++ *
++ * . if the current burst has not yet become large, and a queue Q that does
++ * not yet belong to the burst is activated shortly after the last time
++ * at which a new queue entered the burst list, then the function appends
++ * Q to the burst list
++ *
++ * . if, as a consequence of the previous step, the burst size reaches
++ * the large-burst threshold, then
++ *
++ * . all the queues in the burst list are marked as belonging to a
++ * large burst
++ *
++ * . the burst list is deleted; in fact, the burst list already served
++ * its purpose (keeping temporarily track of the queues in a burst,
++ * so as to be able to mark them as belonging to a large burst in the
++ * previous sub-step), and now is not needed any more
++ *
++ * . the device enters a large-burst mode
++ *
++ * . if a queue Q that does not belong to the burst is created while
++ * the device is in large-burst mode and shortly after the last time
++ * at which a queue either entered the burst list or was marked as
++ * belonging to the current large burst, then Q is immediately marked
++ * as belonging to a large burst.
++ *
++ * . if a queue Q that does not belong to the burst is created a while
++ *   after, i.e., not shortly after, the last time at which a queue
++ *   either entered the burst list or was marked as belonging to the
++ *   current large burst, then the current burst is deemed finished and:
++ *
++ * . the large-burst mode is reset if set
++ *
++ * . the burst list is emptied
++ *
++ * . Q is inserted in the burst list, as Q may be the first queue
++ * in a possible new burst (then the burst list contains just Q
++ * after this step).
++ */
++static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq is already in the burst list or is part of a large
++ * burst, or finally has just been split, then there is
++ * nothing else to do.
++ */
++ if (!hlist_unhashed(&bfqq->burst_list_node) ||
++ bfq_bfqq_in_large_burst(bfqq) ||
++ time_is_after_eq_jiffies(bfqq->split_time +
++ msecs_to_jiffies(10)))
++ return;
++
++ /*
++ * If bfqq's creation happens late enough, or bfqq belongs to
++ * a different group than the burst group, then the current
++ * burst is finished, and related data structures must be
++ * reset.
++ *
++ * In this respect, consider the special case where bfqq is
++ * the very first queue created after BFQ is selected for this
++ * device. In this case, last_ins_in_burst and
++ * burst_parent_entity are not yet significant when we get
++ * here. But it is easy to verify that, whether or not the
++ * following condition is true, bfqq will end up being
++ * inserted into the burst list. In particular the list will
++ * happen to contain only bfqq. And this is exactly what has
++ * to happen, as bfqq may be the first queue of the first
++ * burst.
++ */
++ if (time_is_before_jiffies(bfqd->last_ins_in_burst +
++ bfqd->bfq_burst_interval) ||
++ bfqq->entity.parent != bfqd->burst_parent_entity) {
++ bfqd->large_burst = false;
++ bfq_reset_burst_list(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "handle_burst: late activation or different group");
++ goto end;
++ }
++
++ /*
++ * If we get here, then bfqq is being activated shortly after the
++ * last queue. So, if the current burst is also large, we can mark
++ * bfqq as belonging to this large burst immediately.
++ */
++ if (bfqd->large_burst) {
++ bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst");
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ goto end;
++ }
++
++ /*
++ * If we get here, then a large-burst state has not yet been
++ * reached, but bfqq is being activated shortly after the last
++ * queue. Then we add bfqq to the burst.
++ */
++ bfq_add_to_burst(bfqd, bfqq);
++end:
++ /*
++ * At this point, bfqq either has been added to the current
++ * burst or has caused the current burst to terminate and a
++ * possible new burst to start. In particular, in the second
++ * case, bfqq has become the first queue in the possible new
++ * burst. In both cases last_ins_in_burst needs to be moved
++ * forward.
++ */
++ bfqd->last_ins_in_burst = jiffies;
++
++}
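++
++/*
++ * Worked example for the steps above (figures hypothetical, used
++ * only to make them concrete): assume a burst interval of 100 ms and
++ * a large-burst threshold of 4. If queues Q1..Q4 of the same group
++ * are created 20 ms apart, Q1 resets the burst list, Q2 and Q3 are
++ * appended, and Q4 pushes burst_size to the threshold: all four are
++ * marked in_large_burst and the list is emptied. A fifth queue
++ * created 500 ms later fails the last_ins_in_burst check, so it
++ * resets the list and may start a new burst. The real values come
++ * from bfqd->bfq_burst_interval and bfqd->bfq_large_burst_thresh.
++ */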
++
++static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ return entity->budget - entity->service;
++}
++
++/*
++ * If enough samples have been computed, return the current max budget
++ * stored in bfqd, which is dynamically updated according to the
++ * estimated disk peak rate; otherwise return the default max budget
++ */
++static int bfq_max_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < bfq_stats_min_budgets)
++ return bfq_default_max_budget;
++ else
++ return bfqd->bfq_max_budget;
++}
++
++/*
++ * Return min budget, which is a fraction of the current or default
++ * max budget (trying with 1/32)
++ */
++static int bfq_min_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < bfq_stats_min_budgets)
++ return bfq_default_max_budget / 32;
++ else
++ return bfqd->bfq_max_budget / 32;
++}
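++
++/*
++ * Worked example (the 16384 is a hypothetical figure, not
++ * necessarily the default): with a max budget of 16384 sectors,
++ * bfq_min_budget() returns 16384 / 32 = 512 sectors. Tying the
++ * minimum to 1/32 of the maximum keeps the floor proportional to
++ * whatever value the autotuning settles on.
++ */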
++
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool compensate,
++ enum bfqq_expiration reason);
++
++/*
++ * The next function, invoked after the input queue bfqq switches from
++ * idle to busy, updates the budget of bfqq. The function also tells
++ * whether the in-service queue should be expired, by returning
++ * true. The purpose of expiring the in-service queue is to give bfqq
++ * the chance to possibly preempt the in-service queue, and the reason
++ * for preempting the in-service queue is to achieve one of the two
++ * goals below.
++ *
++ * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
++ * expired because it has remained idle. In particular, bfqq may have
++ * expired for one of the following two reasons:
++ *
++ * - BFQ_BFQQ_NO_MORE_REQUEST bfqq did not enjoy any device idling and
++ * did not make it to issue a new request before its last request
++ * was served;
++ *
++ * - BFQ_BFQQ_TOO_IDLE bfqq did enjoy device idling, but did not issue
++ * a new request before the expiration of the idling-time.
++ *
++ * Even if bfqq has expired for one of the above reasons, the process
++ * associated with the queue may be however issuing requests greedily,
++ * and thus be sensitive to the bandwidth it receives (bfqq may have
++ * remained idle for other reasons: high CPU load, bfqq not enjoying
++ * idling, I/O throttling somewhere in the path from the process to
++ * the I/O scheduler, ...). But if, after every expiration for one of
++ * the above two reasons, bfqq has to wait for the service of at least
++ * one full budget of another queue before being served again, then
++ * bfqq is likely to get a much lower bandwidth or resource time than
++ * its reserved ones. To address this issue, two countermeasures need
++ * to be taken.
++ *
++ * First, the budget and the timestamps of bfqq need to be updated in
++ * a special way on bfqq reactivation: they need to be updated as if
++ * bfqq did not remain idle and did not expire. In fact, if they are
++ * computed as if bfqq expired and remained idle until reactivation,
++ * then the process associated with bfqq is treated as if, instead of
++ * being greedy, it stopped issuing requests when bfqq remained idle,
++ * and restarts issuing requests only on this reactivation. In other
++ * words, the scheduler does not help the process recover the "service
++ * hole" between bfqq expiration and reactivation. As a consequence,
++ * the process receives a lower bandwidth than its reserved one. In
++ * contrast, to recover this hole, the budget must be updated as if
++ * bfqq was not expired at all before this reactivation, i.e., it must
++ * be set to the value of the remaining budget when bfqq was
++ * expired. Along the same line, timestamps need to be assigned the
++ * value they had the last time bfqq was selected for service, i.e.,
++ * before last expiration. Thus timestamps need to be back-shifted
++ * with respect to their normal computation (see [1] for more details
++ * on this tricky aspect).
++ *
++ * Secondly, to allow the process to recover the hole, the in-service
++ * queue must be expired too, to give bfqq the chance to preempt it
++ * immediately. In fact, if bfqq has to wait for a full budget of the
++ * in-service queue to be completed, then it may become impossible to
++ * let the process recover the hole, even if the back-shifted
++ * timestamps of bfqq are lower than those of the in-service queue. If
++ * this happens for most or all of the holes, then the process may not
++ * receive its reserved bandwidth. In this respect, it is worth noting
++ * that, since the service of outstanding requests is not preemptible,
++ * a small fraction of the holes may however be unrecoverable, thereby
++ * causing a small loss of bandwidth.
++ *
++ * The last important point is detecting whether bfqq does need this
++ * bandwidth recovery. In this respect, the next function deems the
++ * process associated with bfqq greedy, and thus allows it to recover
++ * the hole, if: 1) the process is waiting for the arrival of a new
++ * request (which implies that bfqq expired for one of the above two
++ * reasons), and 2) such a request has arrived soon. The first
++ * condition is controlled through the flag non_blocking_wait_rq,
++ * while the second through the flag arrived_in_time. If both
++ * conditions hold, then the function computes the budget in the
++ * above-described special way, and signals that the in-service queue
++ * should be expired. Timestamp back-shifting is done later in
++ * __bfq_activate_entity.
++ *
++ * 2. Reduce latency. Even if timestamps are not backshifted to let
++ * the process associated with bfqq recover a service hole, bfqq may
++ * however happen to have, after being (re)activated, a lower finish
++ * timestamp than the in-service queue. That is, the next budget of
++ * bfqq may have to be completed before the one of the in-service
++ * queue. If this is the case, then preempting the in-service queue
++ * allows this goal to be achieved, apart from the unpreemptible,
++ * outstanding requests mentioned above.
++ *
++ * Unfortunately, regardless of which of the above two goals one wants
++ * to achieve, service trees need first to be updated to know whether
++ * the in-service queue must be preempted. To have service trees
++ * correctly updated, the in-service queue must be expired and
++ * rescheduled, and bfqq must be scheduled too. This is one of the
++ * most costly operations (in future versions, the scheduling
++ * mechanism may be re-designed in such a way to make it possible to
++ * know whether preemption is needed without needing to update service
++ * trees). In addition, queue preemptions almost always cause random
++ * I/O, and thus loss of throughput. Because of these facts, the next
++ * function adopts the following simple scheme to avoid both costly
++ * operations and too frequent preemptions: it requests the expiration
++ * of the in-service queue (unconditionally) only for queues that need
++ * to recover a hole, or that either are weight-raised or deserve to
++ * be weight-raised.
++ */
++static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool arrived_in_time,
++ bool wr_or_deserves_wr)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
++ /*
++ * We do not clear the flag non_blocking_wait_rq here, as
++ * the latter is used in bfq_activate_bfqq to signal
++ * that timestamps need to be back-shifted (and is
++ * cleared right after).
++ */
++
++ /*
++		 * In the next assignment we rely on the fact that
++		 * neither entity->service nor entity->budget is
++		 * updated on expiration if bfqq is empty (see
++ * __bfq_bfqq_recalc_budget). Thus both quantities
++ * remain unchanged after such an expiration, and the
++ * following statement therefore assigns to
++ * entity->budget the remaining budget on such an
++ * expiration. For clarity, entity->service is not
++ * updated on expiration in any case, and, in normal
++ * operation, is reset only when bfqq is selected for
++ * service (see bfq_get_next_queue).
++ */
++ BUG_ON(bfqq->max_budget < 0);
++ entity->budget = min_t(unsigned long,
++ bfq_bfqq_budget_left(bfqq),
++ bfqq->max_budget);
++
++ BUG_ON(entity->budget < 0);
++ return true;
++ }
++
++ BUG_ON(bfqq->max_budget < 0);
++ entity->budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(bfqq->next_rq, bfqq));
++ BUG_ON(entity->budget < 0);
++
++ bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
++ return wr_or_deserves_wr;
++}
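++
++/*
++ * Condensed restatement of the function above (no new behaviour,
++ * just a reading aid):
++ *
++ *   non_blocking_wait_rq && arrived_in_time:
++ *       entity->budget = min(budget_left, max_budget);
++ *       return true  (bfqq may want to preempt the in-service queue)
++ *   otherwise:
++ *       entity->budget = max(max_budget, serv_to_charge(next_rq));
++ *       clear non_blocking_wait_rq;
++ *       return wr_or_deserves_wr
++ *
++ * The first branch implements the "service hole" recovery described
++ * in the long comment before the function.
++ */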
++
++static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ unsigned int old_wr_coeff,
++ bool wr_or_deserves_wr,
++ bool interactive,
++ bool in_burst,
++ bool soft_rt)
++{
++ if (old_wr_coeff == 1 && wr_or_deserves_wr) {
++ /* start a weight-raising period */
++ if (interactive) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ } else {
++ bfqq->wr_start_at_switch_to_srt = jiffies;
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
++ BFQ_SOFTRT_WEIGHT_FACTOR;
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ }
++ /*
++ * If needed, further reduce budget to make sure it is
++ * close to bfqq's backlog, so as to reduce the
++ * scheduling-error component due to a too large
++ * budget. Do not care about throughput consequences,
++ * but only about latency. Finally, do not assign a
++ * too small budget either, to avoid increasing
++ * latency by causing too frequent expirations.
++ */
++ bfqq->entity.budget = min_t(unsigned long,
++ bfqq->entity.budget,
++ 2 * bfq_min_budget(bfqd));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais starting at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ } else if (old_wr_coeff > 1) {
++ if (interactive) { /* update wr coeff and duration */
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ } else if (in_burst) {
++ bfqq->wr_coeff = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais ending at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->
++ wr_cur_max_time));
++ } else if (soft_rt) {
++ /*
++ * The application is now or still meeting the
++ * requirements for being deemed soft rt. We
++ * can then correctly and safely (re)charge
++ * the weight-raising duration for the
++ * application with the weight-raising
++ * duration for soft rt applications.
++ *
++ * In particular, doing this recharge now, i.e.,
++ * before the weight-raising period for the
++ * application finishes, reduces the probability
++ * of the following negative scenario:
++ * 1) the weight of a soft rt application is
++ * raised at startup (as for any newly
++ * created application),
++ * 2) since the application is not interactive,
++ * at a certain time weight-raising is
++ * stopped for the application,
++ * 3) at that time the application happens to
++ * still have pending requests, and hence
++ * is destined to not have a chance to be
++ * deemed soft rt before these requests are
++ * completed (see the comments to the
++ * function bfq_bfqq_softrt_next_start()
++ * for details on soft rt detection),
++ * 4) these pending requests experience a high
++ * latency because the application is not
++ * weight-raised while they are pending.
++ */
++ if (bfqq->wr_cur_max_time !=
++ bfqd->bfq_wr_rt_max_time) {
++ bfqq->wr_start_at_switch_to_srt =
++ bfqq->last_wr_start_finish;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
++ BFQ_SOFTRT_WEIGHT_FACTOR;
++ bfq_log_bfqq(bfqd, bfqq,
++ "switching to soft_rt wr");
++ } else
++ bfq_log_bfqq(bfqd, bfqq,
++ "moving forward soft_rt wr duration");
++ bfqq->last_wr_start_finish = jiffies;
++ }
++ }
++}
++
++static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ return bfqq->dispatched == 0 &&
++ time_is_before_jiffies(
++ bfqq->budget_timeout +
++ bfqd->bfq_wr_min_idle_time);
++}
++
++static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ int old_wr_coeff,
++ struct request *rq,
++ bool *interactive)
++{
++ bool soft_rt, in_burst, wr_or_deserves_wr,
++ bfqq_wants_to_preempt,
++ idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
++ /*
++ * See the comments on
++ * bfq_bfqq_update_budg_for_activation for
++ * details on the usage of the next variable.
++ */
++ arrived_in_time = ktime_get_ns() <=
++ RQ_BIC(rq)->ttime.last_end_request +
++ bfqd->bfq_slice_idle * 3;
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "bfq_add_request non-busy: "
++ "jiffies %lu, in_time %d, idle_long %d busyw %d "
++ "wr_coeff %u",
++ jiffies, arrived_in_time,
++ idle_for_long_time,
++ bfq_bfqq_non_blocking_wait_rq(bfqq),
++ old_wr_coeff);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ BUG_ON(bfqq == bfqd->in_service_queue);
++ bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
++
++ /*
++ * bfqq deserves to be weight-raised if:
++ * - it is sync,
++ * - it does not belong to a large burst,
++ * - it has been idle for enough time or is soft real-time,
++	 * - it is linked to a bfq_io_cq (it is not shared in any sense)
++ */
++ in_burst = bfq_bfqq_in_large_burst(bfqq);
++ soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
++ !in_burst &&
++ time_is_before_jiffies(bfqq->soft_rt_next_start);
++ *interactive =
++ !in_burst &&
++ idle_for_long_time;
++ wr_or_deserves_wr = bfqd->low_latency &&
++ (bfqq->wr_coeff > 1 ||
++ (bfq_bfqq_sync(bfqq) &&
++ bfqq->bic && (*interactive || soft_rt)));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "bfq_add_request: "
++ "in_burst %d, "
++ "soft_rt %d (next %lu), inter %d, bic %p",
++ bfq_bfqq_in_large_burst(bfqq), soft_rt,
++ bfqq->soft_rt_next_start,
++ *interactive,
++ bfqq->bic);
++
++ /*
++ * Using the last flag, update budget and check whether bfqq
++ * may want to preempt the in-service queue.
++ */
++ bfqq_wants_to_preempt =
++ bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
++ arrived_in_time,
++ wr_or_deserves_wr);
++
++ /*
++ * If bfqq happened to be activated in a burst, but has been
++ * idle for much more than an interactive queue, then we
++ * assume that, in the overall I/O initiated in the burst, the
++ * I/O associated with bfqq is finished. So bfqq does not need
++ * to be treated as a queue belonging to a burst
++ * anymore. Accordingly, we reset bfqq's in_large_burst flag
++ * if set, and remove bfqq from the burst list if it's
++ * there. We do not decrement burst_size, because the fact
++ * that bfqq does not need to belong to the burst list any
++ * more does not invalidate the fact that bfqq was created in
++ * a burst.
++ */
++ if (likely(!bfq_bfqq_just_created(bfqq)) &&
++ idle_for_long_time &&
++ time_is_before_jiffies(
++ bfqq->budget_timeout +
++ msecs_to_jiffies(10000))) {
++ hlist_del_init(&bfqq->burst_list_node);
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ }
++
++ bfq_clear_bfqq_just_created(bfqq);
++
++ if (!bfq_bfqq_IO_bound(bfqq)) {
++ if (arrived_in_time) {
++ bfqq->requests_within_timer++;
++ if (bfqq->requests_within_timer >=
++ bfqd->bfq_requests_within_timer)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ } else
++ bfqq->requests_within_timer = 0;
++ bfq_log_bfqq(bfqd, bfqq, "requests in time %d",
++ bfqq->requests_within_timer);
++ }
++
++ if (bfqd->low_latency) {
++ if (unlikely(time_is_after_jiffies(bfqq->split_time)))
++ /* wraparound */
++ bfqq->split_time =
++ jiffies - bfqd->bfq_wr_min_idle_time - 1;
++
++ if (time_is_before_jiffies(bfqq->split_time +
++ bfqd->bfq_wr_min_idle_time)) {
++ bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
++ old_wr_coeff,
++ wr_or_deserves_wr,
++ *interactive,
++ in_burst,
++ soft_rt);
++
++ if (old_wr_coeff != bfqq->wr_coeff)
++ bfqq->entity.prio_changed = 1;
++ }
++ }
++
++ bfqq->last_idle_bklogged = jiffies;
++ bfqq->service_from_backlogged = 0;
++ bfq_clear_bfqq_softrt_update(bfqq);
++
++ bfq_add_bfqq_busy(bfqd, bfqq);
++
++ /*
++ * Expire in-service queue only if preemption may be needed
++ * for guarantees. In this respect, the function
++ * next_queue_may_preempt just checks a simple, necessary
++ * condition, and not a sufficient condition based on
++ * timestamps. In fact, for the latter condition to be
++ * evaluated, timestamps would need first to be updated, and
++ * this operation is quite costly (see the comments on the
++ * function bfq_bfqq_update_budg_for_activation).
++ */
++ if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
++ bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
++ next_queue_may_preempt(bfqd)) {
++ struct bfq_queue *in_serv =
++ bfqd->in_service_queue;
++ BUG_ON(in_serv == bfqq);
++
++ bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
++ false, BFQ_BFQQ_PREEMPTED);
++ }
++}
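++
++/*
++ * Illustrative sketch, not part of the original patch (the helper
++ * name is invented for illustration): the weight-raising eligibility
++ * computed in bfq_bfqq_handle_idle_busy_switch() can be read as the
++ * boolean below, which restates the wr_or_deserves_wr assignment
++ * using only symbols visible above.
++ */
++static inline bool bfq_bfqq_deserves_wr(struct bfq_data *bfqd,
++					struct bfq_queue *bfqq,
++					bool interactive, bool soft_rt)
++{
++	return bfqd->low_latency &&
++		(bfqq->wr_coeff > 1 ||
++		 (bfq_bfqq_sync(bfqq) && bfqq->bic &&
++		  (interactive || soft_rt)));
++}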
++
++static void bfq_add_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *next_rq, *prev;
++ unsigned int old_wr_coeff = bfqq->wr_coeff;
++ bool interactive = false;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s",
++ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A");
++
++ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqq->wr_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ bfqq->queued[rq_is_sync(rq)]++;
++ bfqd->queued++;
++
++ elv_rb_add(&bfqq->sort_list, rq);
++
++ /*
++ * Check if this request is a better next-to-serve candidate.
++ */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
++ BUG_ON(!next_rq);
++ bfqq->next_rq = next_rq;
++
++ /*
++ * Adjust priority tree position, if next_rq changes.
++ */
++ if (prev != bfqq->next_rq)
++ bfq_pos_tree_add_move(bfqd, bfqq);
++
++ if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
++ bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
++ rq, &interactive);
++ else {
++ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
++ time_is_before_jiffies(
++ bfqq->last_wr_start_finish +
++ bfqd->bfq_wr_min_inter_arr_async)) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "non-idle wrais starting, "
++ "wr_max_time %u wr_busy %d",
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqd->wr_busy_queues);
++ }
++ if (prev != bfqq->next_rq)
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ /*
++ * Assign jiffies to last_wr_start_finish in the following
++ * cases:
++ *
++ * . if bfqq is not going to be weight-raised, because, for
++ * non weight-raised queues, last_wr_start_finish stores the
++ * arrival time of the last request; as of now, this piece
++ * of information is used only for deciding whether to
++ * weight-raise async queues
++ *
++ * . if bfqq is not weight-raised, because, if bfqq is now
++ * switching to weight-raised, then last_wr_start_finish
++ * stores the time when weight-raising starts
++ *
++ * . if bfqq is interactive, because, regardless of whether
++ * bfqq is currently weight-raised, the weight-raising
++ * period must start or restart (this case is considered
++ * separately because it is not detected by the above
++ * conditions, if bfqq is already weight-raised)
++ *
++ * last_wr_start_finish has to be updated also if bfqq is soft
++ * real-time, because the weight-raising period is constantly
++ * restarted on idle-to-busy transitions for these queues, but
++ * this is already done in bfq_bfqq_handle_idle_busy_switch if
++ * needed.
++ */
++ if (bfqd->low_latency &&
++ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
++ bfqq->last_wr_start_finish = jiffies;
++}
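++
++/*
++ * Reading aid: the three bullets in the comment above map onto the
++ * condition just evaluated: bfqq->wr_coeff == 1 covers "not going to
++ * be weight-raised", old_wr_coeff == 1 covers "was not weight-raised
++ * before this arrival", and interactive covers the (re)start case.
++ * Soft real-time queues need no extra handling here because, as the
++ * comment says, bfq_bfqq_handle_idle_busy_switch() has already
++ * refreshed their timestamp when needed.
++ */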
++
++static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
++ struct bio *bio)
++{
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (!bic)
++ return NULL;
++
++ bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
++ if (bfqq)
++ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
++
++ return NULL;
++}
++
++static sector_t get_sdist(sector_t last_pos, struct request *rq)
++{
++ sector_t sdist = 0;
++
++ if (last_pos) {
++ if (last_pos < blk_rq_pos(rq))
++ sdist = blk_rq_pos(rq) - last_pos;
++ else
++ sdist = last_pos - blk_rq_pos(rq);
++ }
++
++ return sdist;
++}
++
++static void bfq_activate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ bfqd->rq_in_driver++;
++}
++
++static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ BUG_ON(bfqd->rq_in_driver == 0);
++ bfqd->rq_in_driver--;
++}
++
++static void bfq_remove_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ const int sync = rq_is_sync(rq);
++
++ BUG_ON(bfqq->entity.service > bfqq->entity.budget &&
++ bfqq == bfqd->in_service_queue);
++
++ if (bfqq->next_rq == rq) {
++ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ if (rq->queuelist.prev != &rq->queuelist)
++ list_del_init(&rq->queuelist);
++ BUG_ON(bfqq->queued[sync] == 0);
++ bfqq->queued[sync]--;
++ bfqd->queued--;
++ elv_rb_del(&bfqq->sort_list, rq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ bfqq->next_rq = NULL;
++
++ BUG_ON(bfqq->entity.budget < 0);
++
++ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
++ BUG_ON(bfqq->ref < 2); /* referred by rq and on tree */
++ bfq_del_bfqq_busy(bfqd, bfqq, false);
++ /*
++ * bfqq emptied. In normal operation, when
++ * bfqq is empty, bfqq->entity.service and
++ * bfqq->entity.budget must contain,
++ * respectively, the service received and the
++ * budget used last time bfqq emptied. These
++ * facts do not hold in this case, as at least
++ * this last removal occurred while bfqq is
++ * not in service. To avoid inconsistencies,
++ * reset both bfqq->entity.service and
++ * bfqq->entity.budget, if bfqq has still a
++ * process that may issue I/O requests to it.
++ */
++ bfqq->entity.budget = bfqq->entity.service = 0;
++ }
++
++ /*
++ * Remove queue from request-position tree as it is empty.
++ */
++ if (bfqq->pos_root) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++ }
++
++ if (rq->cmd_flags & REQ_META) {
++ BUG_ON(bfqq->meta_pending == 0);
++ bfqq->meta_pending--;
++ }
++ bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
++}
++
++static enum elv_merge bfq_merge(struct request_queue *q, struct request **req,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct request *__rq;
++
++ __rq = bfq_find_rq_fmerge(bfqd, bio);
++ if (__rq && elv_bio_merge_ok(__rq, bio)) {
++ *req = __rq;
++ return ELEVATOR_FRONT_MERGE;
++ }
++
++ return ELEVATOR_NO_MERGE;
++}
++
++static void bfq_merged_request(struct request_queue *q, struct request *req,
++ enum elv_merge type)
++{
++ if (type == ELEVATOR_FRONT_MERGE &&
++ rb_prev(&req->rb_node) &&
++ blk_rq_pos(req) <
++ blk_rq_pos(container_of(rb_prev(&req->rb_node),
++ struct request, rb_node))) {
++ struct bfq_queue *bfqq = RQ_BFQQ(req);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *prev, *next_rq;
++
++ /* Reposition request in its sort_list */
++ elv_rb_del(&bfqq->sort_list, req);
++ elv_rb_add(&bfqq->sort_list, req);
++ /* Choose next request to be served for bfqq */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
++ bfqd->last_position);
++ BUG_ON(!next_rq);
++ bfqq->next_rq = next_rq;
++ /*
++ * If next_rq changes, update both the queue's budget to
++ * fit the new request and the queue's position in its
++ * rq_pos_tree.
++ */
++ if (prev != bfqq->next_rq) {
++ bfq_updated_next_req(bfqd, bfqq);
++ bfq_pos_tree_add_move(bfqd, bfqq);
++ }
++ }
++}
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static void bfq_bio_merged(struct request_queue *q, struct request *req,
++ struct bio *bio)
++{
++ bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf);
++}
++#endif
++
++static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
++
++ /*
++ * If next and rq belong to the same bfq_queue and next is older
++ * than rq, then reposition rq in the fifo (by substituting next
++ * with rq). Otherwise, if next and rq belong to different
++ * bfq_queues, never reposition rq: in fact, we would have to
++ * reposition it with respect to next's position in its own fifo,
++	 * which would almost certainly be too expensive compared to
++	 * the benefits.
++ */
++ if (bfqq == next_bfqq &&
++ !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
++ next->fifo_time < rq->fifo_time) {
++ list_del_init(&rq->queuelist);
++ list_replace_init(&next->queuelist, &rq->queuelist);
++ rq->fifo_time = next->fifo_time;
++ }
++
++ if (bfqq->next_rq == next)
++ bfqq->next_rq = rq;
++
++ bfq_remove_request(next);
++ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
++}
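++
++/*
++ * Worked example (timestamps hypothetical): if next entered the fifo
++ * at t = 4 ms and rq at t = 10 ms, and both belong to the same
++ * bfq_queue, the code above gives rq next's fifo position and
++ * fifo_time (4 ms), so the surviving request is not served later
++ * than the older of the two would have been. Across different
++ * bfq_queues no repositioning is attempted, for the cost reasons
++ * explained in the comment inside the function.
++ */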
++
++/* Must be called with bfqq != NULL */
++static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
++{
++ BUG_ON(!bfqq);
++
++ if (bfq_bfqq_busy(bfqq)) {
++ bfqq->bfqd->wr_busy_queues--;
++ BUG_ON(bfqq->bfqd->wr_busy_queues < 0);
++ }
++ bfqq->wr_coeff = 1;
++ bfqq->wr_cur_max_time = 0;
++ bfqq->last_wr_start_finish = jiffies;
++ /*
++ * Trigger a weight change on the next invocation of
++ * __bfq_entity_update_weight_prio.
++ */
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "end_wr: wrais ending at %lu, rais_max_time %u",
++ bfqq->last_wr_start_finish,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d",
++ bfqq->bfqd->wr_busy_queues);
++}
++
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ if (bfqg->async_bfqq[i][j])
++ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
++ if (bfqg->async_idle_bfqq)
++ bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
++}
++
++static void bfq_end_wr(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ bfq_end_wr_async(bfqd);
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++}
++
++static sector_t bfq_io_struct_pos(void *io_struct, bool request)
++{
++ if (request)
++ return blk_rq_pos(io_struct);
++ else
++ return ((struct bio *)io_struct)->bi_iter.bi_sector;
++}
++
++static int bfq_rq_close_to_sector(void *io_struct, bool request,
++ sector_t sector)
++{
++ return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
++ BFQQ_CLOSE_THR;
++}
++
++static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ sector_t sector)
++{
++ struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
++ struct rb_node *parent, *node;
++ struct bfq_queue *__bfqq;
++
++ if (RB_EMPTY_ROOT(root))
++ return NULL;
++
++ /*
++ * First, if we find a request starting at the end of the last
++ * request, choose it.
++ */
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
++ if (__bfqq)
++ return __bfqq;
++
++ /*
++ * If the exact sector wasn't found, the parent of the NULL leaf
++ * will contain the closest sector (rq_pos_tree sorted by
++ * next_request position).
++ */
++ __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ if (blk_rq_pos(__bfqq->next_rq) < sector)
++ node = rb_next(&__bfqq->pos_node);
++ else
++ node = rb_prev(&__bfqq->pos_node);
++ if (!node)
++ return NULL;
++
++ __bfqq = rb_entry(node, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ return NULL;
++}
++
++static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
++ struct bfq_queue *cur_bfqq,
++ sector_t sector)
++{
++ struct bfq_queue *bfqq;
++
++ /*
++ * We shall notice if some of the queues are cooperating,
++ * e.g., working closely on the same area of the device. In
++	 * that case, we can group them together and: 1) not waste
++ * time idling, and 2) serve the union of their requests in
++ * the best possible order for throughput.
++ */
++ bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
++ if (!bfqq || bfqq == cur_bfqq)
++ return NULL;
++
++ return bfqq;
++}
++
++static struct bfq_queue *
++bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ int process_refs, new_process_refs;
++ struct bfq_queue *__bfqq;
++
++ /*
++ * If there are no process references on the new_bfqq, then it is
++ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++ * may have dropped their last reference (not just their last process
++ * reference).
++ */
++ if (!bfqq_process_refs(new_bfqq))
++ return NULL;
++
++ /* Avoid a circular list and skip interim queue merges. */
++ while ((__bfqq = new_bfqq->new_bfqq)) {
++ if (__bfqq == bfqq)
++ return NULL;
++ new_bfqq = __bfqq;
++ }
++
++ process_refs = bfqq_process_refs(bfqq);
++ new_process_refs = bfqq_process_refs(new_bfqq);
++ /*
++ * If the process for the bfqq has gone away, there is no
++ * sense in merging the queues.
++ */
++ if (process_refs == 0 || new_process_refs == 0)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++ new_bfqq->pid);
++
++ /*
++ * Merging is just a redirection: the requests of the process
++ * owning one of the two queues are redirected to the other queue.
++ * The latter queue, in its turn, is set as shared if this is the
++ * first time that the requests of some process are redirected to
++ * it.
++ *
++ * We redirect bfqq to new_bfqq and not the opposite, because we
++ * are in the context of the process owning bfqq, hence we have
++ * the io_cq of this process. So we can immediately configure this
++ * io_cq to redirect the requests of the process to new_bfqq.
++ *
++ * NOTE, even if new_bfqq coincides with the in-service queue, the
++ * io_cq of new_bfqq is not available, because, if the in-service
++ * queue is shared, bfqd->in_service_bic may not point to the
++ * io_cq of the in-service queue.
++ * Redirecting the requests of the process owning bfqq to the
++ * currently in-service queue is in any case the best option, as
++ * we feed the in-service queue with new requests close to the
++ * last request served and, by doing so, hopefully increase the
++ * throughput.
++ */
++ bfqq->new_bfqq = new_bfqq;
++ new_bfqq->ref += process_refs;
++ return new_bfqq;
++}
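++
++/*
++ * Note on the reference accounting above: if the processes owning
++ * bfqq currently hold, say, two process references, then scheduling
++ * the merge adds two references to new_bfqq->ref. This keeps the
++ * redirection target alive at least as long as those processes may
++ * still be redirected to it (the figure "two" is just an example).
++ */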
++
++static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
++ struct bfq_queue *new_bfqq)
++{
++ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
++ (bfqq->ioprio_class != new_bfqq->ioprio_class))
++ return false;
++
++ /*
++ * If either of the queues has already been detected as seeky,
++ * then merging it with the other queue is unlikely to lead to
++ * sequential I/O.
++ */
++ if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
++ return false;
++
++ /*
++ * Interleaved I/O is known to be done by (some) applications
++ * only for reads, so it does not make sense to merge async
++ * queues.
++ */
++ if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
++ return false;
++
++ return true;
++}
++
++/*
++ * If this function returns true, then bfqq cannot be merged. The idea
++ * is that true cooperation happens very early after processes start
++ * to do I/O. Usually, late cooperations are just accidental false
++ * positives. In case bfqq is weight-raised, such false positives
++ * would evidently degrade latency guarantees for bfqq.
++ */
++static bool wr_from_too_long(struct bfq_queue *bfqq)
++{
++ return bfqq->wr_coeff > 1 &&
++ time_is_before_jiffies(bfqq->last_wr_start_finish +
++ msecs_to_jiffies(100));
++}
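++
++/*
++ * In other words, wr_from_too_long() returns true for a queue whose
++ * weight-raising period began more than 100 ms ago, so a
++ * weight-raised queue is considered as a merge candidate only during
++ * the first 100 ms of its raising period, matching the "cooperation
++ * happens very early" rationale above.
++ */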
++
++/*
++ * Attempt to schedule a merge of bfqq with the currently in-service
++ * queue or with a close queue among the scheduled queues. Return
++ * NULL if no merge was scheduled, a pointer to the shared bfq_queue
++ * structure otherwise.
++ *
++ * The OOM queue is not allowed to participate in cooperation: in fact, since
++ * the requests temporarily redirected to the OOM queue could be redirected
++ * again to dedicated queues at any time, the state needed to correctly
++ * handle merging with the OOM queue would be quite complex and expensive
++ * to maintain. Besides, in such a critical condition as an out of memory,
++ * the benefits of queue merging may be little relevant, or even negligible.
++ *
++ * Weight-raised queues can be merged only if their weight-raising
++ * period has just started. In fact cooperating processes are usually
++ * started together. Thus, with this filter we avoid false positives
++ * that would jeopardize low-latency guarantees.
++ *
++ * WARNING: queue merging may impair fairness among non-weight raised
++ * queues, for at least two reasons: 1) the original weight of a
++ * merged queue may change during the merged state, 2) even being the
++ * weight the same, a merged queue may be bloated with many more
++ * requests than the ones produced by its originally-associated
++ * process.
++ */
++static struct bfq_queue *
++bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ void *io_struct, bool request)
++{
++ struct bfq_queue *in_service_bfqq, *new_bfqq;
++
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
++ if (io_struct && wr_from_too_long(bfqq) &&
++ likely(bfqq != &bfqd->oom_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have looked for coop, but bfq%d wr",
++ bfqq->pid);
++
++ if (!io_struct ||
++ wr_from_too_long(bfqq) ||
++ unlikely(bfqq == &bfqd->oom_bfqq))
++ return NULL;
++
++ /* If there is only one backlogged queue, don't search. */
++ if (bfqd->busy_queues == 1)
++ return NULL;
++
++ in_service_bfqq = bfqd->in_service_queue;
++
++ if (in_service_bfqq && in_service_bfqq != bfqq &&
++ bfqd->in_service_bic && wr_from_too_long(in_service_bfqq)
++ && likely(in_service_bfqq == &bfqd->oom_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have tried merge with in-service-queue, but wr");
++
++ if (!in_service_bfqq || in_service_bfqq == bfqq ||
++ !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) ||
++ unlikely(in_service_bfqq == &bfqd->oom_bfqq))
++ goto check_scheduled;
++
++ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ bfqq->entity.parent == in_service_bfqq->entity.parent &&
++ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
++ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
++ if (new_bfqq)
++ return new_bfqq;
++ }
++ /*
++ * Check whether there is a cooperator among currently scheduled
++ * queues. The only thing we need is that the bio/request is not
++ * NULL, as we need it to establish whether a cooperator exists.
++ */
++check_scheduled:
++ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
++ bfq_io_struct_pos(io_struct, request));
++
++ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent);
++
++ if (new_bfqq && wr_from_too_long(new_bfqq) &&
++ likely(new_bfqq != &bfqd->oom_bfqq) &&
++ bfq_may_be_close_cooperator(bfqq, new_bfqq))
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have merged with bfq%d, but wr",
++ new_bfqq->pid);
++
++ if (new_bfqq && !wr_from_too_long(new_bfqq) &&
++ likely(new_bfqq != &bfqd->oom_bfqq) &&
++ bfq_may_be_close_cooperator(bfqq, new_bfqq))
++ return bfq_setup_merge(bfqq, new_bfqq);
++
++ return NULL;
++}
++
++static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
++{
++ struct bfq_io_cq *bic = bfqq->bic;
++
++ /*
++ * If !bfqq->bic, the queue is already shared or its requests
++ * have already been redirected to a shared queue; both idle window
++ * and weight raising state have already been saved. Do nothing.
++ */
++ if (!bic)
++ return;
++
++ bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
++ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
++ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
++ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
++ bic->saved_wr_coeff = bfqq->wr_coeff;
++ bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
++ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
++ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++}
++
++static void bfq_get_bic_reference(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic has a non-NULL value, the bic to which it belongs
++ * is about to begin using a shared bfq_queue.
++ */
++ if (bfqq->bic)
++ atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
++}
++
++static void
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++ (unsigned long) new_bfqq->pid);
++ /* Save weight raising and idle window of the merged queues */
++ bfq_bfqq_save_state(bfqq);
++ bfq_bfqq_save_state(new_bfqq);
++ if (bfq_bfqq_IO_bound(bfqq))
++ bfq_mark_bfqq_IO_bound(new_bfqq);
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ /*
++ * If bfqq is weight-raised, then let new_bfqq inherit
++ * weight-raising. To reduce false positives, neglect the case
++ * where bfqq has just been created, but has not yet made it
++ * to be weight-raised (which may happen because EQM may merge
++ * bfqq even before bfq_add_request is executed for the first
++ * time for bfqq). Handling this case would however be very
++ * easy, thanks to the flag just_created.
++ */
++ if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
++ new_bfqq->wr_coeff = bfqq->wr_coeff;
++ new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
++ new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
++ new_bfqq->wr_start_at_switch_to_srt =
++ bfqq->wr_start_at_switch_to_srt;
++ if (bfq_bfqq_busy(new_bfqq)) {
++ bfqd->wr_busy_queues++;
++ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues);
++ }
++
++ new_bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, new_bfqq,
++ "wr start after merge with %d, rais_max_time %u",
++ bfqq->pid,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
++ bfqq->wr_coeff = 1;
++ bfqq->entity.prio_changed = 1;
++ if (bfq_bfqq_busy(bfqq)) {
++ bfqd->wr_busy_queues--;
++ BUG_ON(bfqd->wr_busy_queues < 0);
++ }
++
++ }
++
++ bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
++ bfqd->wr_busy_queues);
++
++ /*
++ * Grab a reference to the bic, to prevent it from being destroyed
++ * before being possibly touched by a bfq_split_bfqq().
++ */
++ bfq_get_bic_reference(bfqq);
++ bfq_get_bic_reference(new_bfqq);
++ /*
++ * Merge queues (that is, let bic redirect its requests to new_bfqq)
++ */
++ bic_set_bfqq(bic, new_bfqq, 1);
++ bfq_mark_bfqq_coop(new_bfqq);
++ /*
++ * new_bfqq now belongs to at least two bics (it is a shared queue):
++ * set new_bfqq->bic to NULL. bfqq either:
++ * - does not belong to any bic any more, and hence bfqq->bic must
++ * be set to NULL, or
++ * - is a queue whose owning bics have already been redirected to a
++ * different queue, hence the queue is destined to not belong to
++ * any bic soon and bfqq->bic is already NULL (therefore the next
++ * assignment causes no harm).
++ */
++ new_bfqq->bic = NULL;
++ bfqq->bic = NULL;
++ /* release process reference to bfqq */
++ bfq_put_queue(bfqq);
++}
++
++static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ bool is_sync = op_is_sync(bio->bi_opf);
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq, *new_bfqq;
++
++ /*
++ * Disallow merge of a sync bio into an async request.
++ */
++ if (is_sync && !rq_is_sync(rq))
++ return false;
++
++ /*
++ * Lookup the bfqq that this bio will be queued with. Allow
++ * merge only if rq is queued there.
++ * Queue lock is held here.
++ */
++ bic = bfq_bic_lookup(bfqd, current->io_context);
++ if (!bic)
++ return false;
++
++ bfqq = bic_to_bfqq(bic, is_sync);
++ /*
++ * We take advantage of this function to perform an early merge
++ * of the queues of possible cooperating processes.
++ */
++ if (bfqq) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++ if (new_bfqq) {
++ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
++ /*
++ * If we get here, the bio will be queued in the
++ * shared queue, i.e., new_bfqq, so use new_bfqq
++ * to decide whether bio and rq can be merged.
++ */
++ bfqq = new_bfqq;
++ }
++ }
++
++ return bfqq == RQ_BFQQ(rq);
++}
++
++static int bfq_allow_rq_merge(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ return RQ_BFQQ(rq) == RQ_BFQQ(next);
++}
++
++/*
++ * Set the maximum time for the in-service queue to consume its
++ * budget. This prevents seeky processes from lowering the throughput.
++ * In practice, a time-slice service scheme is used with seeky
++ * processes.
++ */
++static void bfq_set_budget_timeout(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ unsigned int timeout_coeff;
++
++ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
++ timeout_coeff = 1;
++ else
++ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
++
++ bfqd->last_budget_start = ktime_get();
++
++ bfqq->budget_timeout = jiffies +
++ bfqd->bfq_timeout * timeout_coeff;
++
++ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff));
++}
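++
++/*
++ * Worked example (figures hypothetical): with bfq_timeout equivalent
++ * to 125 ms, a queue weight-raised to three times its original
++ * weight gets timeout_coeff = 3, i.e. a budget timeout of about
++ * 375 ms, while a soft real-time queue keeps the plain 125 ms
++ * because its coefficient is forced to 1 above.
++ */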
++
++static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ if (bfqq) {
++ bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
++ bfq_mark_bfqq_must_alloc(bfqq);
++ bfq_clear_bfqq_fifo_expire(bfqq);
++
++ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++ BUG_ON(bfqq == bfqd->in_service_queue);
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
++ bfqq->wr_coeff > 1 &&
++ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ time_is_before_jiffies(bfqq->budget_timeout)) {
++ /*
++ * For soft real-time queues, move the start
++ * of the weight-raising period forward by the
++ * time the queue has not received any
++ * service. Otherwise, a relatively long
++ * service delay is likely to cause the
++ * weight-raising period of the queue to end,
++ * because of the short duration of the
++ * weight-raising period of a soft real-time
++ * queue. It is worth noting that this move
++ * is not so dangerous for the other queues,
++ * because soft real-time queues are not
++ * greedy.
++ *
++ * To not add a further variable, we use the
++ * overloaded field budget_timeout to
++ * determine for how long the queue has not
++ * received service, i.e., how much time has
++ * elapsed since the queue expired. However,
++ * this is a little imprecise, because
++ * budget_timeout is set to jiffies if bfqq
++ * not only expires, but also remains with no
++ * request.
++ */
++ if (time_after(bfqq->budget_timeout,
++ bfqq->last_wr_start_finish))
++ bfqq->last_wr_start_finish +=
++ jiffies - bfqq->budget_timeout;
++ else
++ bfqq->last_wr_start_finish = jiffies;
++
++ if (time_is_after_jiffies(bfqq->last_wr_start_finish)) {
++ pr_crit(
++ "BFQ WARNING:last %lu budget %lu jiffies %lu",
++ bfqq->last_wr_start_finish,
++ bfqq->budget_timeout,
++ jiffies);
++ pr_crit("diff %lu", jiffies -
++ max_t(unsigned long,
++ bfqq->last_wr_start_finish,
++ bfqq->budget_timeout));
++ bfqq->last_wr_start_finish = jiffies;
++ }
++ }
++
++ bfq_set_budget_timeout(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_in_service_queue, cur-budget = %d",
++ bfqq->entity.budget);
++ } else
++ bfq_log(bfqd, "set_in_service_queue: NULL");
++
++ bfqd->in_service_queue = bfqq;
++}
++
++/*
++ * Get and set a new queue for service.
++ */
++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
++
++ __bfq_set_in_service_queue(bfqd, bfqq);
++ return bfqq;
++}
++
++static void bfq_arm_slice_timer(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
++ struct bfq_io_cq *bic;
++ u32 sl;
++
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Processes have exited, don't wait. */
++ bic = bfqd->in_service_bic;
++ if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0)
++ return;
++
++ bfq_mark_bfqq_wait_request(bfqq);
++
++ /*
++ * We don't want to idle for seeks, but we do want to allow
++ * fair distribution of slice time for a process doing back-to-back
++	 * seeks. So allow a little bit of time for it to submit a new rq.
++ *
++ * To prevent processes with (partly) seeky workloads from
++ * being too ill-treated, grant them a small fraction of the
++ * assigned budget before reducing the waiting time to
++ * BFQ_MIN_TT. This happened to help reduce latency.
++ */
++ sl = bfqd->bfq_slice_idle;
++ /*
++ * Unless the queue is being weight-raised or the scenario is
++ * asymmetric, grant only minimum idle time if the queue
++ * is seeky. A long idling is preserved for a weight-raised
++	 * queue, or, more generally, in an asymmetric scenario,
++ * because a long idling is needed for guaranteeing to a queue
++ * its reserved share of the throughput (in particular, it is
++ * needed if the queue has a higher weight than some other
++ * queue).
++ */
++ if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
++ bfq_symmetric_scenario(bfqd))
++ sl = min_t(u32, sl, BFQ_MIN_TT);
++
++ bfqd->last_idling_start = ktime_get();
++ hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
++ HRTIMER_MODE_REL);
++ bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
++ bfq_log(bfqd, "arm idle: %ld/%ld ms",
++ sl / NSEC_PER_MSEC, bfqd->bfq_slice_idle / NSEC_PER_MSEC);
++}
++
++/*
++ * In autotuning mode, max_budget is dynamically recomputed as the
++ * amount of sectors transferred in timeout at the estimated peak
++ * rate. This enables BFQ to utilize a full timeslice with a full
++ * budget, even if the in-service queue is served at peak rate. And
++ * this maximises throughput with sequential workloads.
++ */
++static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
++{
++ return (u64)bfqd->peak_rate * USEC_PER_MSEC *
++ jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
++}
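++
++/*
++ * Worked example (device figures hypothetical): peak_rate is kept as
++ * sectors per microsecond in fixed point, shifted left by
++ * BFQ_RATE_SHIFT (see bfq_update_rate_reset()). Multiplying by
++ * USEC_PER_MSEC and by the timeout in milliseconds, then shifting
++ * right, therefore yields plain sectors. For a device sustaining
++ * about 200000 sectors/s (~100 MB/s with 512-byte sectors) and a
++ * 125 ms timeout, this gives roughly 0.2 * 1000 * 125 = 25000
++ * sectors, i.e. about 12 MB of budget.
++ */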
++
++/*
++ * Update parameters related to throughput and responsiveness, as a
++ * function of the estimated peak rate. See comments on
++ * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
++ */
++static void update_thr_responsiveness_params(struct bfq_data *bfqd)
++{
++ int dev_type = blk_queue_nonrot(bfqd->queue);
++
++ if (bfqd->bfq_user_max_budget == 0) {
++ bfqd->bfq_max_budget =
++ bfq_calc_max_budget(bfqd);
++ BUG_ON(bfqd->bfq_max_budget < 0);
++ bfq_log(bfqd, "new max_budget = %d",
++ bfqd->bfq_max_budget);
++ }
++
++ if (bfqd->device_speed == BFQ_BFQD_FAST &&
++ bfqd->peak_rate < device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_SLOW;
++ bfqd->RT_prod = R_slow[dev_type] *
++ T_slow[dev_type];
++ } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
++ bfqd->peak_rate > device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_FAST;
++ bfqd->RT_prod = R_fast[dev_type] *
++ T_fast[dev_type];
++ }
++
++ bfq_log(bfqd,
++"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec",
++ dev_type == 0 ? "ROT" : "NONROT",
++ bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
++ bfqd->device_speed == BFQ_BFQD_FAST ?
++ (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
++ (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
++ (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
++ BFQ_RATE_SHIFT);
++}
++
++static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq)
++{
++ if (rq != NULL) { /* new rq dispatch now, reset accordingly */
++		bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
++ bfqd->peak_rate_samples = 1;
++ bfqd->sequential_samples = 0;
++ bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
++ blk_rq_sectors(rq);
++ } else /* no new rq dispatched, just reset the number of samples */
++ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
++
++ bfq_log(bfqd,
++ "reset_rate_computation at end, sample %u/%u tot_sects %llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ bfqd->tot_sectors_dispatched);
++}
++
++static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
++{
++ u32 rate, weight, divisor;
++
++ /*
++ * For the convergence property to hold (see comments on
++ * bfq_update_peak_rate()) and for the assessment to be
++ * reliable, a minimum number of samples must be present, and
++ * a minimum amount of time must have elapsed. If not so, do
++ * not compute new rate. Just reset parameters, to get ready
++ * for a new evaluation attempt.
++ */
++ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
++ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) {
++ bfq_log(bfqd,
++ "update_rate_reset: only resetting, delta_first %lluus samples %d",
++ bfqd->delta_from_first>>10, bfqd->peak_rate_samples);
++ goto reset_computation;
++ }
++
++ /*
++ * If a new request completion has occurred after last
++ * dispatch, then, to approximate the rate at which requests
++ * have been served by the device, it is more precise to
++ * extend the observation interval to the last completion.
++ */
++ bfqd->delta_from_first =
++ max_t(u64, bfqd->delta_from_first,
++ bfqd->last_completion - bfqd->first_dispatch);
++
++ BUG_ON(bfqd->delta_from_first == 0);
++ /*
++ * Rate computed in sects/usec, and not sects/nsec, for
++ * precision issues.
++ */
++ rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
++ div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
++
++ bfq_log(bfqd,
++"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
++ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ rate > 20<<BFQ_RATE_SHIFT);
++
++ /*
++ * Peak rate not updated if:
++ * - the percentage of sequential dispatches is below 3/4 of the
++ * total, and rate is below the current estimated peak rate
++ * - rate is unreasonably high (> 20M sectors/sec)
++ */
++ if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
++ rate <= bfqd->peak_rate) ||
++ rate > 20<<BFQ_RATE_SHIFT) {
++ bfq_log(bfqd,
++ "update_rate_reset: goto reset, samples %u/%u rate/peak %llu/%llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ goto reset_computation;
++ } else {
++ bfq_log(bfqd,
++ "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ }
++
++ /*
++ * We have to update the peak rate, at last! To this purpose,
++ * we use a low-pass filter. We compute the smoothing constant
++ * of the filter as a function of the 'weight' of the new
++ * measured rate.
++ *
++ * As can be seen in next formulas, we define this weight as a
++ * quantity proportional to how sequential the workload is,
++ * and to how long the observation time interval is.
++ *
++ * The weight runs from 0 to 8. The maximum value of the
++ * weight, 8, yields the minimum value for the smoothing
++ * constant. At this minimum value for the smoothing constant,
++ * the measured rate contributes for half of the next value of
++ * the estimated peak rate.
++ *
++ * So, the first step is to compute the weight as a function
++ * of how sequential the workload is. Note that the weight
++ * cannot reach 9, because bfqd->sequential_samples cannot
++ * become equal to bfqd->peak_rate_samples, which, in its
++ * turn, holds true because bfqd->sequential_samples is not
++ * incremented for the first sample.
++ */
++ weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
++
++ /*
++ * Second step: further refine the weight as a function of the
++ * duration of the observation interval.
++ */
++ weight = min_t(u32, 8,
++ div_u64(weight * bfqd->delta_from_first,
++ BFQ_RATE_REF_INTERVAL));
++
++ /*
++ * Divisor ranging from 10, for minimum weight, to 2, for
++ * maximum weight.
++ */
++ divisor = 10 - weight;
++ BUG_ON(divisor == 0);
++
++ /*
++ * Finally, update peak rate:
++ *
++ * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
++ */
++ bfqd->peak_rate *= divisor-1;
++ bfqd->peak_rate /= divisor;
++ rate /= divisor; /* smoothing constant alpha = 1/divisor */
++
++ bfq_log(bfqd,
++ "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u",
++ divisor,
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT),
++ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT));
++
++ BUG_ON(bfqd->peak_rate == 0);
++ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
++
++ bfqd->peak_rate += rate;
++ update_thr_responsiveness_params(bfqd);
++ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
++
++reset_computation:
++ bfq_reset_rate_computation(bfqd, rq);
++}
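The closing update above is an exponentially weighted moving average: the smoothing constant 1/divisor grows with how sequential the sample set was and with how long the observation interval lasted, so a long, sequential interval can contribute up to half of the new estimate. A minimal user-space sketch of just that filter, with an illustrative reference interval:

#include <stdio.h>
#include <stdint.h>

#define REF_INTERVAL_NS (250ULL * 1000 * 1000)  /* illustrative reference interval */

/*
 * weight in [0, 8]: proportional to the fraction of sequential samples and
 * to the length of the observation interval; divisor = 10 - weight, so the
 * smoothing constant alpha = 1/divisor ranges from 1/10 (low trust in the
 * new sample) to 1/2 (high trust).
 */
static uint64_t ewma_peak_rate(uint64_t peak_rate, uint64_t sample_rate,
                               unsigned int seq_samples, unsigned int tot_samples,
                               uint64_t interval_ns)
{
        unsigned int weight, divisor;

        weight = (9 * seq_samples) / tot_samples;       /* < 9 because seq < tot */
        weight = weight * interval_ns / REF_INTERVAL_NS;
        if (weight > 8)
                weight = 8;
        divisor = 10 - weight;

        /* peak_rate = peak_rate * (divisor - 1) / divisor + sample_rate / divisor */
        return peak_rate * (divisor - 1) / divisor + sample_rate / divisor;
}

int main(void)
{
        uint64_t peak = 1000;

        /* a mostly sequential, full-length observation: the new sample weighs 1/2 */
        peak = ewma_peak_rate(peak, 2000, 90, 100, REF_INTERVAL_NS);
        printf("new peak rate estimate: %llu\n", (unsigned long long)peak);
        return 0;
}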
++
++/*
++ * Update the read/write peak rate (the main quantity used for
++ * auto-tuning, see update_thr_responsiveness_params()).
++ *
++ * It is not trivial to estimate the peak rate (correctly): because of
++ * the presence of sw and hw queues between the scheduler and the
++ * device components that finally serve I/O requests, it is hard to
++ * say exactly when a given dispatched request is served inside the
++ * device, and for how long. As a consequence, it is hard to know
++ * precisely at what rate a given set of requests is actually served
++ * by the device.
++ *
++ * On the opposite end, the dispatch time of any request is trivially
++ * available, and, from this piece of information, the "dispatch rate"
++ * of requests can be immediately computed. So, the idea in the next
++ * function is to use what is known, namely request dispatch times
++ * (plus, when useful, request completion times), to estimate what is
++ * unknown, namely in-device request service rate.
++ *
++ * The main issue is that, because of the above facts, the rate at
++ * which a certain set of requests is dispatched over a certain time
++ * interval can vary greatly with respect to the rate at which the
++ * same requests are then served. But, since the size of any
++ * intermediate queue is limited, and the service scheme is lossless
++ * (no request is silently dropped), the following obvious convergence
++ * property holds: the number of requests dispatched MUST become
++ * closer and closer to the number of requests completed as the
++ * observation interval grows. This is the key property used in
++ * the next function to estimate the peak service rate as a function
++ * of the observed dispatch rate. The function assumes to be invoked
++ * on every request dispatch.
++ */
++static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
++{
++ u64 now_ns = ktime_get_ns();
++
++ if (bfqd->peak_rate_samples == 0) { /* first dispatch */
++ bfq_log(bfqd,
++ "update_peak_rate: goto reset, samples %d",
++ bfqd->peak_rate_samples);
++ bfq_reset_rate_computation(bfqd, rq);
++ goto update_last_values; /* will add one sample */
++ }
++
++ /*
++ * Device idle for very long: the observation interval lasting
++ * up to this dispatch cannot be a valid observation interval
++ * for computing a new peak rate (similarly to the late-
++ * completion event in bfq_completed_request()). Go to
++ * update_rate_and_reset to have the following three steps
++ * taken:
++ * - close the observation interval at the last (previous)
++ * request dispatch or completion
++ * - compute rate, if possible, for that observation interval
++ * - start a new observation interval with this dispatch
++ */
++ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
++ bfqd->rq_in_driver == 0) {
++ bfq_log(bfqd,
++"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d",
++ (now_ns - bfqd->last_dispatch)>>10,
++ bfqd->peak_rate_samples);
++ goto update_rate_and_reset;
++ }
++
++ /* Update sampling information */
++ bfqd->peak_rate_samples++;
++
++ if ((bfqd->rq_in_driver > 0 ||
++ now_ns - bfqd->last_completion < BFQ_MIN_TT)
++ && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
++ bfqd->sequential_samples++;
++
++ bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
++
++ /* Reset max observed rq size every 32 dispatches */
++ if (likely(bfqd->peak_rate_samples % 32))
++ bfqd->last_rq_max_size =
++ max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
++ else
++ bfqd->last_rq_max_size = blk_rq_sectors(rq);
++
++ bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
++
++ bfq_log(bfqd,
++ "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ bfqd->tot_sectors_dispatched,
++ bfqd->delta_from_first>>10);
++
++ /* Target observation interval not yet reached, go on sampling */
++ if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
++ goto update_last_values;
++
++update_rate_and_reset:
++ bfq_update_rate_reset(bfqd, rq);
++update_last_values:
++ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
++ bfqd->last_dispatch = now_ns;
++
++ bfq_log(bfqd,
++ "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu",
++ (now_ns - bfqd->first_dispatch)>>10,
++ (unsigned long long) bfqd->last_position,
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ bfq_log(bfqd,
++ "update_peak_rate: samples at end %d", bfqd->peak_rate_samples);
++}
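In other words, bfq_update_peak_rate() accumulates one sample per dispatch, closes the observation interval either when it reaches the reference duration or when the device has sat idle for too long, and then restarts sampling. A condensed user-space model of that control flow, with illustrative thresholds and the actual rate computation abstracted away:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IDLE_GAP_NS     (100ULL * 1000 * 1000)  /* "device idle for very long" */
#define REF_INTERVAL_NS (250ULL * 1000 * 1000)  /* target observation interval */

struct rate_sampler {
        uint64_t first_dispatch_ns;
        uint64_t last_dispatch_ns;
        uint64_t tot_sectors;
        unsigned int samples;
};

static void sampler_reset(struct rate_sampler *s, uint64_t now_ns, uint32_t sectors)
{
        s->first_dispatch_ns = s->last_dispatch_ns = now_ns;
        s->tot_sectors = sectors;
        s->samples = 1;
}

/* Invoked on every dispatch; returns true when an observation interval closed. */
static bool sampler_on_dispatch(struct rate_sampler *s, uint64_t now_ns,
                                uint32_t sectors, unsigned int rq_in_driver)
{
        bool closed = false;

        if (s->samples == 0) {                  /* first dispatch ever */
                sampler_reset(s, now_ns, sectors);
                goto out;
        }

        if (now_ns - s->last_dispatch_ns > IDLE_GAP_NS && rq_in_driver == 0) {
                closed = true;                  /* close at the previous dispatch */
                sampler_reset(s, now_ns, sectors);
                goto out;
        }

        s->samples++;
        s->tot_sectors += sectors;
        if (now_ns - s->first_dispatch_ns >= REF_INTERVAL_NS) {
                closed = true;                  /* target interval reached */
                sampler_reset(s, now_ns, sectors);
        }
out:
        s->last_dispatch_ns = now_ns;
        return closed;
}

int main(void)
{
        struct rate_sampler s = { 0 };
        uint64_t t = 0;
        int closes = 0;

        for (int i = 0; i < 100; i++, t += 10ULL * 1000 * 1000) /* one dispatch / 10 ms */
                closes += sampler_on_dispatch(&s, t, 8, 1);
        printf("closed %d observation intervals over 100 dispatches\n", closes);
        return 0;
}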
++
++/*
++ * Move request from internal lists to the dispatch list of the request queue
++ */
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ /*
++ * For consistency, the next instruction should have been executed
++ * after removing the request from the queue and dispatching it.
++ * We execute instead this instruction before bfq_remove_request()
++ * (and hence introduce a temporary inconsistency), for efficiency.
++ * In fact, in a forced_dispatch, this prevents two counters related
++ * to bfqq->dispatched from being uselessly decremented if bfqq
++ * is not in service, and then incremented again after
++ * incrementing bfqq->dispatched.
++ */
++ bfqq->dispatched++;
++ bfq_update_peak_rate(q->elevator->elevator_data, rq);
++
++ bfq_remove_request(rq);
++ elv_dispatch_sort(q, rq);
++}
++
++static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ /*
++ * If this bfqq is shared between multiple processes, check
++ * to make sure that those processes are still issuing I/Os
++ * within the mean seek distance. If not, it may be time to
++ * break the queues apart again.
++ */
++ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
++ bfq_mark_bfqq_split_coop(bfqq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ if (bfqq->dispatched == 0)
++ /*
++ * Overloading budget_timeout field to store
++ * the time at which the queue remains with no
++ * backlog and no outstanding request; used by
++ * the weight-raising mechanism.
++ */
++ bfqq->budget_timeout = jiffies;
++
++ bfq_del_bfqq_busy(bfqd, bfqq, true);
++ } else {
++ bfq_requeue_bfqq(bfqd, bfqq);
++ /*
++ * Resort priority tree of potential close cooperators.
++ */
++ bfq_pos_tree_add_move(bfqd, bfqq);
++ }
++
++ /*
++ * All in-service entities must have been properly deactivated
++ * or requeued before executing the next function, which
++ * resets all in-service entities as no longer in service.
++ */
++ __bfq_bfqd_reset_in_service(bfqd);
++}
++
++/**
++ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
++ * @bfqd: device data.
++ * @bfqq: queue to update.
++ * @reason: reason for expiration.
++ *
++ * Handle the feedback on @bfqq budget at queue expiration.
++ * See the body for detailed comments.
++ */
++static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ enum bfqq_expiration reason)
++{
++ struct request *next_rq;
++ int budget, min_budget;
++
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ min_budget = bfq_min_budget(bfqd);
++
++ if (bfqq->wr_coeff == 1)
++ budget = bfqq->max_budget;
++ else /*
++ * Use a constant, low budget for weight-raised queues,
++ * to help achieve a low latency. Keep it slightly higher
++ * than the minimum possible budget, to cause a little
++ * bit fewer expirations.
++ */
++ budget = 2 * min_budget;
++
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
++ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
++ budget, bfq_min_budget(bfqd));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
++
++ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
++ switch (reason) {
++ /*
++ * Caveat: in all the following cases we trade latency
++ * for throughput.
++ */
++ case BFQ_BFQQ_TOO_IDLE:
++ /*
++ * This is the only case where we may reduce
++ * the budget: if there is no request of the
++ * process still waiting for completion, then
++ * we assume (tentatively) that the timer has
++ * expired because the batch of requests of
++ * the process could have been served with a
++ * smaller budget. Hence, betting that the
++ * process will behave in the same way when it
++ * becomes backlogged again, we reduce its
++ * next budget. As long as we guess right,
++ * this budget cut reduces the latency
++ * experienced by the process.
++ *
++ * However, if there are still outstanding
++ * requests, then the process may have not yet
++ * issued its next request just because it is
++ * still waiting for the completion of some of
++ * the still outstanding ones. So in this
++ * subcase we do not reduce its budget, on the
++ * contrary we increase it to possibly boost
++ * the throughput, as discussed in the
++ * comments to the BUDGET_TIMEOUT case.
++ */
++ if (bfqq->dispatched > 0) /* still outstanding reqs */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ else {
++ if (budget > 5 * min_budget)
++ budget -= 4 * min_budget;
++ else
++ budget = min_budget;
++ }
++ break;
++ case BFQ_BFQQ_BUDGET_TIMEOUT:
++ /*
++ * We double the budget here because it gives
++ * the chance to boost the throughput if this
++ * is not a seeky process (and has bumped into
++ * this timeout because of, e.g., ZBR).
++ */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_BUDGET_EXHAUSTED:
++ /*
++ * The process still has backlog, and did not
++ * let either the budget timeout or the disk
++ * idling timeout expire. Hence it is not
++ * seeky, has a short thinktime and may be
++ * happy with a higher budget too. So
++ * definitely increase the budget of this good
++ * candidate to boost the disk throughput.
++ */
++ budget = min(budget * 4, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_NO_MORE_REQUESTS:
++ /*
++ * For queues that expire for this reason, it
++ * is particularly important to keep the
++ * budget close to the actual service they
++ * need. Doing so reduces the timestamp
++ * misalignment problem described in the
++ * comments in the body of
++ * __bfq_activate_entity. In fact, suppose
++ * that a queue systematically expires for
++ * BFQ_BFQQ_NO_MORE_REQUESTS and presents a
++ * new request in time to enjoy timestamp
++ * back-shifting. The larger the budget of the
++ * queue is with respect to the service the
++ * queue actually requests in each service
++ * slot, the more times the queue can be
++ * reactivated with the same virtual finish
++ * time. It follows that, even if this finish
++ * time is pushed to the system virtual time
++ * to reduce the consequent timestamp
++ * misalignment, the queue unjustly enjoys for
++ * many re-activations a lower finish time
++ * than all newly activated queues.
++ *
++ * The service needed by bfqq is measured
++ * quite precisely by bfqq->entity.service.
++ * Since bfqq does not enjoy device idling,
++ * bfqq->entity.service is equal to the number
++ * of sectors that the process associated with
++ * bfqq requested to read/write before waiting
++ * for request completions, or blocking for
++ * other reasons.
++ */
++ budget = max_t(int, bfqq->entity.service, min_budget);
++ break;
++ default:
++ return;
++ }
++ } else if (!bfq_bfqq_sync(bfqq))
++ /*
++ * Async queues get always the maximum possible
++ * budget, as for them we do not care about latency
++ * (in addition, their ability to dispatch is limited
++ * by the charging factor).
++ */
++ budget = bfqd->bfq_max_budget;
++
++ bfqq->max_budget = budget;
++
++ if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
++ !bfqd->bfq_user_max_budget)
++ bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
++
++ /*
++ * If there is still backlog, then assign a new budget, making
++ * sure that it is large enough for the next request. Since
++ * the finish time of bfqq must be kept in sync with the
++ * budget, be sure to call __bfq_bfqq_expire() *after* this
++ * update.
++ *
++ * If there is no backlog, then no need to update the budget;
++ * it will be updated on the arrival of a new request.
++ */
++ next_rq = bfqq->next_rq;
++ if (next_rq) {
++ BUG_ON(reason == BFQ_BFQQ_TOO_IDLE ||
++ reason == BFQ_BFQQ_NO_MORE_REQUESTS);
++ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ BUG_ON(!bfq_bfqq_busy(bfqq));
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
++ next_rq ? blk_rq_sectors(next_rq) : 0,
++ bfqq->entity.budget);
++}
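Stripped of the logging and of the weight-raising special case, the feedback above is a small decision table: shrink the budget when the queue idled out with nothing in flight, grow it on budget timeout or exhaustion, and pin it to the measured service when the queue simply ran out of requests. A compact standalone restatement follows; the cap and the constants are illustrative, not the patch's tuned values.

#include <stdio.h>

enum expire_reason { TOO_IDLE, BUDGET_TIMEOUT, BUDGET_EXHAUSTED, NO_MORE_REQUESTS };

static int next_budget(enum expire_reason reason, int budget, int min_budget,
                       int max_budget, int dispatched, int service)
{
        switch (reason) {
        case TOO_IDLE:
                if (dispatched > 0)                     /* still waiting for completions */
                        budget = budget * 2;
                else if (budget > 5 * min_budget)       /* bet the next batch is smaller */
                        budget -= 4 * min_budget;
                else
                        budget = min_budget;
                break;
        case BUDGET_TIMEOUT:
                budget = budget * 2;
                break;
        case BUDGET_EXHAUSTED:
                budget = budget * 4;
                break;
        case NO_MORE_REQUESTS:
                budget = service > min_budget ? service : min_budget;
                break;
        }
        return budget < max_budget ? budget : max_budget;
}

int main(void)
{
        int b = 8192;

        b = next_budget(BUDGET_EXHAUSTED, b, 256, 65536, 0, 8192);
        printf("budget after exhaustion: %d\n", b);     /* grows toward max_budget */
        b = next_budget(TOO_IDLE, b, 256, 65536, 0, 1024);
        printf("budget after idling out: %d\n", b);     /* shrinks again */
        return 0;
}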
++
++/*
++ * Return true if the process associated with bfqq is "slow". The slow
++ * flag is used, in addition to the budget timeout, to reduce the
++ * amount of service provided to seeky processes, and thus reduce
++ * their chances to lower the throughput. More details in the comments
++ * on the function bfq_bfqq_expire().
++ *
++ * An important observation is in order: as discussed in the comments
++ * on the function bfq_update_peak_rate(), with devices with internal
++ * queues, it is hard, if possible at all, to know when and for how long
++ * an I/O request is processed by the device (apart from the trivial
++ * I/O pattern where a new request is dispatched only after the
++ * previous one has been completed). This makes it hard to evaluate
++ * the real rate at which the I/O requests of each bfq_queue are
++ * served. In fact, for an I/O scheduler like BFQ, serving a
++ * bfq_queue means just dispatching its requests during its service
++ * slot (i.e., until the budget of the queue is exhausted, or the
++ * queue remains idle, or, finally, a timeout fires). But, during the
++ * service slot of a bfq_queue, around 100 ms at most, the device may
++ * be even still processing requests of bfq_queues served in previous
++ * service slots. On the opposite end, the requests of the in-service
++ * bfq_queue may be completed after the service slot of the queue
++ * finishes.
++ *
++ * Anyway, unless more sophisticated solutions are used
++ * (where possible), the sum of the sizes of the requests dispatched
++ * during the service slot of a bfq_queue is probably the only
++ * approximation available for the service received by the bfq_queue
++ * during its service slot. And this sum is the quantity used in this
++ * function to evaluate the I/O speed of a process.
++ */
++static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ bool compensate, enum bfqq_expiration reason,
++ unsigned long *delta_ms)
++{
++ ktime_t delta_ktime;
++ u32 delta_usecs;
++ bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
++
++ if (!bfq_bfqq_sync(bfqq))
++ return false;
++
++ if (compensate)
++ delta_ktime = bfqd->last_idling_start;
++ else
++ delta_ktime = ktime_get();
++ delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
++ delta_usecs = ktime_to_us(delta_ktime);
++
++ /* don't use too short time intervals */
++ if (delta_usecs < 1000) {
++ if (blk_queue_nonrot(bfqd->queue))
++ /*
++ * give same worst-case guarantees as idling
++ * for seeky
++ */
++ *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
++ else /* charge at least one seek */
++ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
++
++ bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs);
++
++ return slow;
++ }
++
++ *delta_ms = delta_usecs / USEC_PER_MSEC;
++
++ /*
++ * Use only long (> 20ms) intervals to filter out excessive
++ * spikes in service rate estimation.
++ */
++ if (delta_usecs > 20000) {
++ /*
++ * Caveat for rotational devices: processes doing I/O
++ * in the slower disk zones tend to be slow(er) even
++ * if not seeky. In this respect, the estimated peak
++ * rate is likely to be an average over the disk
++ * surface. Accordingly, to not be too harsh with
++ * unlucky processes, a process is deemed slow only if
++ * its rate has been lower than half of the estimated
++ * peak rate.
++ */
++ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
++ bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d",
++ bfqq->entity.service, bfqd->bfq_max_budget);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
++
++ return slow;
++}
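The criterion therefore reduces to: trust only intervals longer than 20 ms, call the queue slow if it consumed less than half of the maximum budget in that time, and fall back to the seekiness flag otherwise. A tiny sketch of that predicate:

#include <stdbool.h>
#include <stdio.h>

/*
 * A queue is deemed "slow" only over reasonably long intervals, and only
 * if it consumed less than half of the maximum budget in that interval;
 * for shorter intervals the caller falls back to the seekiness flag.
 */
static bool queue_is_slow(unsigned int delta_usecs, int service, int max_budget,
                          bool seeky)
{
        if (delta_usecs < 1000)         /* too short to judge: reuse seekiness */
                return seeky;
        if (delta_usecs > 20000)        /* long enough to trust the ratio */
                return service < max_budget / 2;
        return seeky;
}

int main(void)
{
        printf("%d\n", queue_is_slow(50000, 1000, 16384, false));       /* 1: slow */
        printf("%d\n", queue_is_slow(500, 1000, 16384, true));          /* 1: too short, seeky */
        return 0;
}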
++
++/*
++ * To be deemed as soft real-time, an application must meet two
++ * requirements. First, the application must not require an average
++ * bandwidth higher than the approximate bandwidth required to play back or
++ * record a compressed high-definition video.
++ * The next function is invoked on the completion of the last request of a
++ * batch, to compute the next-start time instant, soft_rt_next_start, such
++ * that, if the next request of the application does not arrive before
++ * soft_rt_next_start, then the above requirement on the bandwidth is met.
++ *
++ * The second requirement is that the request pattern of the application is
++ * isochronous, i.e., that, after issuing a request or a batch of requests,
++ * the application stops issuing new requests until all its pending requests
++ * have been completed. After that, the application may issue a new batch,
++ * and so on.
++ * For this reason the next function is invoked to compute
++ * soft_rt_next_start only for applications that meet this requirement,
++ * whereas soft_rt_next_start is set to infinity for applications that do
++ * not.
++ *
++ * Unfortunately, even a greedy application may happen to behave in an
++ * isochronous way if the CPU load is high. In fact, the application may
++ * stop issuing requests while the CPUs are busy serving other processes,
++ * then restart, then stop again for a while, and so on. In addition, if
++ * the disk achieves a low enough throughput with the request pattern
++ * issued by the application (e.g., because the request pattern is random
++ * and/or the device is slow), then the application may meet the above
++ * bandwidth requirement too. To prevent such a greedy application from
++ * being deemed soft real-time, a further rule is used in the computation of
++ * soft_rt_next_start: soft_rt_next_start must be higher than the current
++ * time plus the maximum time for which the arrival of a request is waited
++ * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
++ * This filters out greedy applications, as the latter issue instead their
++ * next request as soon as possible after the last one has been completed
++ * (in contrast, when a batch of requests is completed, a soft real-time
++ * application spends some time processing data).
++ *
++ * Unfortunately, the last filter may easily generate false positives if
++ * only bfqd->bfq_slice_idle is used as a reference time interval and one
++ * or both the following cases occur:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or higher
++ * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
++ * HZ=100.
++ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
++ * for a while, then suddenly 'jump' by several units to recover the lost
++ * increments. This seems to happen, e.g., inside virtual machines.
++ * To address this issue, we do not use as a reference time interval just
++ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
++ * particular we add the minimum number of jiffies for which the filter
++ * seems to be quite precise also in embedded systems and KVM/QEMU virtual
++ * machines.
++ */
++static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq,
++"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u",
++ bfqq->service_from_backlogged,
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate));
++
++ return max(bfqq->last_idle_bklogged +
++ HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
++}
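The returned instant is the later of two bounds: the time by which the service received since the last idle instant stays within the soft real-time rate limit, and a small now-plus-idle-window guard that filters out greedy issuers. A user-space sketch of the same arithmetic, using abstract ticks in place of jiffies and an assumed HZ of 100:

#include <stdio.h>

#define HZ_TICKS 100U   /* illustrative tick rate, matching an HZ=100 kernel */

/*
 * next_start = max(last_idle + ticks needed to stay below max_rate,
 *                  now + idle_window + 4)
 */
static unsigned long softrt_next_start(unsigned long now, unsigned long last_idle,
                                       unsigned long service_sectors,
                                       unsigned long max_rate_sectors_per_sec,
                                       unsigned long idle_window_ticks)
{
        unsigned long bw_limit = last_idle +
                HZ_TICKS * service_sectors / max_rate_sectors_per_sec;
        unsigned long greedy_guard = now + idle_window_ticks + 4;

        return bw_limit > greedy_guard ? bw_limit : greedy_guard;
}

int main(void)
{
        /* 7000 sectors served since the last idle instant, 14000 sectors/s allowed */
        printf("next start tick: %lu\n",
               softrt_next_start(1000, 990, 7000, 14000, 1));
        return 0;
}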
++
++/*
++ * Return the farthest future time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_greatest_from_now(void)
++{
++ return jiffies + MAX_JIFFY_OFFSET;
++}
++
++/*
++ * Return the farthest past time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_smallest_from_now(void)
++{
++ return jiffies - MAX_JIFFY_OFFSET;
++}
++
++/**
++ * bfq_bfqq_expire - expire a queue.
++ * @bfqd: device owning the queue.
++ * @bfqq: the queue to expire.
++ * @compensate: if true, compensate for the time spent idling.
++ * @reason: the reason causing the expiration.
++ *
++ * If the process associated with bfqq does slow I/O (e.g., because it
++ * issues random requests), we charge bfqq with the time it has been
++ * in service instead of the service it has received (see
++ * bfq_bfqq_charge_time for details on how this goal is achieved). As
++ * a consequence, bfqq will typically get higher timestamps upon
++ * reactivation, and hence it will be rescheduled as if it had
++ * received more service than what it has actually received. In the
++ * end, bfqq receives less service in proportion to how slowly its
++ * associated process consumes its budgets (and hence how seriously it
++ * tends to lower the throughput). In addition, this time-charging
++ * strategy guarantees time fairness among slow processes. In
++ * contrast, if the process associated with bfqq is not slow, we
++ * charge bfqq exactly with the service it has received.
++ *
++ * Charging time to the first type of queues and the exact service to
++ * the other has the effect of using the WF2Q+ policy to schedule the
++ * former on a timeslice basis, without violating service domain
++ * guarantees among the latter.
++ */
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ bool compensate,
++ enum bfqq_expiration reason)
++{
++ bool slow;
++ unsigned long delta = 0;
++ struct bfq_entity *entity = &bfqq->entity;
++ int ref;
++
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ /*
++ * Check whether the process is slow (see bfq_bfqq_is_slow).
++ */
++ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
++
++ /*
++ * Increase service_from_backlogged before next statement,
++ * because the possible next invocation of
++ * bfq_bfqq_charge_time would likely inflate
++ * entity->service. In contrast, service_from_backlogged must
++ * contain real service, to enable the soft real-time
++ * heuristic to correctly compute the bandwidth consumed by
++ * bfqq.
++ */
++ bfqq->service_from_backlogged += entity->service;
++
++ /*
++ * As above explained, charge slow (typically seeky) and
++ * timed-out queues with the time and not the service
++ * received, to favor sequential workloads.
++ *
++ * Processes doing I/O in the slower disk zones will tend to
++ * be slow(er) even if not seeky. Therefore, since the
++ * estimated peak rate is actually an average over the disk
++ * surface, these processes may timeout just for bad luck. To
++ * avoid punishing them, do not charge time to processes that
++ * succeeded in consuming at least 2/3 of their budget. This
++ * allows BFQ to preserve enough elasticity to still perform
++ * bandwidth, and not time, distribution with mildly unlucky
++ * or quasi-sequential processes.
++ */
++ if (bfqq->wr_coeff == 1 &&
++ (slow ||
++ (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++ bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
++ bfq_bfqq_charge_time(bfqd, bfqq, delta);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ if (reason == BFQ_BFQQ_TOO_IDLE &&
++ entity->service <= 2 * entity->budget / 10)
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ if (bfqd->low_latency && bfqq->wr_coeff == 1)
++ bfqq->last_wr_start_finish = jiffies;
++
++ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ /*
++ * If we get here, and there are no outstanding
++ * requests, then the request pattern is isochronous
++ * (see the comments on the function
++ * bfq_bfqq_softrt_next_start()). Thus we can compute
++ * soft_rt_next_start. If, instead, the queue still
++ * has outstanding requests, then we have to wait for
++ * the completion of all the outstanding requests to
++ * discover whether the request pattern is actually
++ * isochronous.
++ */
++ BUG_ON(bfqd->busy_queues < 1);
++ if (bfqq->dispatched == 0) {
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++ bfq_log_bfqq(bfqd, bfqq, "new soft_rt_next %lu",
++ bfqq->soft_rt_next_start);
++ } else {
++ /*
++ * The application is still waiting for the
++ * completion of one or more requests:
++ * prevent it from possibly being incorrectly
++ * deemed as soft real-time by setting its
++ * soft_rt_next_start to infinity. In fact,
++ * without this assignment, the application
++ * would be incorrectly deemed as soft
++ * real-time if:
++ * 1) it issued a new request before the
++ * completion of all its in-flight
++ * requests, and
++ * 2) at that time, its soft_rt_next_start
++ * happened to be in the past.
++ */
++ bfqq->soft_rt_next_start =
++ bfq_greatest_from_now();
++ /*
++ * Schedule an update of soft_rt_next_start to when
++ * the task may be discovered to be isochronous.
++ */
++ bfq_mark_bfqq_softrt_update(bfqq);
++ }
++ }
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)",
++ reason, slow, bfqq->dispatched,
++ bfq_bfqq_has_short_ttime(bfqq), entity->weight);
++
++ /*
++ * Increase, decrease or leave budget unchanged according to
++ * reason.
++ */
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
++ BUG_ON(bfqq->next_rq == NULL &&
++ bfqq->entity.budget < bfqq->entity.service);
++ ref = bfqq->ref;
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ BUG_ON(ref > 1 &&
++ !bfq_bfqq_busy(bfqq) && reason == BFQ_BFQQ_BUDGET_EXHAUSTED &&
++ !bfq_class_idle(bfqq));
++
++ /* mark bfqq as waiting a request only if a bic still points to it */
++ if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
++ reason != BFQ_BFQQ_BUDGET_TIMEOUT &&
++ reason != BFQ_BFQQ_BUDGET_EXHAUSTED)
++ bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
++}
++
++/*
++ * Budget timeout is not implemented through a dedicated timer, but
++ * just checked on request arrivals and completions, as well as on
++ * idle timer expirations.
++ */
++static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
++{
++ return time_is_before_eq_jiffies(bfqq->budget_timeout);
++}
++
++/*
++ * If we expire a queue that is actively waiting (i.e., with the
++ * device idled) for the arrival of a new request, then we may incur
++ * the timestamp misalignment problem described in the body of the
++ * function __bfq_activate_entity. Hence we return true only if this
++ * condition does not hold, or if the queue is slow enough to deserve
++ * only to be kicked off for preserving a high throughput.
++ */
++static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "may_budget_timeout: wait_request %d left %d timeout %d",
++ bfq_bfqq_wait_request(bfqq),
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
++ bfq_bfqq_budget_timeout(bfqq));
++
++ return (!bfq_bfqq_wait_request(bfqq) ||
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
++ &&
++ bfq_bfqq_budget_timeout(bfqq);
++}
++
++/*
++ * For a queue that becomes empty, device idling is allowed only if
++ * this function returns true for that queue. As a consequence, since
++ * device idling plays a critical role for both throughput boosting
++ * and service guarantees, the return value of this function plays a
++ * critical role as well.
++ *
++ * In a nutshell, this function returns true only if idling is
++ * beneficial for throughput or, even if detrimental for throughput,
++ * idling is however necessary to preserve service guarantees (low
++ * latency, desired throughput distribution, ...). In particular, on
++ * NCQ-capable devices, this function tries to return false, so as to
++ * help keep the drives' internal queues full, whenever this helps the
++ * device boost the throughput without causing any service-guarantee
++ * issue.
++ *
++ * In more detail, the return value of this function is obtained by,
++ * first, computing a number of boolean variables that take into
++ * account throughput and service-guarantee issues, and, then,
++ * combining these variables in a logical expression. Most of the
++ * issues taken into account are not trivial. We discuss these issues
++ * while introducing the variables.
++ */
++static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++ bool rot_without_queueing =
++ !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
++ bfqq_sequential_and_IO_bound,
++ idling_boosts_thr, idling_boosts_thr_without_issues,
++ idling_needed_for_service_guarantees,
++ asymmetric_scenario;
++
++ if (bfqd->strict_guarantees)
++ return true;
++
++ /*
++ * Idling is performed only if slice_idle > 0. In addition, we
++ * do not idle if
++ * (a) bfqq is async
++ * (b) bfqq is in the idle io prio class: in this case we do
++ * not idle because we want to minimize the bandwidth that
++ * queues in this class can steal from higher-priority queues
++ */
++ if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
++ bfq_class_idle(bfqq))
++ return false;
++
++ bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
++ bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
++ /*
++ * The next variable takes into account the cases where idling
++ * boosts the throughput.
++ *
++ * The value of the variable is computed considering, first, that
++ * idling is virtually always beneficial for the throughput if:
++ * (a) the device is not NCQ-capable and rotational, or
++ * (b) regardless of the presence of NCQ, the device is rotational and
++ * the request pattern for bfqq is I/O-bound and sequential, or
++ * (c) regardless of whether it is rotational, the device is
++ * not NCQ-capable and the request pattern for bfqq is
++ * I/O-bound and sequential.
++ *
++ * Secondly, and in contrast to the above item (b), idling an
++ * NCQ-capable flash-based device would not boost the
++ * throughput even with sequential I/O; rather it would lower
++ * the throughput in proportion to how fast the device
++ * is. Accordingly, the next variable is true if any of the
++ * above conditions (a), (b) or (c) is true, and, in
++ * particular, happens to be false if bfqd is an NCQ-capable
++ * flash-based device.
++ */
++ idling_boosts_thr = rot_without_queueing ||
++ ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
++ bfqq_sequential_and_IO_bound);
++
++ /*
++ * The value of the next variable,
++ * idling_boosts_thr_without_issues, is equal to that of
++ * idling_boosts_thr, unless a special case holds. In this
++ * special case, described below, idling may cause problems to
++ * weight-raised queues.
++ *
++ * When the request pool is saturated (e.g., in the presence
++ * of write hogs), if the processes associated with
++ * non-weight-raised queues ask for requests at a lower rate,
++ * then processes associated with weight-raised queues have a
++ * higher probability to get a request from the pool
++ * immediately (or at least soon) when they need one. Thus
++ * they have a higher probability to actually get a fraction
++ * of the device throughput proportional to their high
++ * weight. This is especially true with NCQ-capable drives,
++ * which enqueue several requests in advance, and further
++ * reorder internally-queued requests.
++ *
++ * For this reason, we force to false the value of
++ * idling_boosts_thr_without_issues if there are weight-raised
++ * busy queues. In this case, and if bfqq is not weight-raised,
++ * this guarantees that the device is not idled for bfqq (if,
++ * instead, bfqq is weight-raised, then idling will be
++ * guaranteed by another variable, see below). Combined with
++ * the timestamping rules of BFQ (see [1] for details), this
++ * behavior causes bfqq, and hence any sync non-weight-raised
++ * queue, to get a lower number of requests served, and thus
++ * to ask for a lower number of requests from the request
++ * pool, before the busy weight-raised queues get served
++ * again. This often mitigates starvation problems in the
++ * presence of heavy write workloads and NCQ, thereby
++ * guaranteeing a higher application and system responsiveness
++ * in these hostile scenarios.
++ */
++ idling_boosts_thr_without_issues = idling_boosts_thr &&
++ bfqd->wr_busy_queues == 0;
++
++ /*
++ * There is then a case where idling must be performed not
++ * for throughput concerns, but to preserve service
++ * guarantees.
++ *
++ * To introduce this case, we can note that allowing the drive
++ * to enqueue more than one request at a time, and hence
++ * delegating de facto final scheduling decisions to the
++ * drive's internal scheduler, entails loss of control on the
++ * actual request service order. In particular, the critical
++ * situation is when requests from different processes happen
++ * to be present, at the same time, in the internal queue(s)
++ * of the drive. In such a situation, the drive, by deciding
++ * the service order of the internally-queued requests, does
++ * determine also the actual throughput distribution among
++ * these processes. But the drive typically has no notion or
++ * concern about per-process throughput distribution, and
++ * makes its decisions only on a per-request basis. Therefore,
++ * the service distribution enforced by the drive's internal
++ * scheduler is likely to coincide with the desired
++ * device-throughput distribution only in a completely
++ * symmetric scenario where:
++ * (i) each of these processes must get the same throughput as
++ * the others;
++ * (ii) all these processes have the same I/O pattern
++ * (either sequential or random).
++ * In fact, in such a scenario, the drive will tend to treat
++ * the requests of each of these processes in about the same
++ * way as the requests of the others, and thus to provide
++ * each of these processes with about the same throughput
++ * (which is exactly the desired throughput distribution). In
++ * contrast, in any asymmetric scenario, device idling is
++ * certainly needed to guarantee that bfqq receives its
++ * assigned fraction of the device throughput (see [1] for
++ * details).
++ *
++ * We address this issue by controlling, actually, only the
++ * symmetry sub-condition (i), i.e., provided that
++ * sub-condition (i) holds, idling is not performed,
++ * regardless of whether sub-condition (ii) holds. In other
++ * words, only if sub-condition (i) holds, then idling is
++ * allowed, and the device tends to be prevented from queueing
++ * many requests, possibly of several processes. The reason
++ * for not controlling also sub-condition (ii) is that we
++ * exploit preemption to preserve guarantees in case of
++ * symmetric scenarios, even if (ii) does not hold, as
++ * explained in the next two paragraphs.
++ *
++ * Even if a queue, say Q, is expired when it remains idle, Q
++ * can still preempt the new in-service queue if the next
++ * request of Q arrives soon (see the comments on
++ * bfq_bfqq_update_budg_for_activation). If all queues and
++ * groups have the same weight, this form of preemption,
++ * combined with the hole-recovery heuristic described in the
++ * comments on function bfq_bfqq_update_budg_for_activation,
++ * are enough to preserve a correct bandwidth distribution in
++ * the mid term, even without idling. In fact, even if not
++ * idling allows the internal queues of the device to contain
++ * many requests, and thus to reorder requests, we can rather
++ * safely assume that the internal scheduler still preserves a
++ * minimum of mid-term fairness. The motivation for using
++ * preemption instead of idling is that, by not idling,
++ * service guarantees are preserved without minimally
++ * sacrificing throughput. In other words, both a high
++ * throughput and its desired distribution are obtained.
++ *
++ * More precisely, this preemption-based, idleless approach
++ * provides fairness in terms of IOPS, and not sectors per
++ * second. This can be seen with a simple example. Suppose
++ * that there are two queues with the same weight, but that
++ * the first queue receives requests of 8 sectors, while the
++ * second queue receives requests of 1024 sectors. In
++ * addition, suppose that each of the two queues contains at
++ * most one request at a time, which implies that each queue
++ * always remains idle after it is served. Finally, after
++ * remaining idle, each queue receives very quickly a new
++ * request. It follows that the two queues are served
++ * alternatively, preempting each other if needed. This
++ * implies that, although both queues have the same weight,
++ * the queue with large requests receives a service that is
++ * 1024/8 times as high as the service received by the other
++ * queue.
++ *
++ * On the other hand, device idling is performed, and thus
++ * pure sector-domain guarantees are provided, for the
++ * following queues, which are likely to need stronger
++ * throughput guarantees: weight-raised queues, and queues
++ * with a higher weight than other queues. When such queues
++ * are active, sub-condition (i) is false, which triggers
++ * device idling.
++ *
++ * According to the above considerations, the next variable is
++ * true (only) if sub-condition (i) holds. To compute the
++ * value of this variable, we not only use the return value of
++ * the function bfq_symmetric_scenario(), but also check
++ * whether bfqq is being weight-raised, because
++ * bfq_symmetric_scenario() does not take into account also
++ * weight-raised queues (see comments on
++ * bfq_weights_tree_add()).
++ *
++ * As a side note, it is worth considering that the above
++ * device-idling countermeasures may however fail in the
++ * following unlucky scenario: if idling is (correctly)
++ * disabled in a time period during which all symmetry
++ * sub-conditions hold, and hence the device is allowed to
++ * enqueue many requests, but at some later point in time some
++ * sub-condition ceases to hold, then it may become impossible
++ * to let requests be served in the desired order until all
++ * the requests already queued in the device have been served.
++ */
++ asymmetric_scenario = bfqq->wr_coeff > 1 ||
++ !bfq_symmetric_scenario(bfqd);
++
++ /*
++ * Finally, there is a case where maximizing throughput is the
++ * best choice even if it may cause unfairness toward
++ * bfqq. Such a case is when bfqq became active in a burst of
++ * queue activations. Queues that became active during a large
++ * burst benefit only from throughput, as discussed in the
++ * comments on bfq_handle_burst. Thus, if bfqq became active
++ * in a burst and not idling the device maximizes throughput,
++ * then the device must not be idled, because not idling the
++ * device provides bfqq and all other queues in the burst with
++ * maximum benefit. Combining this and the above case, we can
++ * now establish when idling is actually needed to preserve
++ * service guarantees.
++ */
++ idling_needed_for_service_guarantees =
++ asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
++
++ /*
++ * We have now all the components we need to compute the
++ * return value of the function, which is true only if idling
++ * either boosts the throughput (without issues), or is
++ * necessary to preserve service guarantees.
++ */
++ bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d",
++ bfq_bfqq_sync(bfqq), idling_boosts_thr);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "may_idle: wr_busy %d boosts %d IO-bound %d guar %d",
++ bfqd->wr_busy_queues,
++ idling_boosts_thr_without_issues,
++ bfq_bfqq_IO_bound(bfqq),
++ idling_needed_for_service_guarantees);
++
++ return idling_boosts_thr_without_issues ||
++ idling_needed_for_service_guarantees;
++}
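Once the helper flags are computed as described in the comments above, the return value boils down to a short boolean expression. A hedged restatement of that final combination, with the flags taken as precomputed inputs rather than derived from real device and queue state:

#include <stdbool.h>
#include <stdio.h>

struct idle_inputs {
        bool strict_guarantees;
        bool sync;
        bool class_idle;
        bool slice_idle_enabled;
        bool rot_without_queueing;      /* rotational and not NCQ-capable */
        bool nonrot;
        bool hw_tag;                    /* NCQ in use */
        bool sequential_io_bound;
        bool wr_busy_queues;            /* some queue is currently weight-raised */
        bool weight_raised;             /* this queue is weight-raised */
        bool symmetric_scenario;
        bool in_large_burst;
};

static bool may_idle(const struct idle_inputs *in)
{
        bool idling_boosts_thr, idling_boosts_thr_without_issues;
        bool asymmetric, needed_for_guarantees;

        if (in->strict_guarantees)
                return true;
        if (!in->slice_idle_enabled || !in->sync || in->class_idle)
                return false;

        idling_boosts_thr = in->rot_without_queueing ||
                ((!in->nonrot || !in->hw_tag) && in->sequential_io_bound);
        idling_boosts_thr_without_issues = idling_boosts_thr && !in->wr_busy_queues;

        asymmetric = in->weight_raised || !in->symmetric_scenario;
        needed_for_guarantees = asymmetric && !in->in_large_burst;

        return idling_boosts_thr_without_issues || needed_for_guarantees;
}

int main(void)
{
        struct idle_inputs ncq_ssd = {
                .sync = true, .slice_idle_enabled = true,
                .nonrot = true, .hw_tag = true,
                .sequential_io_bound = true, .symmetric_scenario = true,
        };

        /* symmetric workload on an NCQ SSD: no idling, keep the queues full */
        printf("idle on NCQ SSD, symmetric: %d\n", may_idle(&ncq_ssd));
        return 0;
}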
++
++/*
++ * If the in-service queue is empty but the function bfq_bfqq_may_idle
++ * returns true, then:
++ * 1) the queue must remain in service and cannot be expired, and
++ * 2) the device must be idled to wait for the possible arrival of a new
++ * request for the queue.
++ * See the comments on the function bfq_bfqq_may_idle for the reasons
++ * why performing device idling is the best choice to boost the throughput
++ * and preserve service guarantees when bfq_bfqq_may_idle itself
++ * returns true.
++ */
++static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
++{
++ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
++}
++
++/*
++ * Select a queue for service. If we have a current queue in service,
++ * check whether to continue servicing it, or retrieve and set a new one.
++ */
++static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++ struct request *next_rq;
++ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++
++ bfqq = bfqd->in_service_queue;
++ if (!bfqq)
++ goto new_queue;
++
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++
++ if (bfq_may_expire_for_budg_timeout(bfqq) &&
++ !hrtimer_active(&bfqd->idle_slice_timer) &&
++ !bfq_bfqq_must_idle(bfqq))
++ goto expire;
++
++check_queue:
++ /*
++ * This loop is rarely executed more than once. Even when it
++ * happens, it is much more convenient to re-execute this loop
++ * than to return NULL and trigger a new dispatch to get a
++ * request served.
++ */
++ next_rq = bfqq->next_rq;
++ /*
++ * If bfqq has requests queued and it has enough budget left to
++ * serve them, keep the queue, otherwise expire it.
++ */
++ if (next_rq) {
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ if (bfq_serv_to_charge(next_rq, bfqq) >
++ bfq_bfqq_budget_left(bfqq)) {
++ /*
++ * Expire the queue for budget exhaustion,
++ * which makes sure that the next budget is
++ * enough to serve the next request, even if
++ * it comes from the fifo expired path.
++ */
++ reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
++ goto expire;
++ } else {
++ /*
++ * The idle timer may be pending because we may
++ * not disable disk idling even when a new request
++ * arrives.
++ */
++ if (bfq_bfqq_wait_request(bfqq)) {
++ BUG_ON(!hrtimer_active(&bfqd->idle_slice_timer));
++ /*
++ * If we get here: 1) at least a new request
++ * has arrived but we have not disabled the
++ * timer because the request was too small,
++ * 2) then the block layer has unplugged
++ * the device, causing the dispatch to be
++ * invoked.
++ *
++ * Since the device is unplugged, now the
++ * requests are probably large enough to
++ * provide a reasonable throughput.
++ * So we disable idling.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++ }
++ goto keep_queue;
++ }
++ }
++
++ /*
++ * No requests pending. However, if the in-service queue is idling
++ * for a new request, or has requests waiting for a completion and
++ * may idle after their completion, then keep it anyway.
++ */
++ if (hrtimer_active(&bfqd->idle_slice_timer) ||
++ (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
++ bfqq = NULL;
++ goto keep_queue;
++ }
++
++ reason = BFQ_BFQQ_NO_MORE_REQUESTS;
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, false, reason);
++new_queue:
++ bfqq = bfq_set_in_service_queue(bfqd);
++ if (bfqq) {
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
++ goto check_queue;
++ }
++keep_queue:
++ if (bfqq)
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
++ else
++ bfq_log(bfqd, "select_queue: no queue returned");
++
++ return bfqq;
++}
++
++static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
++ BUG_ON(bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ time_is_after_jiffies(bfqq->last_wr_start_finish));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqq->wr_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
++ entity->orig_weight * bfqq->wr_coeff);
++ if (entity->prio_changed)
++ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
++
++ /*
++ * If the queue was activated in a burst, or too much
++ * time has elapsed from the beginning of this
++ * weight-raising period, then end weight raising.
++ */
++ if (bfq_bfqq_in_large_burst(bfqq))
++ bfq_bfqq_end_wr(bfqq);
++ else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time)) {
++ if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
++ time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
++ bfq_wr_duration(bfqd)))
++ bfq_bfqq_end_wr(bfqq);
++ else {
++ /* switch back to interactive wr */
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ bfqq->last_wr_start_finish =
++ bfqq->wr_start_at_switch_to_srt;
++ BUG_ON(time_is_after_jiffies(
++ bfqq->last_wr_start_finish));
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "back to interactive wr");
++ }
++ }
++ }
++ /*
++ * To improve latency (for this or other queues), immediately
++ * update weight both if it must be raised and if it must be
++ * lowered. Since the entity may be on some active tree here, and
++ * might have a pending change of its ioprio class, invoke the
++ * next function with the last parameter unset (see the
++ * comments on the function).
++ */
++ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
++ __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
++ entity, false);
++}
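The expiry logic above can be summarised as: weight raising ends immediately for queues activated in a large burst, and otherwise ends when its period elapses, except that a soft real-time period whose original interactive window is still open falls back to interactive weight raising. A small sketch of that state machine, with abstract tick timestamps and an illustrative duration standing in for bfq_wr_duration():

#include <stdbool.h>
#include <stdio.h>

enum wr_state { WR_NONE, WR_INTERACTIVE, WR_SOFTRT };

/* End (or downgrade) weight raising when its period elapses. */
static enum wr_state update_wr(enum wr_state state, bool in_large_burst,
                               unsigned long now, unsigned long wr_start,
                               unsigned long wr_dur,
                               unsigned long start_at_switch_to_srt,
                               unsigned long interactive_dur)
{
        if (state == WR_NONE)
                return WR_NONE;
        if (in_large_burst)
                return WR_NONE;
        if (now < wr_start + wr_dur)
                return state;                           /* period not over yet */
        if (state == WR_SOFTRT &&
            now < start_at_switch_to_srt + interactive_dur)
                return WR_INTERACTIVE;                  /* fall back to interactive wr */
        return WR_NONE;
}

int main(void)
{
        /* soft-rt period over, but the original interactive window still open */
        printf("%d\n", update_wr(WR_SOFTRT, false, 150, 100, 30, 140, 50));
        return 0;
}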
++
++/*
++ * Dispatch one request from bfqq, moving it to the request queue
++ * dispatch list.
++ */
++static int bfq_dispatch_request(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++ struct request *rq = bfqq->next_rq;
++ unsigned long service_to_charge;
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++ BUG_ON(!rq);
++ service_to_charge = bfq_serv_to_charge(rq, bfqq);
++
++ BUG_ON(service_to_charge > bfq_bfqq_budget_left(bfqq));
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ bfq_bfqq_served(bfqq, service_to_charge);
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ bfq_dispatch_insert(bfqd->queue, rq);
++
++ /*
++ * If weight raising has to terminate for bfqq, then next
++ * function causes an immediate update of bfqq's weight,
++ * without waiting for next activation. As a consequence, on
++ * expiration, bfqq will be timestamped as if it had never been
++ * weight-raised during this service slot, even if it has
++ * received part or even most of the service as a
++ * weight-raised queue. This inflates bfqq's timestamps, which
++ * is beneficial, as bfqq is then more willing to leave the
++ * device immediately to possible other weight-raised queues.
++ */
++ bfq_update_wr_data(bfqd, bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "dispatched %u sec req (%llu), budg left %d",
++ blk_rq_sectors(rq),
++ (unsigned long long) blk_rq_pos(rq),
++ bfq_bfqq_budget_left(bfqq));
++
++ dispatched++;
++
++ if (!bfqd->in_service_bic) {
++ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
++ bfqd->in_service_bic = RQ_BIC(rq);
++ }
++
++ if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
++ goto expire;
++
++ return dispatched;
++
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, false, BFQ_BFQQ_BUDGET_EXHAUSTED);
++ return dispatched;
++}
++
++static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++
++ while (bfqq->next_rq) {
++ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
++ dispatched++;
++ }
++
++ BUG_ON(!list_empty(&bfqq->fifo));
++ return dispatched;
++}
++
++/*
++ * Drain our current requests.
++ * Used for barriers and when switching io schedulers on-the-fly.
++ */
++static int bfq_forced_dispatch(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq, *n;
++ struct bfq_service_tree *st;
++ int dispatched = 0;
++
++ bfqq = bfqd->in_service_queue;
++ if (bfqq)
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ /*
++ * Loop through classes, and be careful to leave the scheduler
++ * in a consistent state, as feedback mechanisms and vtime
++ * updates cannot be disabled during the process.
++ */
++ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
++ st = bfq_entity_service_tree(&bfqq->entity);
++
++ dispatched += __bfq_forced_dispatch_bfqq(bfqq);
++
++ bfqq->max_budget = bfq_max_budget(bfqd);
++ bfq_forget_idle(st);
++ }
++
++ BUG_ON(bfqd->busy_queues != 0);
++
++ return dispatched;
++}
++
++static int bfq_dispatch_requests(struct request_queue *q, int force)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq;
++
++ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++
++ if (bfqd->busy_queues == 0)
++ return 0;
++
++ if (unlikely(force))
++ return bfq_forced_dispatch(bfqd);
++
++ /*
++ * Force device to serve one request at a time if
++ * strict_guarantees is true. Forcing this service scheme is
++ * currently the ONLY way to guarantee that the request
++ * service order enforced by the scheduler is respected by a
++ * queueing device. Otherwise the device is free even to make
++ * some unlucky request wait for as long as the device
++ * wishes.
++ *
++ * Of course, serving one request at a time may cause loss of
++ * throughput.
++ */
++ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
++ return 0;
++
++ bfqq = bfq_select_queue(bfqd);
++ if (!bfqq)
++ return 0;
++
++ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
++
++ BUG_ON(bfq_bfqq_wait_request(bfqq));
++
++ if (!bfq_dispatch_request(bfqd, bfqq))
++ return 0;
++
++ bfq_log_bfqq(bfqd, bfqq, "dispatched %s request",
++ bfq_bfqq_sync(bfqq) ? "sync" : "async");
++
++ BUG_ON(bfqq->next_rq == NULL &&
++ bfqq->entity.budget < bfqq->entity.service);
++ return 1;
++}
++
++/*
++ * Task holds one reference to the queue, dropped when task exits. Each rq
++ * in-flight on this queue also holds a reference, dropped when rq is freed.
++ *
++ * Queue lock must be held here. Recall not to use bfqq after calling
++ * this function on it.
++ */
++static void bfq_put_queue(struct bfq_queue *bfqq)
++{
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ struct bfq_group *bfqg = bfqq_group(bfqq);
++#endif
++
++ BUG_ON(bfqq->ref <= 0);
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++ bfqq->ref--;
++ if (bfqq->ref)
++ return;
++
++ BUG_ON(rb_first(&bfqq->sort_list));
++ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
++ BUG_ON(bfqq->entity.tree);
++ BUG_ON(bfq_bfqq_busy(bfqq));
++
++ if (bfq_bfqq_sync(bfqq))
++ /*
++ * The fact that this queue is being destroyed does not
++ * invalidate the fact that this queue may have been
++ * activated during the current burst. As a consequence,
++ * although the queue does not exist anymore, and hence
++ * needs to be removed from the burst list if it is there,
++ * the burst size must not be decremented.
++ */
++ hlist_del_init(&bfqq->burst_list_node);
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
++
++ kmem_cache_free(bfq_pool, bfqq);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ bfqg_put(bfqg);
++#endif
++}
++
++static void bfq_put_cooperator(struct bfq_queue *bfqq)
++{
++ struct bfq_queue *__bfqq, *next;
++
++ /*
++ * If this queue was scheduled to merge with another queue, be
++ * sure to drop the reference taken on that queue (and others in
++ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
++ */
++ __bfqq = bfqq->new_bfqq;
++ while (__bfqq) {
++ if (__bfqq == bfqq)
++ break;
++ next = __bfqq->new_bfqq;
++ bfq_put_queue(__bfqq);
++ __bfqq = next;
++ }
++}
++
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ if (bfqq == bfqd->in_service_queue) {
++ __bfq_bfqq_expire(bfqd, bfqq);
++ bfq_schedule_dispatch(bfqd);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq); /* release process reference */
++}
++
++static void bfq_init_icq(struct io_cq *icq)
++{
++ icq_to_bic(icq)->ttime.last_end_request = ktime_get_ns() - (1ULL<<32);
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++
++ if (bic_to_bfqq(bic, false)) {
++ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, false));
++ bic_set_bfqq(bic, NULL, false);
++ }
++
++ if (bic_to_bfqq(bic, true)) {
++ /*
++ * If the bic is using a shared queue, put the reference
++ * taken on the io_context when the bic started using a
++ * shared bfq_queue.
++ */
++ if (bfq_bfqq_coop(bic_to_bfqq(bic, true)))
++ put_io_context(icq->ioc);
++ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, true));
++ bic_set_bfqq(bic, NULL, true);
++ }
++}
++
++/*
++ * Update the entity prio values; note that the new values will not
++ * be used until the next (re)activation.
++ */
++static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ struct task_struct *tsk = current;
++ int ioprio_class;
++
++ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ switch (ioprio_class) {
++ default:
++ dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
++ "bfq: bad prio class %d\n", ioprio_class);
++ case IOPRIO_CLASS_NONE:
++ /*
++ * No prio set, inherit CPU scheduling settings.
++ */
++ bfqq->new_ioprio = task_nice_ioprio(tsk);
++ bfqq->new_ioprio_class = task_nice_ioclass(tsk);
++ break;
++ case IOPRIO_CLASS_RT:
++ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
++ break;
++ case IOPRIO_CLASS_BE:
++ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
++ break;
++ case IOPRIO_CLASS_IDLE:
++ bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
++ bfqq->new_ioprio = 7;
++ break;
++ }
++
++ if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
++ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
++ bfqq->new_ioprio);
++ BUG();
++ }
++
++ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
++ bfqq->entity.prio_changed = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "set_next_ioprio_data: bic_class %d prio %d class %d",
++ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class);
++}
++
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_queue *bfqq;
++ unsigned long uninitialized_var(flags);
++ int ioprio = bic->icq.ioc->ioprio;
++
++ /*
++ * This condition may trigger on a newly created bic; be sure to
++ * drop the lock before returning.
++ */
++ if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
++ return;
++
++ bic->ioprio = ioprio;
++
++ bfqq = bic_to_bfqq(bic, false);
++ if (bfqq) {
++ /* release process reference on this queue */
++ bfq_put_queue(bfqq);
++ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
++ bic_set_bfqq(bic, bfqq, false);
++ bfq_log_bfqq(bfqd, bfqq,
++ "check_ioprio_change: bfqq %p %d",
++ bfqq, bfqq->ref);
++ }
++
++ bfqq = bic_to_bfqq(bic, true);
++ if (bfqq)
++ bfq_set_next_ioprio_data(bfqq, bic);
++}
++
++static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic, pid_t pid, int is_sync)
++{
++ RB_CLEAR_NODE(&bfqq->entity.rb_node);
++ INIT_LIST_HEAD(&bfqq->fifo);
++ INIT_HLIST_NODE(&bfqq->burst_list_node);
++ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
++
++ bfqq->ref = 0;
++ bfqq->bfqd = bfqd;
++
++ if (bic)
++ bfq_set_next_ioprio_data(bfqq, bic);
++
++ if (is_sync) {
++ /*
++ * No need to mark as has_short_ttime if in
++ * idle_class, because no device idling is performed
++ * for queues in idle class
++ */
++ if (!bfq_class_idle(bfqq))
++ /* tentatively mark as has_short_ttime */
++ bfq_mark_bfqq_has_short_ttime(bfqq);
++ bfq_mark_bfqq_sync(bfqq);
++ bfq_mark_bfqq_just_created(bfqq);
++ } else
++ bfq_clear_bfqq_sync(bfqq);
++ bfq_mark_bfqq_IO_bound(bfqq);
++
++ /* Tentative initial value to trade off between thr and lat */
++ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
++ bfqq->pid = pid;
++
++ bfqq->wr_coeff = 1;
++ bfqq->last_wr_start_finish = jiffies;
++ bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
++ bfqq->budget_timeout = bfq_smallest_from_now();
++ bfqq->split_time = bfq_smallest_from_now();
++
++ /*
++ * Set to the value for which bfqq will not be deemed as
++ * soft rt when it becomes backlogged.
++ */
++ bfqq->soft_rt_next_start = bfq_greatest_from_now();
++
++ /* first request is almost certainly seeky */
++ bfqq->seek_history = 1;
++}
++
++static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int ioprio_class, int ioprio)
++{
++ switch (ioprio_class) {
++ case IOPRIO_CLASS_RT:
++ return &bfqg->async_bfqq[0][ioprio];
++ case IOPRIO_CLASS_NONE:
++ ioprio = IOPRIO_NORM;
++ /* fall through */
++ case IOPRIO_CLASS_BE:
++ return &bfqg->async_bfqq[1][ioprio];
++ case IOPRIO_CLASS_IDLE:
++ return &bfqg->async_idle_bfqq;
++ default:
++ BUG();
++ }
++}
++
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bio *bio, bool is_sync,
++ struct bfq_io_cq *bic)
++{
++ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ struct bfq_queue **async_bfqq = NULL;
++ struct bfq_queue *bfqq;
++ struct bfq_group *bfqg;
++
++ rcu_read_lock();
++
++ bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
++ if (!bfqg) {
++ bfqq = &bfqd->oom_bfqq;
++ goto out;
++ }
++
++ if (!is_sync) {
++ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
++ ioprio);
++ bfqq = *async_bfqq;
++ if (bfqq)
++ goto out;
++ }
++
++ bfqq = kmem_cache_alloc_node(bfq_pool,
++ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
++ bfqd->queue->node);
++
++ if (bfqq) {
++ bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
++ is_sync);
++ bfq_init_entity(&bfqq->entity, bfqg);
++ bfq_log_bfqq(bfqd, bfqq, "allocated");
++ } else {
++ bfqq = &bfqd->oom_bfqq;
++ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
++ goto out;
++ }
++
++ /*
++ * Pin the queue now that it's allocated; scheduler exit will
++ * prune it.
++ */
++ if (async_bfqq) {
++ bfqq->ref++; /*
++ * Extra group reference, w.r.t. sync
++ * queue. This extra reference is removed
++ * only if bfqq->bfqg disappears, to
++ * guarantee that this queue is not freed
++ * until its group goes away.
++ */
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfqq, bfqq->ref);
++ *async_bfqq = bfqq;
++ }
++
++out:
++ bfqq->ref++; /* get a process reference to this queue */
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
++ rcu_read_unlock();
++ return bfqq;
++}
++
++static void bfq_update_io_thinktime(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic)
++{
++ struct bfq_ttime *ttime = &bic->ttime;
++ u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request;
++
++ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle);
++
++ ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++ ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
++ ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
++ ttime->ttime_samples);
++}
++
++static void
++bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ bfqq->seek_history <<= 1;
++ bfqq->seek_history |=
++ get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
++ (!blk_queue_nonrot(bfqd->queue) ||
++ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
++}
++
++static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ bool has_short_ttime = true;
++
++ /*
++ * No need to update has_short_ttime if bfqq is async or in
++ * idle io prio class, or if bfq_slice_idle is zero, because
++ * no device idling is performed for bfqq in this case.
++ */
++ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
++ bfqd->bfq_slice_idle == 0)
++ return;
++
++ /* Idle window just restored, statistics are meaningless. */
++ if (time_is_after_eq_jiffies(bfqq->split_time +
++ bfqd->bfq_wr_min_idle_time))
++ return;
++
++ /*
++ * Think time is infinite if no process is linked to
++ * bfqq. Otherwise check average think time to
++ * decide whether to mark as has_short_ttime.
++ */
++ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
++ (bfq_sample_valid(bic->ttime.ttime_samples) &&
++ bic->ttime.ttime_mean > bfqd->bfq_slice_idle))
++ has_short_ttime = false;
++
++ bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
++ has_short_ttime);
++
++ if (has_short_ttime)
++ bfq_mark_bfqq_has_short_ttime(bfqq);
++ else
++ bfq_clear_bfqq_has_short_ttime(bfqq);
++}
++
++/*
++ * Called when a new fs request (rq) is added to bfqq. Check if there's
++ * something we should do about it.
++ */
++static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ struct bfq_io_cq *bic = RQ_BIC(rq);
++
++ if (rq->cmd_flags & REQ_META)
++ bfqq->meta_pending++;
++
++ bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_has_short_ttime(bfqd, bfqq, bic);
++ bfq_update_io_seektime(bfqd, bfqq, rq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "rq_enqueued: has_short_ttime=%d (seeky %d)",
++ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
++
++ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
++
++ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
++ bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
++ blk_rq_sectors(rq) < 32;
++ bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
++
++ /*
++ * There is just this request queued: if the request
++ * is small and the queue is not to be expired, then
++ * just exit.
++ *
++ * In this way, if the device is being idled to wait
++ * for a new request from the in-service queue, we
++ * avoid unplugging the device and committing the
++ * device to serve just a small request. On the
++ * contrary, we wait for the block layer to decide
++ * when to unplug the device: hopefully, new requests
++ * will be merged to this one quickly, then the device
++ * will be unplugged and larger requests will be
++ * dispatched.
++ */
++ if (small_req && !budget_timeout)
++ return;
++
++ /*
++ * A large enough request arrived, or the queue is to
++ * be expired: in both cases disk idling is to be
++ * stopped, so clear wait_request flag and reset
++ * timer.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++
++ /*
++ * The queue is not empty, because a new request just
++ * arrived. Hence we can safely expire the queue, in
++ * case of budget timeout, without risking that the
++ * timestamps of the queue are not updated correctly.
++ * See [1] for more details.
++ */
++ if (budget_timeout)
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_BUDGET_TIMEOUT);
++
++ /*
++ * Let the request rip immediately, or let a new queue be
++ * selected if bfqq has just been expired.
++ */
++ __blk_run_queue(bfqd->queue);
++ }
++}
++
++static void bfq_insert_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ /*
++ * An unplug may trigger a requeue of a request from the device
++ * driver: make sure we are in process context while trying to
++ * merge two bfq_queues.
++ */
++ if (!in_interrupt()) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
++ if (new_bfqq) {
++ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
++ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
++ /*
++ * Release the request's reference to the old bfqq
++ * and make sure one is taken to the shared queue.
++ */
++ new_bfqq->allocated[rq_data_dir(rq)]++;
++ bfqq->allocated[rq_data_dir(rq)]--;
++ new_bfqq->ref++;
++ bfq_clear_bfqq_just_created(bfqq);
++ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
++ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
++ bfqq, new_bfqq);
++ /*
++ * rq is about to be enqueued into new_bfqq,
++ * release rq reference on bfqq
++ */
++ bfq_put_queue(bfqq);
++ rq->elv.priv[1] = new_bfqq;
++ bfqq = new_bfqq;
++ }
++ }
++
++ bfq_add_request(rq);
++
++ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
++ list_add_tail(&rq->queuelist, &bfqq->fifo);
++
++ bfq_rq_enqueued(bfqd, bfqq, rq);
++}
++
++static void bfq_update_hw_tag(struct bfq_data *bfqd)
++{
++ bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
++ bfqd->rq_in_driver);
++
++ if (bfqd->hw_tag == 1)
++ return;
++
++ /*
++ * This sample is valid if the number of outstanding requests
++ * is large enough to allow queueing behavior. Note that the
++ * sum is not exact, as it does not take deactivated requests
++ * into account.
++ */
++ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
++ return;
++
++ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
++ return;
++
++ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
++ bfqd->max_rq_in_driver = 0;
++ bfqd->hw_tag_samples = 0;
++}
++
++static void bfq_completed_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ u64 now_ns;
++ u32 delta_us;
++
++ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left",
++ blk_rq_sectors(rq));
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++ bfq_update_hw_tag(bfqd);
++
++ BUG_ON(!bfqd->rq_in_driver);
++ BUG_ON(!bfqq->dispatched);
++ bfqd->rq_in_driver--;
++ bfqq->dispatched--;
++ bfqg_stats_update_completion(bfqq_group(bfqq),
++ rq_start_time_ns(rq),
++ rq_io_start_time_ns(rq),
++ rq->cmd_flags);
++
++ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++ /*
++ * Set budget_timeout (which we overload to store the
++ * time at which the queue remains with no backlog and
++ * no outstanding request; used by the weight-raising
++ * mechanism).
++ */
++ bfqq->budget_timeout = jiffies;
++
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ }
++
++ now_ns = ktime_get_ns();
++
++ RQ_BIC(rq)->ttime.last_end_request = now_ns;
++
++ /*
++ * Using us instead of ns, to get a reasonable precision in
++ * computing rate in next check.
++ */
++ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
++
++ bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
++ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
++ (USEC_PER_SEC*
++ (u64)((bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us))
++ >>BFQ_RATE_SHIFT,
++ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT);
++
++ /*
++ * If the request took rather long to complete, and, according
++ * to the maximum request size recorded, this completion latency
++ * implies that the request was certainly served at a very low
++ * rate (less than 1M sectors/sec), then the whole observation
++ * interval that lasts up to this time instant cannot be a
++ * valid time interval for computing a new peak rate. Invoke
++ * bfq_update_rate_reset to have the following three steps
++ * taken:
++ * - close the observation interval at the last (previous)
++ * request dispatch or completion
++ * - compute rate, if possible, for that observation interval
++ * - reset to zero samples, which will trigger a proper
++ * re-initialization of the observation interval on next
++ * dispatch
++ */
++ if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
++ (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
++ 1UL<<(BFQ_RATE_SHIFT - 10))
++ bfq_update_rate_reset(bfqd, NULL);
++ bfqd->last_completion = now_ns;
++
++ /*
++ * If we are waiting to discover whether the request pattern
++ * of the task associated with the queue is actually
++ * isochronous, and both requisites for this condition to hold
++ * are now satisfied, then compute soft_rt_next_start (see the
++ * comments on the function bfq_bfqq_softrt_next_start()). We
++ * schedule this delayed check when bfqq expires, if it still
++ * has in-flight requests.
++ */
++ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list))
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++
++ /*
++ * If this is the in-service queue, check if it needs to be expired,
++ * or if we want to idle in case it has no pending requests.
++ */
++ if (bfqd->in_service_queue == bfqq) {
++ if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
++ bfq_arm_slice_timer(bfqd);
++ goto out;
++ } else if (bfq_may_expire_for_budg_timeout(bfqq))
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_BUDGET_TIMEOUT);
++ else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
++ (bfqq->dispatched == 0 ||
++ !bfq_bfqq_may_idle(bfqq)))
++ bfq_bfqq_expire(bfqd, bfqq, false,
++ BFQ_BFQQ_NO_MORE_REQUESTS);
++ }
++
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++
++out:
++ return;
++}
++
++static int __bfq_may_queue(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
++ bfq_clear_bfqq_must_alloc(bfqq);
++ return ELV_MQUEUE_MUST;
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++static int bfq_may_queue(struct request_queue *q, unsigned int op)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ /*
++ * Don't force setup of a queue from here, as a call to may_queue
++ * does not necessarily imply that a request actually will be
++ * queued. So just look up a possibly existing queue, or return
++ * 'may queue' if that fails.
++ */
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (!bic)
++ return ELV_MQUEUE_MAY;
++
++ bfqq = bic_to_bfqq(bic, op_is_sync(op));
++ if (bfqq)
++ return __bfq_may_queue(bfqq);
++
++ return ELV_MQUEUE_MAY;
++}
++
++/*
++ * Queue lock held here.
++ */
++static void bfq_put_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ if (bfqq) {
++ const int rw = rq_data_dir(rq);
++
++ BUG_ON(!bfqq->allocated[rw]);
++ bfqq->allocated[rw]--;
++
++ rq->elv.priv[0] = NULL;
++ rq->elv.priv[1] = NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++ bfqq, bfqq->ref);
++ bfq_put_queue(bfqq);
++ }
++}
++
++/*
++ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
++ * was the last process referring to that bfqq.
++ */
++static struct bfq_queue *
++bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++
++ put_io_context(bic->icq.ioc);
++
++ if (bfqq_process_refs(bfqq) == 1) {
++ bfqq->pid = current->pid;
++ bfq_clear_bfqq_coop(bfqq);
++ bfq_clear_bfqq_split_coop(bfqq);
++ return bfqq;
++ }
++
++ bic_set_bfqq(bic, NULL, 1);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++ return NULL;
++}
++
++/*
++ * Allocate bfq data structures associated with this request.
++ */
++static int bfq_set_request(struct request_queue *q, struct request *rq,
++ struct bio *bio, gfp_t gfp_mask)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++ const int rw = rq_data_dir(rq);
++ const int is_sync = rq_is_sync(rq);
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ bool bfqq_already_existing = false, split = false;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++
++ if (!bic)
++ goto queue_fail;
++
++ bfq_check_ioprio_change(bic, bio);
++
++ bfq_bic_update_cgroup(bic, bio);
++
++new_queue:
++ bfqq = bic_to_bfqq(bic, is_sync);
++ if (!bfqq || bfqq == &bfqd->oom_bfqq) {
++ if (bfqq)
++ bfq_put_queue(bfqq);
++ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
++ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
++
++ bic_set_bfqq(bic, bfqq, is_sync);
++ if (split && is_sync) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: was_in_list %d "
++ "was_in_large_burst %d "
++ "large burst in progress %d",
++ bic->was_in_burst_list,
++ bic->saved_in_large_burst,
++ bfqd->large_burst);
++
++ if ((bic->was_in_burst_list && bfqd->large_burst) ||
++ bic->saved_in_large_burst) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: marking in "
++ "large burst");
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ } else {
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_request: clearing in "
++ "large burst");
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ if (bic->was_in_burst_list)
++ hlist_add_head(&bfqq->burst_list_node,
++ &bfqd->burst_list);
++ }
++ bfqq->split_time = jiffies;
++ }
++ } else {
++ /* If the queue was seeky for too long, break it apart. */
++ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
++
++ /* Update bic before losing reference to bfqq */
++ if (bfq_bfqq_in_large_burst(bfqq))
++ bic->saved_in_large_burst = true;
++
++ bfqq = bfq_split_bfqq(bic, bfqq);
++ split = true;
++ if (!bfqq)
++ goto new_queue;
++ else
++ bfqq_already_existing = true;
++ }
++ }
++
++ bfqq->allocated[rw]++;
++ bfqq->ref++;
++ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref);
++
++ rq->elv.priv[0] = bic;
++ rq->elv.priv[1] = bfqq;
++
++ /*
++ * If a bfq_queue has only one process reference, it is owned
++ * by only one bfq_io_cq: we can set the bic field of the
++ * bfq_queue to the address of that structure. Also, if the
++ * queue has just been split, mark a flag so that the
++ * information is available to the other scheduler hooks.
++ */
++ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
++ bfqq->bic = bic;
++ if (split) {
++ /*
++ * If the queue has just been split from a shared
++ * queue, restore the idle window and the possible
++ * weight raising period.
++ */
++ bfq_bfqq_resume_state(bfqq, bfqd, bic,
++ bfqq_already_existing);
++ }
++ }
++
++ if (unlikely(bfq_bfqq_just_created(bfqq)))
++ bfq_handle_burst(bfqd, bfqq);
++
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 0;
++
++queue_fail:
++ bfq_schedule_dispatch(bfqd);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 1;
++}
++
++static void bfq_kick_queue(struct work_struct *work)
++{
++ struct bfq_data *bfqd =
++ container_of(work, struct bfq_data, unplug_work);
++ struct request_queue *q = bfqd->queue;
++
++ spin_lock_irq(q->queue_lock);
++ __blk_run_queue(q);
++ spin_unlock_irq(q->queue_lock);
++}
++
++/*
++ * Handler of the expiration of the timer running if the in-service queue
++ * is idling inside its time slice.
++ */
++static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
++{
++ struct bfq_data *bfqd = container_of(timer, struct bfq_data,
++ idle_slice_timer);
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ enum bfqq_expiration reason;
++
++ spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++
++ bfqq = bfqd->in_service_queue;
++ /*
++ * Theoretical race here: the in-service queue can be NULL or
++ * different from the queue that was idling if the timer handler
++ * spins on the queue_lock and a new request arrives for the
++ * current queue and there is a full dispatch cycle that changes
++ * the in-service queue. This can hardly happen, but in the worst
++ * case we just expire a queue too early.
++ */
++ if (bfqq) {
++ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++ bfq_clear_bfqq_wait_request(bfqq);
++
++ if (bfq_bfqq_budget_timeout(bfqq))
++ /*
++ * Also here the queue can be safely expired
++ * for budget timeout without wasting
++ * guarantees
++ */
++ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++ /*
++ * The queue may not be empty upon timer expiration,
++ * because we may not disable the timer when the
++ * first request of the in-service queue arrives
++ * during disk idling.
++ */
++ reason = BFQ_BFQQ_TOO_IDLE;
++ else
++ goto schedule_dispatch;
++
++ bfq_bfqq_expire(bfqd, bfqq, true, reason);
++ }
++
++schedule_dispatch:
++ bfq_schedule_dispatch(bfqd);
++
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
++ return HRTIMER_NORESTART;
++}
++
++static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
++{
++ hrtimer_cancel(&bfqd->idle_slice_timer);
++ cancel_work_sync(&bfqd->unplug_work);
++}
++
++static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
++ struct bfq_queue **bfqq_ptr)
++{
++ struct bfq_group *root_group = bfqd->root_group;
++ struct bfq_queue *bfqq = *bfqq_ptr;
++
++ bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ if (bfqq) {
++ bfq_bfqq_move(bfqd, bfqq, root_group);
++ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfqq, bfqq->ref);
++ bfq_put_queue(bfqq);
++ *bfqq_ptr = NULL;
++ }
++}
++
++/*
++ * Release all the bfqg references to its async queues. If we are
++ * deallocating the group these queues may still contain requests, so
++ * we reparent them to the root cgroup (i.e., the only one that will
++ * exist for sure until all the requests on a device are gone).
++ */
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
++
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
++}
++
++static void bfq_exit_queue(struct elevator_queue *e)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ struct request_queue *q = bfqd->queue;
++ struct bfq_queue *bfqq, *n;
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ spin_lock_irq(q->queue_lock);
++
++ BUG_ON(bfqd->in_service_queue);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
++ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
++
++ spin_unlock_irq(q->queue_lock);
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
++#else
++ bfq_put_async_queues(bfqd, bfqd->root_group);
++ kfree(bfqd->root_group);
++#endif
++
++ kfree(bfqd);
++}
++
++static void bfq_init_root_group(struct bfq_group *root_group,
++ struct bfq_data *bfqd)
++{
++ int i;
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ root_group->entity.parent = NULL;
++ root_group->my_entity = NULL;
++ root_group->bfqd = bfqd;
++#endif
++ root_group->rq_pos_tree = RB_ROOT;
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++ root_group->sched_data.bfq_class_idle_last_service = jiffies;
++}
++
++static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
++{
++ struct bfq_data *bfqd;
++ struct elevator_queue *eq;
++
++ eq = elevator_alloc(q, e);
++ if (!eq)
++ return -ENOMEM;
++
++ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
++ if (!bfqd) {
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++ eq->elevator_data = bfqd;
++
++ /*
++ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
++ * Grab a permanent reference to it, so that the normal code flow
++ * will not attempt to free it.
++ */
++ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
++ bfqd->oom_bfqq.ref++;
++ bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
++ bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
++ bfqd->oom_bfqq.entity.new_weight =
++ bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
++
++ /* oom_bfqq does not participate in bursts */
++ bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
++ /*
++ * Trigger weight initialization, according to ioprio, at the
++ * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
++ * class won't be changed any more.
++ */
++ bfqd->oom_bfqq.entity.prio_changed = 1;
++
++ bfqd->queue = q;
++
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
++
++ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
++ if (!bfqd->root_group)
++ goto out_free;
++ bfq_init_root_group(bfqd->root_group, bfqd);
++ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
++
++ hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
++ HRTIMER_MODE_REL);
++ bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
++
++ bfqd->queue_weights_tree = RB_ROOT;
++ bfqd->group_weights_tree = RB_ROOT;
++
++ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
++
++ INIT_LIST_HEAD(&bfqd->active_list);
++ INIT_LIST_HEAD(&bfqd->idle_list);
++ INIT_HLIST_HEAD(&bfqd->burst_list);
++
++ bfqd->hw_tag = -1;
++
++ bfqd->bfq_max_budget = bfq_default_max_budget;
++
++ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
++ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
++ bfqd->bfq_back_max = bfq_back_max;
++ bfqd->bfq_back_penalty = bfq_back_penalty;
++ bfqd->bfq_slice_idle = bfq_slice_idle;
++ bfqd->bfq_timeout = bfq_timeout;
++
++ bfqd->bfq_requests_within_timer = 120;
++
++ bfqd->bfq_large_burst_thresh = 8;
++ bfqd->bfq_burst_interval = msecs_to_jiffies(180);
++
++ bfqd->low_latency = true;
++
++ /*
++ * Trade-off between responsiveness and fairness.
++ */
++ bfqd->bfq_wr_coeff = 30;
++ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
++ bfqd->bfq_wr_max_time = 0;
++ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
++ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
++ bfqd->bfq_wr_max_softrt_rate = 7000; /*
++ * Approximate rate required
++ * to play back or record a
++ * high-definition compressed
++ * video.
++ */
++ bfqd->wr_busy_queues = 0;
++
++ /*
++ * Begin by assuming, optimistically, that the device is a
++ * high-speed one, and that its peak rate is equal to 2/3 of
++ * the highest reference rate.
++ */
++ bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
++ T_fast[blk_queue_nonrot(bfqd->queue)];
++ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
++ bfqd->device_speed = BFQ_BFQD_FAST;
++
++ return 0;
++
++out_free:
++ kfree(bfqd);
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++}
++
++static void bfq_slab_kill(void)
++{
++ kmem_cache_destroy(bfq_pool);
++}
++
++static int __init bfq_slab_setup(void)
++{
++ bfq_pool = KMEM_CACHE(bfq_queue, 0);
++ if (!bfq_pool)
++ return -ENOMEM;
++ return 0;
++}
++
++static ssize_t bfq_var_show(unsigned int var, char *page)
++{
++ return sprintf(page, "%u\n", var);
++}
++
++static ssize_t bfq_var_store(unsigned long *var, const char *page,
++ size_t count)
++{
++ unsigned long new_val;
++ int ret = kstrtoul(page, 10, &new_val);
++
++ if (ret == 0)
++ *var = new_val;
++
++ return count;
++}
++
++static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++
++ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
++ jiffies_to_msecs(bfqd->bfq_wr_max_time) :
++ jiffies_to_msecs(bfq_wr_duration(bfqd)));
++}
++
++static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_queue *bfqq;
++ struct bfq_data *bfqd = e->elevator_data;
++ ssize_t num_char = 0;
++
++ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
++ bfqd->queued);
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ num_char += sprintf(page + num_char, "Active:\n");
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, nr_queued %d %d, ",
++ bfqq->pid,
++ bfqq->entity.weight,
++ bfqq->queued[0],
++ bfqq->queued[1]);
++ num_char += sprintf(page + num_char,
++ "dur %d/%u\n",
++ jiffies_to_msecs(
++ jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ num_char += sprintf(page + num_char, "Idle:\n");
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ jiffies_to_msecs(jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++
++ return num_char;
++}
++
++#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ u64 __data = __VAR; \
++ if (__CONV == 1) \
++ __data = jiffies_to_msecs(__data); \
++ else if (__CONV == 2) \
++ __data = div_u64(__data, NSEC_PER_MSEC); \
++ return bfq_var_show(__data, (page)); \
++}
++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
++SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
++SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
++SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
++SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
++SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
++SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
++SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
++SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
++SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
++SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
++ 1);
++SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
++#undef SHOW_FUNCTION
++
++#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ u64 __data = __VAR; \
++ __data = div_u64(__data, NSEC_PER_USEC); \
++ return bfq_var_show(__data, (page)); \
++}
++USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
++#undef USEC_SHOW_FUNCTION
++
++#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
++static ssize_t \
++__FUNC(struct elevator_queue *e, const char *page, size_t count) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ if (__CONV == 1) \
++ *(__PTR) = msecs_to_jiffies(__data); \
++ else if (__CONV == 2) \
++ *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
++ else \
++ *(__PTR) = __data; \
++ return ret; \
++}
++STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
++ INT_MAX, 2);
++STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
++ INT_MAX, 2);
++STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
++STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
++ INT_MAX, 0);
++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
++STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
++ 1);
++STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
++ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
++ INT_MAX, 0);
++#undef STORE_FUNCTION
++
++#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
++static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ *(__PTR) = (u64)__data * NSEC_PER_USEC; \
++ return ret; \
++}
++USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
++ UINT_MAX);
++#undef USEC_STORE_FUNCTION
++
++/* do nothing for the moment */
++static ssize_t bfq_weights_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ return count;
++}
++
++static ssize_t bfq_max_budget_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data == 0)
++ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
++ else {
++ if (__data > INT_MAX)
++ __data = INT_MAX;
++ bfqd->bfq_max_budget = __data;
++ }
++
++ bfqd->bfq_user_max_budget = __data;
++
++ return ret;
++}
++
++/*
++ * Leaving this name to preserve name compatibility with cfq
++ * parameters, but this timeout is used for both sync and async.
++ */
++static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data < 1)
++ __data = 1;
++ else if (__data > INT_MAX)
++ __data = INT_MAX;
++
++ bfqd->bfq_timeout = msecs_to_jiffies(__data);
++ if (bfqd->bfq_user_max_budget == 0)
++ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
++
++ return ret;
++}
++
++static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (!bfqd->strict_guarantees && __data == 1
++ && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
++ bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
++
++ bfqd->strict_guarantees = __data;
++
++ return ret;
++}
++
++static ssize_t bfq_low_latency_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (__data == 0 && bfqd->low_latency != 0)
++ bfq_end_wr(bfqd);
++ bfqd->low_latency = __data;
++
++ return ret;
++}
++
++#define BFQ_ATTR(name) \
++ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
++
++static struct elv_fs_entry bfq_attrs[] = {
++ BFQ_ATTR(fifo_expire_sync),
++ BFQ_ATTR(fifo_expire_async),
++ BFQ_ATTR(back_seek_max),
++ BFQ_ATTR(back_seek_penalty),
++ BFQ_ATTR(slice_idle),
++ BFQ_ATTR(slice_idle_us),
++ BFQ_ATTR(max_budget),
++ BFQ_ATTR(timeout_sync),
++ BFQ_ATTR(strict_guarantees),
++ BFQ_ATTR(low_latency),
++ BFQ_ATTR(wr_coeff),
++ BFQ_ATTR(wr_max_time),
++ BFQ_ATTR(wr_rt_max_time),
++ BFQ_ATTR(wr_min_idle_time),
++ BFQ_ATTR(wr_min_inter_arr_async),
++ BFQ_ATTR(wr_max_softrt_rate),
++ BFQ_ATTR(weights),
++ __ATTR_NULL
++};
++
++static struct elevator_type iosched_bfq = {
++ .ops.sq = {
++ .elevator_merge_fn = bfq_merge,
++ .elevator_merged_fn = bfq_merged_request,
++ .elevator_merge_req_fn = bfq_merged_requests,
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ .elevator_bio_merged_fn = bfq_bio_merged,
++#endif
++ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge,
++ .elevator_allow_rq_merge_fn = bfq_allow_rq_merge,
++ .elevator_dispatch_fn = bfq_dispatch_requests,
++ .elevator_add_req_fn = bfq_insert_request,
++ .elevator_activate_req_fn = bfq_activate_request,
++ .elevator_deactivate_req_fn = bfq_deactivate_request,
++ .elevator_completed_req_fn = bfq_completed_request,
++ .elevator_former_req_fn = elv_rb_former_request,
++ .elevator_latter_req_fn = elv_rb_latter_request,
++ .elevator_init_icq_fn = bfq_init_icq,
++ .elevator_exit_icq_fn = bfq_exit_icq,
++ .elevator_set_req_fn = bfq_set_request,
++ .elevator_put_req_fn = bfq_put_request,
++ .elevator_may_queue_fn = bfq_may_queue,
++ .elevator_init_fn = bfq_init_queue,
++ .elevator_exit_fn = bfq_exit_queue,
++ },
++ .icq_size = sizeof(struct bfq_io_cq),
++ .icq_align = __alignof__(struct bfq_io_cq),
++ .elevator_attrs = bfq_attrs,
++ .elevator_name = "bfq-sq",
++ .elevator_owner = THIS_MODULE,
++};
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++static struct blkcg_policy blkcg_policy_bfq = {
++ .dfl_cftypes = bfq_blkg_files,
++ .legacy_cftypes = bfq_blkcg_legacy_files,
++
++ .cpd_alloc_fn = bfq_cpd_alloc,
++ .cpd_init_fn = bfq_cpd_init,
++ .cpd_bind_fn = bfq_cpd_init,
++ .cpd_free_fn = bfq_cpd_free,
++
++ .pd_alloc_fn = bfq_pd_alloc,
++ .pd_init_fn = bfq_pd_init,
++ .pd_offline_fn = bfq_pd_offline,
++ .pd_free_fn = bfq_pd_free,
++ .pd_reset_stats_fn = bfq_pd_reset_stats,
++};
++#endif
++
++static int __init bfq_init(void)
++{
++ int ret;
++ char msg[60] = "BFQ I/O-scheduler: v8r12";
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ ret = blkcg_policy_register(&blkcg_policy_bfq);
++ if (ret)
++ return ret;
++#endif
++
++ ret = -ENOMEM;
++ if (bfq_slab_setup())
++ goto err_pol_unreg;
++
++ /*
++ * Times to load large popular applications for the typical
++ * systems installed on the reference devices (see the
++ * comments before the definitions of the next two
++ * arrays). Actually, we use slightly slower values, as the
++ * estimated peak rate tends to be smaller than the actual
++ * peak rate. The reason for this last fact is that estimates
++ * are computed over much shorter time intervals than the long
++ * intervals typically used for benchmarking. Why? First, to
++ * adapt more quickly to variations. Second, because an I/O
++ * scheduler cannot rely on a peak-rate-evaluation workload to
++ * be run for a long time.
++ */
++ T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
++ T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
++ T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
++ T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */
++
++ /*
++ * Thresholds that determine the switch between speed classes
++ * (see the comments before the definition of the array
++ * device_speed_thresh). These thresholds are biased towards
++ * transitions to the fast class. This is safer than the
++ * opposite bias. In fact, a wrong transition to the slow
++ * class results in short weight-raising periods, because the
++ * speed of the device then tends to be higher than the
++ * reference peak rate. On the opposite end, a wrong
++ * transition to the fast class tends to increase
++ * weight-raising periods, for the opposite reason.
++ */
++ device_speed_thresh[0] = (4 * R_slow[0]) / 3;
++ device_speed_thresh[1] = (4 * R_slow[1]) / 3;
++
++ ret = elv_register(&iosched_bfq);
++ if (ret)
++ goto err_pol_unreg;
++
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ strcat(msg, " (with cgroups support)");
++#endif
++ pr_info("%s", msg);
++
++ return 0;
++
++err_pol_unreg:
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_policy_unregister(&blkcg_policy_bfq);
++#endif
++ return ret;
++}
++
++static void __exit bfq_exit(void)
++{
++ elv_unregister(&iosched_bfq);
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++ blkcg_policy_unregister(&blkcg_policy_bfq);
++#endif
++ bfq_slab_kill();
++}
++
++module_init(bfq_init);
++module_exit(bfq_exit);
++
++MODULE_AUTHOR("Arianna Avanzini, Fabio Checconi, Paolo Valente");
++MODULE_LICENSE("GPL");
+
+From e24d2e6461479dbd13d58be2dc44b23b5e24487c Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 19 Dec 2016 17:13:39 +0100
+Subject: [PATCH 07/51] Add config and build bits for bfq-mq-iosched
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/Kconfig.iosched | 10 +++++++++
+ block/Makefile | 1 +
+ block/bfq-cgroup-included.c | 4 ++--
+ block/bfq-mq-iosched.c | 25 ++++++++++++-----------
+ block/bfq-sched.c | 50 ++++++++++++++++++++++-----------------------
+ block/bfq-sq-iosched.c | 24 +++++++++++-----------
+ block/bfq.h | 36 +++++++++++++++++++++-----------
+ 8 files changed, 88 insertions(+), 64 deletions(-)
+
+diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
+index 9e3f4c2f7390..2d94af3d8b0a 100644
+--- a/block/Kconfig.iosched
++++ b/block/Kconfig.iosched
+@@ -96,6 +96,16 @@ config DEFAULT_IOSCHED
+ default "bfq-sq" if DEFAULT_BFQ_SQ
+ default "noop" if DEFAULT_NOOP
+
++config MQ_IOSCHED_BFQ
++ tristate "BFQ-MQ I/O Scheduler"
++ default y
++ ---help---
++ BFQ I/O scheduler for BLK-MQ. BFQ-MQ distributes bandwidth
++ among all processes according to their weights, regardless of
++ the device parameters and with any workload. It also
++ guarantees a low latency to interactive and soft real-time
++ applications. Details in Documentation/block/bfq-iosched.txt
++
+ config MQ_IOSCHED_DEADLINE
+ tristate "MQ deadline I/O scheduler"
+ default y
+diff --git a/block/Makefile b/block/Makefile
+index 59026b425791..a571329c23f0 100644
+--- a/block/Makefile
++++ b/block/Makefile
+@@ -25,6 +25,7 @@ obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
+ bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
+ obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
+ obj-$(CONFIG_IOSCHED_BFQ_SQ) += bfq-sq-iosched.o
++obj-$(CONFIG_MQ_IOSCHED_BFQ) += bfq-mq-iosched.o
+
+ obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
+ obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index af7c216a3540..9c483b658179 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -15,7 +15,7 @@
+ * file.
+ */
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+
+ /* bfqg stats flags */
+ enum bfqg_stats_flags {
+@@ -1116,7 +1116,7 @@ static struct cftype bfq_blkg_files[] = {
+ {} /* terminate */
+ };
+
+-#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
+ struct bfq_queue *bfqq, unsigned int op) { }
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 30d019fc67e0..e88e00f1e0a7 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -82,6 +82,7 @@
+ #include <linux/rbtree.h>
+ #include <linux/ioprio.h>
+ #include "blk.h"
++#undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */
+ #include "bfq.h"
+
+ /* Expiration time of sync (0) and async (1) requests, in ns. */
+@@ -387,7 +388,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
+ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
+ (bfqd->queue_weights_tree.rb_node->rb_left ||
+ bfqd->queue_weights_tree.rb_node->rb_right)
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ ) ||
+ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
+ (bfqd->group_weights_tree.rb_node->rb_left ||
+@@ -1672,7 +1673,7 @@ static void bfq_merged_request(struct request_queue *q, struct request *req,
+ }
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static void bfq_bio_merged(struct request_queue *q, struct request *req,
+ struct bio *bio)
+ {
+@@ -3879,7 +3880,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
+ */
+ static void bfq_put_queue(struct bfq_queue *bfqq)
+ {
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ struct bfq_group *bfqg = bfqq_group(bfqq);
+ #endif
+
+@@ -3909,7 +3910,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+
+ kmem_cache_free(bfq_pool, bfqq);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ bfqg_put(bfqg);
+ #endif
+ }
+@@ -4835,7 +4836,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
+ #else
+ bfq_put_async_queues(bfqd, bfqd->root_group);
+@@ -4850,7 +4851,7 @@ static void bfq_init_root_group(struct bfq_group *root_group,
+ {
+ int i;
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ root_group->entity.parent = NULL;
+ root_group->my_entity = NULL;
+ root_group->bfqd = bfqd;
+@@ -5265,7 +5266,7 @@ static struct elevator_type iosched_bfq = {
+ .elevator_merge_fn = bfq_merge,
+ .elevator_merged_fn = bfq_merged_request,
+ .elevator_merge_req_fn = bfq_merged_requests,
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ .elevator_bio_merged_fn = bfq_bio_merged,
+ #endif
+ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge,
+@@ -5292,7 +5293,7 @@ static struct elevator_type iosched_bfq = {
+ .elevator_owner = THIS_MODULE,
+ };
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct blkcg_policy blkcg_policy_bfq = {
+ .dfl_cftypes = bfq_blkg_files,
+ .legacy_cftypes = bfq_blkcg_legacy_files,
+@@ -5315,7 +5316,7 @@ static int __init bfq_init(void)
+ int ret;
+ char msg[60] = "BFQ I/O-scheduler: v8r12";
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ ret = blkcg_policy_register(&blkcg_policy_bfq);
+ if (ret)
+ return ret;
+@@ -5362,7 +5363,7 @@ static int __init bfq_init(void)
+ if (ret)
+ goto err_pol_unreg;
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ strcat(msg, " (with cgroups support)");
+ #endif
+ pr_info("%s", msg);
+@@ -5370,7 +5371,7 @@ static int __init bfq_init(void)
+ return 0;
+
+ err_pol_unreg:
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ return ret;
+@@ -5379,7 +5380,7 @@ static int __init bfq_init(void)
+ static void __exit bfq_exit(void)
+ {
+ elv_unregister(&iosched_bfq);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ bfq_slab_kill();
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 5c0f9290a79c..b54a638186e3 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -136,7 +136,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "update_next_in_service: chosen this queue");
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(next_in_service,
+@@ -149,7 +149,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ return parent_sched_may_change;
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ /* both next loops stop at one of the child entities of the root group */
+ #define for_each_entity(entity) \
+ for (; entity ; entity = entity->parent)
+@@ -243,7 +243,7 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
+ return false;
+ }
+
+-#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
+ #define for_each_entity(entity) \
+ for (; entity ; entity = NULL)
+
+@@ -260,7 +260,7 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
+ return true;
+ }
+
+-#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ /*
+ * Shift for timestamp calculations. This actually limits the maximum
+@@ -323,7 +323,7 @@ static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "calc_finish: start %llu, finish %llu, delta %llu",
+ start, finish, delta);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -473,7 +473,7 @@ static void bfq_update_active_node(struct rb_node *node)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "update_active_node: new min_start %llu",
+ ((entity->min_start>>10)*1000)>>12);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -540,7 +540,7 @@ static void bfq_active_insert(struct bfq_service_tree *st,
+ {
+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
+ struct rb_node *node = &entity->rb_node;
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ struct bfq_sched_data *sd = NULL;
+ struct bfq_group *bfqg = NULL;
+ struct bfq_data *bfqd = NULL;
+@@ -555,7 +555,7 @@ static void bfq_active_insert(struct bfq_service_tree *st,
+
+ bfq_update_active_tree(node);
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ sd = entity->sched_data;
+ bfqg = container_of(sd, struct bfq_group, sched_data);
+ BUG_ON(!bfqg);
+@@ -563,7 +563,7 @@ static void bfq_active_insert(struct bfq_service_tree *st,
+ #endif
+ if (bfqq)
+ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else { /* bfq_group */
+ BUG_ON(!bfqd);
+ bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
+@@ -652,7 +652,7 @@ static void bfq_active_extract(struct bfq_service_tree *st,
+ {
+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
+ struct rb_node *node;
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ struct bfq_sched_data *sd = NULL;
+ struct bfq_group *bfqg = NULL;
+ struct bfq_data *bfqd = NULL;
+@@ -664,7 +664,7 @@ static void bfq_active_extract(struct bfq_service_tree *st,
+ if (node)
+ bfq_update_active_tree(node);
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ sd = entity->sched_data;
+ bfqg = container_of(sd, struct bfq_group, sched_data);
+ BUG_ON(!bfqg);
+@@ -672,7 +672,7 @@ static void bfq_active_extract(struct bfq_service_tree *st,
+ #endif
+ if (bfqq)
+ list_del(&bfqq->bfqq_list);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else { /* bfq_group */
+ BUG_ON(!bfqd);
+ bfq_weights_tree_remove(bfqd, entity,
+@@ -809,14 +809,14 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
+ unsigned int prev_weight, new_weight;
+ struct bfq_data *bfqd = NULL;
+ struct rb_root *root;
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ struct bfq_sched_data *sd;
+ struct bfq_group *bfqg;
+ #endif
+
+ if (bfqq)
+ bfqd = bfqq->bfqd;
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ sd = entity->my_sched_data;
+ bfqg = container_of(sd, struct bfq_group, sched_data);
+@@ -907,7 +907,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
+ return new_st;
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
+ #endif
+
+@@ -936,7 +936,7 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
+ st->vtime += bfq_delta(served, st->wsum);
+ bfq_forget_idle(st);
+ }
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
+ #endif
+ st = bfq_entity_service_tree(&bfqq->entity);
+@@ -1060,7 +1060,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "__activate_entity: new queue finish %llu",
+ ((entity->finish>>10)*1000)>>12);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -1078,7 +1078,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "__activate_entity: queue %seligible in st %p",
+ entity->start <= st->vtime ? "" : "non ", st);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -1153,7 +1153,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
+
+ BUG_ON(entity->on_st && bfqq);
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ if (entity->on_st && !bfqq) {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group,
+@@ -1485,7 +1485,7 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "invoking udpdate_next for this queue");
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(entity,
+@@ -1525,7 +1525,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "calc_vtime_jump: new value %llu",
+ root_entity->min_start);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(root_entity, struct bfq_group,
+@@ -1661,7 +1661,7 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service
+ "__lookup_next: start %llu vtime %llu st %p",
+ ((entity->start>>10)*1000)>>12,
+ ((new_vtime>>10)*1000)>>12, st);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -1735,7 +1735,7 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "chosen from st %p %d",
+ st + class_idx, class_idx);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -1777,7 +1777,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ */
+ sd = &bfqd->root_group->sched_data;
+ for (; sd ; sd = entity->my_sched_data) {
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ if (entity) {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -1867,7 +1867,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ bfq_log_bfqq(bfqd, bfqq,
+ "get_next_queue: this queue, finish %llu",
+ (((entity->finish>>10)*1000)>>10)>>2);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 30d019fc67e0..25da0d1c0622 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -387,7 +387,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
+ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
+ (bfqd->queue_weights_tree.rb_node->rb_left ||
+ bfqd->queue_weights_tree.rb_node->rb_right)
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ ) ||
+ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
+ (bfqd->group_weights_tree.rb_node->rb_left ||
+@@ -1672,7 +1672,7 @@ static void bfq_merged_request(struct request_queue *q, struct request *req,
+ }
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static void bfq_bio_merged(struct request_queue *q, struct request *req,
+ struct bio *bio)
+ {
+@@ -3879,7 +3879,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
+ */
+ static void bfq_put_queue(struct bfq_queue *bfqq)
+ {
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ struct bfq_group *bfqg = bfqq_group(bfqq);
+ #endif
+
+@@ -3909,7 +3909,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+
+ kmem_cache_free(bfq_pool, bfqq);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ bfqg_put(bfqg);
+ #endif
+ }
+@@ -4835,7 +4835,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
+ #else
+ bfq_put_async_queues(bfqd, bfqd->root_group);
+@@ -4850,7 +4850,7 @@ static void bfq_init_root_group(struct bfq_group *root_group,
+ {
+ int i;
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ root_group->entity.parent = NULL;
+ root_group->my_entity = NULL;
+ root_group->bfqd = bfqd;
+@@ -5265,7 +5265,7 @@ static struct elevator_type iosched_bfq = {
+ .elevator_merge_fn = bfq_merge,
+ .elevator_merged_fn = bfq_merged_request,
+ .elevator_merge_req_fn = bfq_merged_requests,
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ .elevator_bio_merged_fn = bfq_bio_merged,
+ #endif
+ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge,
+@@ -5292,7 +5292,7 @@ static struct elevator_type iosched_bfq = {
+ .elevator_owner = THIS_MODULE,
+ };
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct blkcg_policy blkcg_policy_bfq = {
+ .dfl_cftypes = bfq_blkg_files,
+ .legacy_cftypes = bfq_blkcg_legacy_files,
+@@ -5315,7 +5315,7 @@ static int __init bfq_init(void)
+ int ret;
+ char msg[60] = "BFQ I/O-scheduler: v8r12";
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ ret = blkcg_policy_register(&blkcg_policy_bfq);
+ if (ret)
+ return ret;
+@@ -5362,7 +5362,7 @@ static int __init bfq_init(void)
+ if (ret)
+ goto err_pol_unreg;
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ strcat(msg, " (with cgroups support)");
+ #endif
+ pr_info("%s", msg);
+@@ -5370,7 +5370,7 @@ static int __init bfq_init(void)
+ return 0;
+
+ err_pol_unreg:
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ return ret;
+@@ -5379,7 +5379,7 @@ static int __init bfq_init(void)
+ static void __exit bfq_exit(void)
+ {
+ elv_unregister(&iosched_bfq);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ bfq_slab_kill();
+diff --git a/block/bfq.h b/block/bfq.h
+index 34fc4697fd89..53954d1b87f8 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -19,6 +19,18 @@
+ #include <linux/hrtimer.h>
+ #include <linux/blk-cgroup.h>
+
++/*
++ * Define an alternative macro to compile cgroups support. This is one
++ * of the steps needed to let bfq-mq share the files bfq-sched.c and
++ * bfq-cgroup.c with bfq-sq. For bfq-mq, the macro
++ * BFQ_GROUP_IOSCHED_ENABLED will be defined as a function of whether
++ * the configuration option CONFIG_BFQ_MQ_GROUP_IOSCHED, and not
++ * CONFIG_BFQ_GROUP_IOSCHED, is defined.
++ */
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#define BFQ_GROUP_IOSCHED_ENABLED
++#endif
++
+ #define BFQ_IOPRIO_CLASSES 3
+ #define BFQ_CL_IDLE_TIMEOUT (HZ/5)
+
+@@ -344,7 +356,7 @@ struct bfq_io_cq {
+ struct bfq_ttime ttime;
+ /* per (request_queue, blkcg) ioprio */
+ int ioprio;
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ uint64_t blkcg_serial_nr; /* the current blkcg serial */
+ #endif
+
+@@ -671,7 +683,7 @@ static const char *checked_dev_name(const struct device *dev)
+ return nodev;
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+@@ -696,7 +708,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ __pbuf, ##args); \
+ } while (0)
+
+-#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+ pr_crit("%s bfq%d%c " fmt "\n", \
+@@ -705,7 +717,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+-#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+ pr_crit("%s bfq " fmt "\n", \
+@@ -713,7 +725,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ ##args)
+
+ #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+@@ -735,7 +747,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
+ } while (0)
+
+-#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
+@@ -743,7 +755,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+-#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */
++#endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
+@@ -763,7 +775,7 @@ enum bfqq_expiration {
+
+
+ struct bfqg_stats {
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ /* number of ios merged */
+ struct blkg_rwstat merged;
+ /* total time spent on device in ns, may not be accurate w/ queueing */
+@@ -794,7 +806,7 @@ struct bfqg_stats {
+ #endif
+ };
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ /*
+ * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
+ *
+@@ -895,7 +907,7 @@ bfq_entity_service_tree(struct bfq_entity *entity)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "entity_service_tree %p %d",
+ sched_data->service_tree + idx, idx);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+ container_of(entity, struct bfq_group, entity);
+@@ -924,7 +936,7 @@ static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
+ return bic->icq.q->elevator->elevator_data;
+ }
+
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+
+ static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
+ {
+@@ -953,7 +965,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
+ struct bfq_io_cq *bic);
+ static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
+ struct bfq_group *bfqg);
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
+ #endif
+ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
+
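+The hunks above hinge on a single alias macro: shared files such as
+bfq-sched.c and bfq-cgroup.c test BFQ_GROUP_IOSCHED_ENABLED instead of a
+specific Kconfig symbol, and each header maps its own option onto that
+alias. A minimal sketch of the pattern (illustrative only, not part of
+the patch; the bfq-mq side is an assumption based on the comment added
+to bfq.h above):
+
+/* bfq.h (bfq-sq): map the sq Kconfig option onto the shared alias */
+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
+#define BFQ_GROUP_IOSCHED_ENABLED
+#endif
+
+/* bfq-mq.h (introduced later in the series): same alias, mq option */
+#ifdef CONFIG_BFQ_MQ_GROUP_IOSCHED
+#define BFQ_GROUP_IOSCHED_ENABLED
+#endif
+
+/* shared scheduler/cgroup code then only ever tests the alias */
+#ifdef BFQ_GROUP_IOSCHED_ENABLED
+/* ... cgroups-specific code ... */
+#endif
+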
+From add91dbd756cf8ca3aa3add9a19eef742d5fca6b Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 20 Jan 2017 09:18:25 +0100
+Subject: [PATCH 08/51] Increase max policies for io controller
+
+To let the bfq-mq policy be plugged in too (however, cgroups
+support is not yet functional in bfq-mq).
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ include/linux/blkdev.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index bf000c58644b..10f892ca585d 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -54,7 +54,7 @@ struct blk_stat_callback;
+ * Maximum number of blkcg policies allowed to be registered concurrently.
+ * Defined here to simplify include dependency.
+ */
+-#define BLKCG_MAX_POLS 4
++#define BLKCG_MAX_POLS 5
+
+ typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+
+
+From 2c39a1d9ab4516d44e01e96f19f578b927e7f2e9 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 19 Dec 2016 18:11:33 +0100
+Subject: [PATCH 09/51] Copy header file bfq.h as bfq-mq.h
+
+This commit introduces the header file bfq-mq.h, which will play
+for bfq-mq-iosched.c the same role that bfq.h plays for bfq-iosched.c.
+
+For the moment, the file bfq-mq.h is just a copy of bfq.h.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ block/bfq-mq.h | 973 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 974 insertions(+), 1 deletion(-)
+ create mode 100644 block/bfq-mq.h
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index e88e00f1e0a7..d1125aee658c 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -83,7 +83,7 @@
+ #include <linux/ioprio.h>
+ #include "blk.h"
+ #undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */
+-#include "bfq.h"
++#include "bfq-mq.h"
+
+ /* Expiration time of sync (0) and async (1) requests, in ns. */
+ static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+new file mode 100644
+index 000000000000..53954d1b87f8
+--- /dev/null
++++ b/block/bfq-mq.h
+@@ -0,0 +1,973 @@
++/*
++ * BFQ v8r12 for 4.11.0: data structures and common functions prototypes.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
++ */
++
++#ifndef _BFQ_H
++#define _BFQ_H
++
++#include <linux/blktrace_api.h>
++#include <linux/hrtimer.h>
++#include <linux/blk-cgroup.h>
++
++/*
++ * Define an alternative macro to compile cgroups support. This is one
++ * of the steps needed to let bfq-mq share the files bfq-sched.c and
++ * bfq-cgroup.c with bfq-sq. For bfq-mq, the macro
++ * BFQ_GROUP_IOSCHED_ENABLED will be defined as a function of whether
++ * the configuration option CONFIG_BFQ_MQ_GROUP_IOSCHED, and not
++ * CONFIG_BFQ_GROUP_IOSCHED, is defined.
++ */
++#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++#define BFQ_GROUP_IOSCHED_ENABLED
++#endif
++
++#define BFQ_IOPRIO_CLASSES 3
++#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
++
++#define BFQ_MIN_WEIGHT 1
++#define BFQ_MAX_WEIGHT 1000
++#define BFQ_WEIGHT_CONVERSION_COEFF 10
++
++#define BFQ_DEFAULT_QUEUE_IOPRIO 4
++
++#define BFQ_WEIGHT_LEGACY_DFL 100
++#define BFQ_DEFAULT_GRP_IOPRIO 0
++#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
++
++/*
++ * Soft real-time applications are extremely more latency sensitive
++ * than interactive ones. Over-raise the weight of the former to
++ * privilege them against the latter.
++ */
++#define BFQ_SOFTRT_WEIGHT_FACTOR 100
++
++struct bfq_entity;
++
++/**
++ * struct bfq_service_tree - per ioprio_class service tree.
++ *
++ * Each service tree represents a B-WF2Q+ scheduler on its own. Each
++ * ioprio_class has its own independent scheduler, and so its own
++ * bfq_service_tree. All the fields are protected by the queue lock
++ * of the containing bfqd.
++ */
++struct bfq_service_tree {
++ /* tree for active entities (i.e., those backlogged) */
++ struct rb_root active;
++ /* tree for idle entities (i.e., not backlogged, with V <= F_i)*/
++ struct rb_root idle;
++
++ struct bfq_entity *first_idle; /* idle entity with minimum F_i */
++ struct bfq_entity *last_idle; /* idle entity with maximum F_i */
++
++ u64 vtime; /* scheduler virtual time */
++ /* scheduler weight sum; active and idle entities contribute to it */
++ unsigned long wsum;
++};
++
++/**
++ * struct bfq_sched_data - multi-class scheduler.
++ *
++ * bfq_sched_data is the basic scheduler queue. It supports three
++ * ioprio_classes, and can be used either as a toplevel queue or as an
++ * intermediate queue in a hierarchical setup.
++ *
++ * The supported ioprio_classes are the same as in CFQ, in descending
++ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
++ * Requests from higher priority queues are served before all the
++ * requests from lower priority queues; among requests of the same
++ * queue requests are served according to B-WF2Q+.
++ *
++ * The schedule is implemented by the service trees, plus the field
++ * @next_in_service, which points to the entity on the active trees
++ * that will be served next, if 1) no changes in the schedule occur
++ * before the current in-service entity is expired, 2) the in-service
++ * queue becomes idle when it expires, and 3) if the entity pointed by
++ * in_service_entity is not a queue, then the in-service child entity
++ * of the entity pointed by in_service_entity becomes idle on
++ * expiration. This peculiar definition allows for the following
++ * optimization, not yet exploited: while a given entity is still in
++ * service, we already know which is the best candidate for next
++ * service among the other active entities in the same parent
++ * entity. We can then quickly compare the timestamps of the
++ * in-service entity with those of such best candidate.
++ *
++ * All the fields are protected by the queue lock of the containing
++ * bfqd.
++ */
++struct bfq_sched_data {
++ struct bfq_entity *in_service_entity; /* entity in service */
++ /* head-of-the-line entity in the scheduler (see comments above) */
++ struct bfq_entity *next_in_service;
++ /* array of service trees, one per ioprio_class */
++ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
++ /* last time CLASS_IDLE was served */
++ unsigned long bfq_class_idle_last_service;
++
++};
++
++/**
++ * struct bfq_weight_counter - counter of the number of all active entities
++ * with a given weight.
++ */
++struct bfq_weight_counter {
++ unsigned int weight; /* weight of the entities this counter refers to */
++ unsigned int num_active; /* nr of active entities with this weight */
++ /*
++ * Weights tree member (see bfq_data's @queue_weights_tree and
++ * @group_weights_tree)
++ */
++ struct rb_node weights_node;
++};
++
++/**
++ * struct bfq_entity - schedulable entity.
++ *
++ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
++ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
++ * entity belongs to the sched_data of the parent group in the cgroup
++ * hierarchy. Non-leaf entities have also their own sched_data, stored
++ * in @my_sched_data.
++ *
++ * Each entity stores independently its priority values; this would
++ * allow different weights on different devices, but this
++ * functionality is not exported to userspace by now. Priorities and
++ * weights are updated lazily, first storing the new values into the
++ * new_* fields, then setting the @prio_changed flag. As soon as
++ * there is a transition in the entity state that allows the priority
++ * update to take place the effective and the requested priority
++ * values are synchronized.
++ *
++ * Unless cgroups are used, the weight value is calculated from the
++ * ioprio to export the same interface as CFQ. When dealing with
++ * ``well-behaved'' queues (i.e., queues that do not spend too much
++ * time to consume their budget and have true sequential behavior, and
++ * when there are no external factors breaking anticipation) the
++ * relative weights at each level of the cgroups hierarchy should be
++ * guaranteed. All the fields are protected by the queue lock of the
++ * containing bfqd.
++ */
++struct bfq_entity {
++ struct rb_node rb_node; /* service_tree member */
++ /* pointer to the weight counter associated with this entity */
++ struct bfq_weight_counter *weight_counter;
++
++ /*
++ * Flag, true if the entity is on a tree (either the active or
++ * the idle one of its service_tree) or is in service.
++ */
++ bool on_st;
++
++ u64 finish; /* B-WF2Q+ finish timestamp (aka F_i) */
++ u64 start; /* B-WF2Q+ start timestamp (aka S_i) */
++
++ /* tree the entity is enqueued into; %NULL if not on a tree */
++ struct rb_root *tree;
++
++ /*
++ * minimum start time of the (active) subtree rooted at this
++ * entity; used for O(log N) lookups into active trees
++ */
++ u64 min_start;
++
++ /* amount of service received during the last service slot */
++ int service;
++
++ /* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
++ int budget;
++
++ unsigned int weight; /* weight of the queue */
++ unsigned int new_weight; /* next weight if a change is in progress */
++
++ /* original weight, used to implement weight boosting */
++ unsigned int orig_weight;
++
++ /* parent entity, for hierarchical scheduling */
++ struct bfq_entity *parent;
++
++ /*
++ * For non-leaf nodes in the hierarchy, the associated
++ * scheduler queue, %NULL on leaf nodes.
++ */
++ struct bfq_sched_data *my_sched_data;
++ /* the scheduler queue this entity belongs to */
++ struct bfq_sched_data *sched_data;
++
++ /* flag, set to request a weight, ioprio or ioprio_class change */
++ int prio_changed;
++};
++
++struct bfq_group;
++
++/**
++ * struct bfq_queue - leaf schedulable entity.
++ *
++ * A bfq_queue is a leaf request queue; it can be associated with an
++ * io_context or more, if it is async or shared between cooperating
++ * processes. @cgroup holds a reference to the cgroup, to be sure that it
++ * does not disappear while a bfqq still references it (mostly to avoid
++ * races between request issuing and task migration followed by cgroup
++ * destruction).
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_queue {
++ /* reference counter */
++ int ref;
++ /* parent bfq_data */
++ struct bfq_data *bfqd;
++
++ /* current ioprio and ioprio class */
++ unsigned short ioprio, ioprio_class;
++ /* next ioprio and ioprio class if a change is in progress */
++ unsigned short new_ioprio, new_ioprio_class;
++
++ /*
++ * Shared bfq_queue if queue is cooperating with one or more
++ * other queues.
++ */
++ struct bfq_queue *new_bfqq;
++ /* request-position tree member (see bfq_group's @rq_pos_tree) */
++ struct rb_node pos_node;
++ /* request-position tree root (see bfq_group's @rq_pos_tree) */
++ struct rb_root *pos_root;
++
++ /* sorted list of pending requests */
++ struct rb_root sort_list;
++ /* if fifo isn't expired, next request to serve */
++ struct request *next_rq;
++ /* number of sync and async requests queued */
++ int queued[2];
++ /* number of sync and async requests currently allocated */
++ int allocated[2];
++ /* number of pending metadata requests */
++ int meta_pending;
++ /* fifo list of requests in sort_list */
++ struct list_head fifo;
++
++ /* entity representing this queue in the scheduler */
++ struct bfq_entity entity;
++
++ /* maximum budget allowed from the feedback mechanism */
++ int max_budget;
++ /* budget expiration (in jiffies) */
++ unsigned long budget_timeout;
++
++ /* number of requests on the dispatch list or inside driver */
++ int dispatched;
++
++ unsigned int flags; /* status flags.*/
++
++ /* node for active/idle bfqq list inside parent bfqd */
++ struct list_head bfqq_list;
++
++ /* bit vector: a 1 for each seeky requests in history */
++ u32 seek_history;
++
++ /* node for the device's burst list */
++ struct hlist_node burst_list_node;
++
++ /* position of the last request enqueued */
++ sector_t last_request_pos;
++
++ /* Number of consecutive pairs of request completion and
++ * arrival, such that the queue becomes idle after the
++ * completion, but the next request arrives within an idle
++ * time slice; used only if the queue's IO_bound flag has been
++ * cleared.
++ */
++ unsigned int requests_within_timer;
++
++ /* pid of the process owning the queue, used for logging purposes */
++ pid_t pid;
++
++ /*
++ * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL
++ * if the queue is shared.
++ */
++ struct bfq_io_cq *bic;
++
++ /* current maximum weight-raising time for this queue */
++ unsigned long wr_cur_max_time;
++ /*
++ * Minimum time instant such that, only if a new request is
++ * enqueued after this time instant in an idle @bfq_queue with
++ * no outstanding requests, then the task associated with the
++ * queue is deemed as soft real-time (see the comments on
++ * the function bfq_bfqq_softrt_next_start())
++ */
++ unsigned long soft_rt_next_start;
++ /*
++ * Start time of the current weight-raising period if
++ * the @bfq-queue is being weight-raised, otherwise
++ * finish time of the last weight-raising period.
++ */
++ unsigned long last_wr_start_finish;
++ /* factor by which the weight of this queue is multiplied */
++ unsigned int wr_coeff;
++ /*
++ * Time of the last transition of the @bfq_queue from idle to
++ * backlogged.
++ */
++ unsigned long last_idle_bklogged;
++ /*
++ * Cumulative service received from the @bfq_queue since the
++ * last transition from idle to backlogged.
++ */
++ unsigned long service_from_backlogged;
++ /*
++ * Value of wr start time when switching to soft rt
++ */
++ unsigned long wr_start_at_switch_to_srt;
++
++ unsigned long split_time; /* time of last split */
++};
++
++/**
++ * struct bfq_ttime - per process thinktime stats.
++ */
++struct bfq_ttime {
++ u64 last_end_request; /* completion time of last request */
++
++ u64 ttime_total; /* total process thinktime */
++ unsigned long ttime_samples; /* number of thinktime samples */
++ u64 ttime_mean; /* average process thinktime */
++
++};
++
++/**
++ * struct bfq_io_cq - per (request_queue, io_context) structure.
++ */
++struct bfq_io_cq {
++ /* associated io_cq structure */
++ struct io_cq icq; /* must be the first member */
++ /* array of two process queues, the sync and the async */
++ struct bfq_queue *bfqq[2];
++ /* associated @bfq_ttime struct */
++ struct bfq_ttime ttime;
++ /* per (request_queue, blkcg) ioprio */
++ int ioprio;
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ uint64_t blkcg_serial_nr; /* the current blkcg serial */
++#endif
++
++ /*
++ * Snapshot of the has_short_time flag before merging; taken
++ * to remember its value while the queue is merged, so as to
++ * be able to restore it in case of split.
++ */
++ bool saved_has_short_ttime;
++ /*
++ * Same purpose as the previous two fields for the I/O bound
++ * classification of a queue.
++ */
++ bool saved_IO_bound;
++
++ /*
++ * Same purpose as the previous fields for the value of the
++ * field keeping the queue's belonging to a large burst
++ */
++ bool saved_in_large_burst;
++ /*
++ * True if the queue belonged to a burst list before its merge
++ * with another cooperating queue.
++ */
++ bool was_in_burst_list;
++
++ /*
++ * Similar to previous fields: save wr information.
++ */
++ unsigned long saved_wr_coeff;
++ unsigned long saved_last_wr_start_finish;
++ unsigned long saved_wr_start_at_switch_to_srt;
++ unsigned int saved_wr_cur_max_time;
++};
++
++enum bfq_device_speed {
++ BFQ_BFQD_FAST,
++ BFQ_BFQD_SLOW,
++};
++
++/**
++ * struct bfq_data - per-device data structure.
++ *
++ * All the fields are protected by the @queue lock.
++ */
++struct bfq_data {
++ /* request queue for the device */
++ struct request_queue *queue;
++
++ /* root bfq_group for the device */
++ struct bfq_group *root_group;
++
++ /*
++ * rbtree of weight counters of @bfq_queues, sorted by
++ * weight. Used to keep track of whether all @bfq_queues have
++ * the same weight. The tree contains one counter for each
++ * distinct weight associated to some active and not
++ * weight-raised @bfq_queue (see the comments to the functions
++ * bfq_weights_tree_[add|remove] for further details).
++ */
++ struct rb_root queue_weights_tree;
++ /*
++ * rbtree of non-queue @bfq_entity weight counters, sorted by
++ * weight. Used to keep track of whether all @bfq_groups have
++ * the same weight. The tree contains one counter for each
++ * distinct weight associated to some active @bfq_group (see
++ * the comments to the functions bfq_weights_tree_[add|remove]
++ * for further details).
++ */
++ struct rb_root group_weights_tree;
++
++ /*
++ * Number of bfq_queues containing requests (including the
++ * queue in service, even if it is idling).
++ */
++ int busy_queues;
++ /* number of weight-raised busy @bfq_queues */
++ int wr_busy_queues;
++ /* number of queued requests */
++ int queued;
++ /* number of requests dispatched and waiting for completion */
++ int rq_in_driver;
++
++ /*
++ * Maximum number of requests in driver in the last
++ * @hw_tag_samples completed requests.
++ */
++ int max_rq_in_driver;
++ /* number of samples used to calculate hw_tag */
++ int hw_tag_samples;
++ /* flag set to one if the driver is showing a queueing behavior */
++ int hw_tag;
++
++ /* number of budgets assigned */
++ int budgets_assigned;
++
++ /*
++ * Timer set when idling (waiting) for the next request from
++ * the queue in service.
++ */
++ struct hrtimer idle_slice_timer;
++ /* delayed work to restart dispatching on the request queue */
++ struct work_struct unplug_work;
++
++ /* bfq_queue in service */
++ struct bfq_queue *in_service_queue;
++ /* bfq_io_cq (bic) associated with the @in_service_queue */
++ struct bfq_io_cq *in_service_bic;
++
++ /* on-disk position of the last served request */
++ sector_t last_position;
++
++ /* time of last request completion (ns) */
++ u64 last_completion;
++
++ /* time of first rq dispatch in current observation interval (ns) */
++ u64 first_dispatch;
++ /* time of last rq dispatch in current observation interval (ns) */
++ u64 last_dispatch;
++
++ /* beginning of the last budget */
++ ktime_t last_budget_start;
++ /* beginning of the last idle slice */
++ ktime_t last_idling_start;
++
++ /* number of samples in current observation interval */
++ int peak_rate_samples;
++ /* num of samples of seq dispatches in current observation interval */
++ u32 sequential_samples;
++ /* total num of sectors transferred in current observation interval */
++ u64 tot_sectors_dispatched;
++ /* max rq size seen during current observation interval (sectors) */
++ u32 last_rq_max_size;
++ /* time elapsed from first dispatch in current observ. interval (us) */
++ u64 delta_from_first;
++ /* current estimate of device peak rate */
++ u32 peak_rate;
++
++ /* maximum budget allotted to a bfq_queue before rescheduling */
++ int bfq_max_budget;
++
++ /* list of all the bfq_queues active on the device */
++ struct list_head active_list;
++ /* list of all the bfq_queues idle on the device */
++ struct list_head idle_list;
++
++ /*
++ * Timeout for async/sync requests; when it fires, requests
++ * are served in fifo order.
++ */
++ u64 bfq_fifo_expire[2];
++ /* weight of backward seeks wrt forward ones */
++ unsigned int bfq_back_penalty;
++ /* maximum allowed backward seek */
++ unsigned int bfq_back_max;
++ /* maximum idling time */
++ u32 bfq_slice_idle;
++
++ /* user-configured max budget value (0 for auto-tuning) */
++ int bfq_user_max_budget;
++ /*
++ * Timeout for bfq_queues to consume their budget; used to
++ * prevent seeky queues from imposing long latencies to
++ * sequential or quasi-sequential ones (this also implies that
++ * seeky queues cannot receive guarantees in the service
++ * domain; after a timeout they are charged for the time they
++ * have been in service, to preserve fairness among them, but
++ * without service-domain guarantees).
++ */
++ unsigned int bfq_timeout;
++
++ /*
++ * Number of consecutive requests that must be issued within
++ * the idle time slice to set again idling to a queue which
++ * was marked as non-I/O-bound (see the definition of the
++ * IO_bound flag for further details).
++ */
++ unsigned int bfq_requests_within_timer;
++
++ /*
++ * Force device idling whenever needed to provide accurate
++ * service guarantees, without caring about throughput
++ * issues. CAVEAT: this may even increase latencies, in case
++ * of useless idling for processes that did stop doing I/O.
++ */
++ bool strict_guarantees;
++
++ /*
++ * Last time at which a queue entered the current burst of
++ * queues being activated shortly after each other; for more
++ * details about this and the following parameters related to
++ * a burst of activations, see the comments on the function
++ * bfq_handle_burst.
++ */
++ unsigned long last_ins_in_burst;
++ /*
++ * Reference time interval used to decide whether a queue has
++ * been activated shortly after @last_ins_in_burst.
++ */
++ unsigned long bfq_burst_interval;
++ /* number of queues in the current burst of queue activations */
++ int burst_size;
++
++ /* common parent entity for the queues in the burst */
++ struct bfq_entity *burst_parent_entity;
++ /* Maximum burst size above which the current queue-activation
++ * burst is deemed as 'large'.
++ */
++ unsigned long bfq_large_burst_thresh;
++ /* true if a large queue-activation burst is in progress */
++ bool large_burst;
++ /*
++ * Head of the burst list (as for the above fields, more
++ * details in the comments on the function bfq_handle_burst).
++ */
++ struct hlist_head burst_list;
++
++ /* if set to true, low-latency heuristics are enabled */
++ bool low_latency;
++ /*
++ * Maximum factor by which the weight of a weight-raised queue
++ * is multiplied.
++ */
++ unsigned int bfq_wr_coeff;
++ /* maximum duration of a weight-raising period (jiffies) */
++ unsigned int bfq_wr_max_time;
++
++ /* Maximum weight-raising duration for soft real-time processes */
++ unsigned int bfq_wr_rt_max_time;
++ /*
++ * Minimum idle period after which weight-raising may be
++ * reactivated for a queue (in jiffies).
++ */
++ unsigned int bfq_wr_min_idle_time;
++ /*
++ * Minimum period between request arrivals after which
++ * weight-raising may be reactivated for an already busy async
++ * queue (in jiffies).
++ */
++ unsigned long bfq_wr_min_inter_arr_async;
++
++ /* Max service-rate for a soft real-time queue, in sectors/sec */
++ unsigned int bfq_wr_max_softrt_rate;
++ /*
++ * Cached value of the product R*T, used for computing the
++ * maximum duration of weight raising automatically.
++ */
++ u64 RT_prod;
++ /* device-speed class for the low-latency heuristic */
++ enum bfq_device_speed device_speed;
++
++ /* fallback dummy bfqq for extreme OOM conditions */
++ struct bfq_queue oom_bfqq;
++};
++
++enum bfqq_state_flags {
++ BFQ_BFQQ_FLAG_just_created = 0, /* queue just allocated */
++ BFQ_BFQQ_FLAG_busy, /* has requests or is in service */
++ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */
++ BFQ_BFQQ_FLAG_non_blocking_wait_rq, /*
++ * waiting for a request
++ * without idling the device
++ */
++ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
++ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
++ BFQ_BFQQ_FLAG_has_short_ttime, /* queue has a short think time */
++ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
++ BFQ_BFQQ_FLAG_IO_bound, /*
++ * bfqq has timed-out at least once
++ * having consumed at most 2/10 of
++ * its budget
++ */
++ BFQ_BFQQ_FLAG_in_large_burst, /*
++ * bfqq activated in a large burst,
++ * see comments to bfq_handle_burst.
++ */
++ BFQ_BFQQ_FLAG_softrt_update, /*
++ * may need softrt-next-start
++ * update
++ */
++ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
++ BFQ_BFQQ_FLAG_split_coop /* shared bfqq will be split */
++};
++
++#define BFQ_BFQQ_FNS(name) \
++static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
++{ \
++ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \
++}
++
++BFQ_BFQQ_FNS(just_created);
++BFQ_BFQQ_FNS(busy);
++BFQ_BFQQ_FNS(wait_request);
++BFQ_BFQQ_FNS(non_blocking_wait_rq);
++BFQ_BFQQ_FNS(must_alloc);
++BFQ_BFQQ_FNS(fifo_expire);
++BFQ_BFQQ_FNS(has_short_ttime);
++BFQ_BFQQ_FNS(sync);
++BFQ_BFQQ_FNS(IO_bound);
++BFQ_BFQQ_FNS(in_large_burst);
++BFQ_BFQQ_FNS(coop);
++BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(softrt_update);
++#undef BFQ_BFQQ_FNS
++
++/* Logging facilities. */
++#ifdef CONFIG_BFQ_REDIRECT_TO_CONSOLE
++
++static const char *checked_dev_name(const struct device *dev)
++{
++ static const char nodev[] = "nodev";
++
++ if (dev)
++ return dev_name(dev);
++
++ return nodev;
++}
++
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ assert_spin_locked((bfqd)->queue->queue_lock); \
++ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
++ pr_crit("%s bfq%d%c %s " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ __pbuf, ##args); \
++} while (0)
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
++ pr_crit("%s %s " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ __pbuf, ##args); \
++} while (0)
++
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ pr_crit("%s bfq%d%c " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ ##args)
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
++
++#endif /* BFQ_GROUP_IOSCHED_ENABLED */
++
++#define bfq_log(bfqd, fmt, args...) \
++ pr_crit("%s bfq " fmt "\n", \
++ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
++ ##args)
++
++#else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ assert_spin_locked((bfqd)->queue->queue_lock); \
++ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
++ (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ __pbuf, ##args); \
++} while (0)
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
++ char __pbuf[128]; \
++ \
++ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
++ blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
++} while (0)
++
++#else /* BFQ_GROUP_IOSCHED_ENABLED */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
++ ##args)
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
++
++#endif /* BFQ_GROUP_IOSCHED_ENABLED */
++
++#define bfq_log(bfqd, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++#endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++
++/* Expiration reasons. */
++enum bfqq_expiration {
++ BFQ_BFQQ_TOO_IDLE = 0, /*
++ * queue has been idling for
++ * too long
++ */
++ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */
++ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */
++ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */
++ BFQ_BFQQ_PREEMPTED /* preemption in progress */
++};
++
++
++struct bfqg_stats {
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ /* number of ios merged */
++ struct blkg_rwstat merged;
++ /* total time spent on device in ns, may not be accurate w/ queueing */
++ struct blkg_rwstat service_time;
++ /* total time spent waiting in scheduler queue in ns */
++ struct blkg_rwstat wait_time;
++ /* number of IOs queued up */
++ struct blkg_rwstat queued;
++ /* total disk time and nr sectors dispatched by this group */
++ struct blkg_stat time;
++ /* sum of number of ios queued across all samples */
++ struct blkg_stat avg_queue_size_sum;
++ /* count of samples taken for average */
++ struct blkg_stat avg_queue_size_samples;
++ /* how many times this group has been removed from service tree */
++ struct blkg_stat dequeue;
++ /* total time spent waiting for it to be assigned a timeslice. */
++ struct blkg_stat group_wait_time;
++ /* time spent idling for this blkcg_gq */
++ struct blkg_stat idle_time;
++ /* total time with empty current active q with other requests queued */
++ struct blkg_stat empty_time;
++ /* fields after this shouldn't be cleared on stat reset */
++ uint64_t start_group_wait_time;
++ uint64_t start_idle_time;
++ uint64_t start_empty_time;
++ uint16_t flags;
++#endif
++};
++
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++/*
++ * struct bfq_group_data - per-blkcg storage for the blkio subsystem.
++ *
++ * @ps: @blkcg_policy_storage that this structure inherits
++ * @weight: weight of the bfq_group
++ */
++struct bfq_group_data {
++ /* must be the first member */
++ struct blkcg_policy_data pd;
++
++ unsigned int weight;
++};
++
++/**
++ * struct bfq_group - per (device, cgroup) data structure.
++ * @entity: schedulable entity to insert into the parent group sched_data.
++ * @sched_data: own sched_data, to contain child entities (they may be
++ * both bfq_queues and bfq_groups).
++ * @bfqd: the bfq_data for the device this group acts upon.
++ * @async_bfqq: array of async queues for all the tasks belonging to
++ * the group, one queue per ioprio value per ioprio_class,
++ * except for the idle class that has only one queue.
++ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
++ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
++ * to avoid too many special cases during group creation/
++ * migration.
++ * @active_entities: number of active entities belonging to the group;
++ * unused for the root group. Used to know whether there
++ * are groups with more than one active @bfq_entity
++ * (see the comments to the function
++ * bfq_bfqq_may_idle()).
++ * @rq_pos_tree: rbtree sorted by next_request position, used when
++ * determining if two or more queues have interleaving
++ * requests (see bfq_find_close_cooperator()).
++ *
++ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
++ * there is a set of bfq_groups, each one collecting the lower-level
++ * entities belonging to the group that are acting on the same device.
++ *
++ * Locking works as follows:
++ * o @bfqd is protected by the queue lock, RCU is used to access it
++ * from the readers.
++ * o All the other fields are protected by the @bfqd queue lock.
++ */
++struct bfq_group {
++ /* must be the first member */
++ struct blkg_policy_data pd;
++
++ struct bfq_entity entity;
++ struct bfq_sched_data sched_data;
++
++ void *bfqd;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct bfq_entity *my_entity;
++
++ int active_entities;
++
++ struct rb_root rq_pos_tree;
++
++ struct bfqg_stats stats;
++};
++
++#else
++struct bfq_group {
++ struct bfq_sched_data sched_data;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct rb_root rq_pos_tree;
++};
++#endif
++
++static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
++
++static unsigned int bfq_class_idx(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ return bfqq ? bfqq->ioprio_class - 1 :
++ BFQ_DEFAULT_GRP_CLASS - 1;
++}
++
++static struct bfq_service_tree *
++bfq_entity_service_tree(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sched_data = entity->sched_data;
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ unsigned int idx = bfq_class_idx(entity);
++
++ BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
++ BUG_ON(sched_data == NULL);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "entity_service_tree %p %d",
++ sched_data->service_tree + idx, idx);
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ else {
++ struct bfq_group *bfqg =
++ container_of(entity, struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
++ "entity_service_tree %p %d",
++ sched_data->service_tree + idx, idx);
++ }
++#endif
++ return sched_data->service_tree + idx;
++}
++
++static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
++{
++ return bic->bfqq[is_sync];
++}
++
++static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq,
++ bool is_sync)
++{
++ bic->bfqq[is_sync] = bfqq;
++}
++
++static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
++{
++ return bic->icq.q->elevator->elevator_data;
++}
++
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++
++static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *group_entity = bfqq->entity.parent;
++
++ if (!group_entity)
++ group_entity = &bfqq->bfqd->root_group->entity;
++
++ return container_of(group_entity, struct bfq_group, entity);
++}
++
++#else
++
++static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
++{
++ return bfqq->bfqd->root_group;
++}
++
++#endif
++
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio);
++static void bfq_put_queue(struct bfq_queue *bfqq);
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bio *bio, bool is_sync,
++ struct bfq_io_cq *bic);
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg);
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
++#endif
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
++
++#endif /* _BFQ_H */
+
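+bfq-mq.h above generates its per-flag queue helpers with the
+BFQ_BFQQ_FNS() macro. As a reading aid, this is roughly what a single
+invocation such as BFQ_BFQQ_FNS(busy) expands to (sketch, not part of
+the patch):
+
+static void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
+{
+	(bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_busy);
+}
+static void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
+{
+	(bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_busy);
+}
+static int bfq_bfqq_busy(const struct bfq_queue *bfqq)
+{
+	return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_busy)) != 0;
+}
+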
+From 0bd96428e086fd28800efdf5f0a5f62869af6e30 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Sat, 21 Jan 2017 12:41:14 +0100
+Subject: [PATCH 10/51] Move thinktime from bic to bfqq
+
+Prep change to make it possible to protect this field with a
+scheduler lock.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 28 ++++++++++++++--------------
+ block/bfq-mq.h | 30 ++++++++++++++++--------------
+ 2 files changed, 30 insertions(+), 28 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index d1125aee658c..65f5dfb79417 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -698,6 +698,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ if (unlikely(busy))
+ old_wr_coeff = bfqq->wr_coeff;
+
++ bfqq->ttime = bic->saved_ttime;
+ bfqq->wr_coeff = bic->saved_wr_coeff;
+ bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
+ BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt));
+@@ -1287,7 +1288,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
+ * details on the usage of the next variable.
+ */
+ arrived_in_time = ktime_get_ns() <=
+- RQ_BIC(rq)->ttime.last_end_request +
++ bfqq->ttime.last_end_request +
+ bfqd->bfq_slice_idle * 3;
+
+ bfq_log_bfqq(bfqd, bfqq,
+@@ -2048,6 +2049,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ if (!bic)
+ return;
+
++ bic->saved_ttime = bfqq->ttime;
+ bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+@@ -3948,11 +3950,6 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_put_queue(bfqq); /* release process reference */
+ }
+
+-static void bfq_init_icq(struct io_cq *icq)
+-{
+- icq_to_bic(icq)->ttime.last_end_request = ktime_get_ns() - (1ULL<<32);
+-}
+-
+ static void bfq_exit_icq(struct io_cq *icq)
+ {
+ struct bfq_io_cq *bic = icq_to_bic(icq);
+@@ -4084,6 +4081,9 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_mark_bfqq_just_created(bfqq);
+ } else
+ bfq_clear_bfqq_sync(bfqq);
++
++ bfqq->ttime.last_end_request = ktime_get_ns() - (1ULL<<32);
++
+ bfq_mark_bfqq_IO_bound(bfqq);
+
+ /* Tentative initial value to trade off between thr and lat */
+@@ -4191,14 +4191,14 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
+ }
+
+ static void bfq_update_io_thinktime(struct bfq_data *bfqd,
+- struct bfq_io_cq *bic)
++ struct bfq_queue *bfqq)
+ {
+- struct bfq_ttime *ttime = &bic->ttime;
+- u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request;
++ struct bfq_ttime *ttime = &bfqq->ttime;
++ u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
+
+ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle);
+
+- ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++ ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
+ ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
+ ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
+ ttime->ttime_samples);
+@@ -4240,8 +4240,8 @@ static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
+ * decide whether to mark as has_short_ttime
+ */
+ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
+- (bfq_sample_valid(bic->ttime.ttime_samples) &&
+- bic->ttime.ttime_mean > bfqd->bfq_slice_idle))
++ (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
++ bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
+ has_short_ttime = false;
+
+ bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
+@@ -4265,7 +4265,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (rq->cmd_flags & REQ_META)
+ bfqq->meta_pending++;
+
+- bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_io_thinktime(bfqd, bfqq);
+ bfq_update_has_short_ttime(bfqd, bfqq, bic);
+ bfq_update_io_seektime(bfqd, bfqq, rq);
+
+@@ -4436,7 +4436,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+
+ now_ns = ktime_get_ns();
+
+- RQ_BIC(rq)->ttime.last_end_request = now_ns;
++ bfqq->ttime.last_end_request = now_ns;
+
+ /*
+ * Using us instead of ns, to get a reasonable precision in
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 53954d1b87f8..0f51f270469c 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -210,6 +210,18 @@ struct bfq_entity {
+ struct bfq_group;
+
+ /**
++ * struct bfq_ttime - per process thinktime stats.
++ */
++struct bfq_ttime {
++ u64 last_end_request; /* completion time of last request */
++
++ u64 ttime_total; /* total process thinktime */
++ unsigned long ttime_samples; /* number of thinktime samples */
++ u64 ttime_mean; /* average process thinktime */
++
++};
++
++/**
+ * struct bfq_queue - leaf schedulable entity.
+ *
+ * A bfq_queue is a leaf request queue; it can be associated with an
+@@ -270,6 +282,9 @@ struct bfq_queue {
+ /* node for active/idle bfqq list inside parent bfqd */
+ struct list_head bfqq_list;
+
++ /* associated @bfq_ttime struct */
++ struct bfq_ttime ttime;
++
+ /* bit vector: a 1 for each seeky requests in history */
+ u32 seek_history;
+
+@@ -333,18 +348,6 @@ struct bfq_queue {
+ };
+
+ /**
+- * struct bfq_ttime - per process thinktime stats.
+- */
+-struct bfq_ttime {
+- u64 last_end_request; /* completion time of last request */
+-
+- u64 ttime_total; /* total process thinktime */
+- unsigned long ttime_samples; /* number of thinktime samples */
+- u64 ttime_mean; /* average process thinktime */
+-
+-};
+-
+-/**
+ * struct bfq_io_cq - per (request_queue, io_context) structure.
+ */
+ struct bfq_io_cq {
+@@ -352,8 +355,6 @@ struct bfq_io_cq {
+ struct io_cq icq; /* must be the first member */
+ /* array of two process queues, the sync and the async */
+ struct bfq_queue *bfqq[2];
+- /* associated @bfq_ttime struct */
+- struct bfq_ttime ttime;
+ /* per (request_queue, blkcg) ioprio */
+ int ioprio;
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+@@ -390,6 +391,7 @@ struct bfq_io_cq {
+ unsigned long saved_last_wr_start_finish;
+ unsigned long saved_wr_start_at_switch_to_srt;
+ unsigned int saved_wr_cur_max_time;
++ struct bfq_ttime saved_ttime;
+ };
+
+ enum bfq_device_speed {
+
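+The net effect of the patch above is that think-time statistics now
+follow the bfq_queue rather than the bfq_io_cq, with the bic keeping
+only a snapshot across queue merge and split. A simplified sketch of
+the save/restore pairing (function names and signatures shortened,
+unrelated fields omitted; the hunks above show the real code):
+
+/* on queue merge: stash the per-queue stats in the bic */
+static void save_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
+{
+	bic->saved_ttime = bfqq->ttime;
+	/* ... other saved_* fields ... */
+}
+
+/* on queue split/resume: copy the snapshot back into the queue */
+static void resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
+{
+	bfqq->ttime = bic->saved_ttime;
+	/* ... other restored fields ... */
+}
+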
+From 351a9aea7c0c9c30edacdbf2a3c0d089470de1e8 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 18 Jan 2017 11:42:22 +0100
+Subject: [PATCH 11/51] Embed bfq-ioc.c and add locking on request queue
+
+The version of bfq-ioc.c for bfq-iosched.c is not correct any more for
+bfq-mq, because, in bfq-mq, the request queue lock is not being held
+when bfq_bic_lookup is invoked. That function must then take that lock
+on its own. This commit removes the inclusion of bfq-ioc.c, copies the
+content of bfq-ioc.c into bfq-mq-iosched.c, and adds the grabbing of
+the lock.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 39 ++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 36 insertions(+), 3 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 65f5dfb79417..756a618d5902 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -195,7 +195,39 @@ static int device_speed_thresh[2];
+
+ static void bfq_schedule_dispatch(struct bfq_data *bfqd);
+
+-#include "bfq-ioc.c"
++/**
++ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
++ * @icq: the iocontext queue.
++ */
++static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
++{
++ /* bic->icq is the first member, %NULL will convert to %NULL */
++ return container_of(icq, struct bfq_io_cq, icq);
++}
++
++/**
++ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
++ * @bfqd: the lookup key.
++ * @ioc: the io_context of the process doing I/O.
++ * @q: the request queue.
++ */
++static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
++ struct io_context *ioc,
++ struct request_queue *q)
++{
++ if (ioc) {
++ struct bfq_io_cq *icq;
++
++ spin_lock_irq(q->queue_lock);
++ icq = icq_to_bic(ioc_lookup_icq(ioc, q));
++ spin_unlock_irq(q->queue_lock);
++
++ return icq;
++ }
++
++ return NULL;
++}
++
+ #include "bfq-sched.c"
+ #include "bfq-cgroup-included.c"
+
+@@ -1520,13 +1552,14 @@ static void bfq_add_request(struct request *rq)
+ }
+
+ static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
+- struct bio *bio)
++ struct bio *bio,
++ struct request_queue *q)
+ {
+ struct task_struct *tsk = current;
+ struct bfq_io_cq *bic;
+ struct bfq_queue *bfqq;
+
+- bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ bic = bfq_bic_lookup(bfqd, tsk->io_context, q);
+ if (!bic)
+ return NULL;
+
+
+From ed0d64e27b2308813a2a846139e405e0479f0849 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 20 Dec 2016 09:07:19 +0100
+Subject: [PATCH 12/51] Modify interface and operation to comply with
+ blk-mq-sched
+
+As for modifications of the operation, the major changes are the introduction
+of a scheduler lock, and the move of the body of the exit_icq hook to deferred
+work. The latter change has been made to avoid deadlocks caused by the
+combination of the following facts: 1) such a body takes the scheduler lock,
+and, if not deferred, 2) it does so from inside the exit_icq hook, which is
+invoked with the queue lock held, and 3) there is at least one code path,
+namely that starting from bfq_bio_merge, which takes these locks in the
+opposite order.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 4 -
+ block/bfq-mq-iosched.c | 695 ++++++++++++++++++++++++--------------------
+ block/bfq-mq.h | 35 +--
+ 3 files changed, 394 insertions(+), 340 deletions(-)
+
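+The deadlock described in the message above is a classic ABBA
+lock-ordering inversion: the exit_icq body would take the scheduler lock
+while already holding the queue lock, whereas the path starting from
+bfq_bio_merge takes the scheduler lock first. A generic sketch of the
+inversion (illustrative only, not the actual kernel code paths):
+
+#include <linux/spinlock.h>
+
+static DEFINE_SPINLOCK(queue_lock);	/* lock A */
+static DEFINE_SPINLOCK(sched_lock);	/* lock B */
+
+static void exit_icq_body(void)		/* entered with lock A held */
+{
+	spin_lock(&sched_lock);		/* A -> B */
+	/* ... */
+	spin_unlock(&sched_lock);
+}
+
+static void bio_merge_path(void)
+{
+	spin_lock(&sched_lock);		/* B ... */
+	spin_lock(&queue_lock);		/* ... -> A: deadlocks against A -> B */
+	/* ... */
+	spin_unlock(&queue_lock);
+	spin_unlock(&sched_lock);
+}
+
+Deferring the exit_icq body to a work item lets it run without the queue
+lock held, removing the A -> B ordering and with it the inversion.
+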
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 9c483b658179..8a73de76f32b 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -472,8 +472,6 @@ static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
+ struct bfq_group *bfqg, *parent;
+ struct bfq_entity *entity;
+
+- assert_spin_locked(bfqd->queue->queue_lock);
+-
+ bfqg = bfq_lookup_bfqg(bfqd, blkcg);
+
+ if (unlikely(!bfqg))
+@@ -602,8 +600,6 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
+ struct bfq_group *bfqg;
+ struct bfq_entity *entity;
+
+- lockdep_assert_held(bfqd->queue->queue_lock);
+-
+ bfqg = bfq_find_set_group(bfqd, blkcg);
+
+ if (unlikely(!bfqg))
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 756a618d5902..c963d92a32c2 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -81,7 +81,13 @@
+ #include <linux/jiffies.h>
+ #include <linux/rbtree.h>
+ #include <linux/ioprio.h>
++#include <linux/sbitmap.h>
++#include <linux/delay.h>
++
+ #include "blk.h"
++#include "blk-mq.h"
++#include "blk-mq-tag.h"
++#include "blk-mq-sched.h"
+ #undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */
+ #include "bfq-mq.h"
+
+@@ -193,8 +199,6 @@ static int device_speed_thresh[2];
+ #define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
+ #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
+
+-static void bfq_schedule_dispatch(struct bfq_data *bfqd);
+-
+ /**
+ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
+ * @icq: the iocontext queue.
+@@ -216,11 +220,12 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
+ struct request_queue *q)
+ {
+ if (ioc) {
++ unsigned long flags;
+ struct bfq_io_cq *icq;
+
+- spin_lock_irq(q->queue_lock);
++ spin_lock_irqsave(q->queue_lock, flags);
+ icq = icq_to_bic(ioc_lookup_icq(ioc, q));
+- spin_unlock_irq(q->queue_lock);
++ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return icq;
+ }
+@@ -244,7 +249,7 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ {
+ if (bfqd->queued != 0) {
+ bfq_log(bfqd, "schedule dispatch");
+- kblockd_schedule_work(&bfqd->unplug_work);
++ blk_mq_run_hw_queues(bfqd->queue, true);
+ }
+ }
+
+@@ -768,9 +773,7 @@ static int bfqq_process_refs(struct bfq_queue *bfqq)
+ {
+ int process_refs, io_refs;
+
+- lockdep_assert_held(bfqq->bfqd->queue->queue_lock);
+-
+- io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++ io_refs = bfqq->allocated;
+ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st;
+ BUG_ON(process_refs < 0);
+ return process_refs;
+@@ -1584,6 +1587,7 @@ static sector_t get_sdist(sector_t last_pos, struct request *rq)
+ return sdist;
+ }
+
++#if 0 /* Still not clear if we can do without next two functions */
+ static void bfq_activate_request(struct request_queue *q, struct request *rq)
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+@@ -1597,8 +1601,10 @@ static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
+ BUG_ON(bfqd->rq_in_driver == 0);
+ bfqd->rq_in_driver--;
+ }
++#endif
+
+-static void bfq_remove_request(struct request *rq)
++static void bfq_remove_request(struct request_queue *q,
++ struct request *rq)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+ struct bfq_data *bfqd = bfqq->bfqd;
+@@ -1619,6 +1625,10 @@ static void bfq_remove_request(struct request *rq)
+ bfqd->queued--;
+ elv_rb_del(&bfqq->sort_list, rq);
+
++ elv_rqhash_del(q, rq);
++ if (q->last_merge == rq)
++ q->last_merge = NULL;
++
+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
+ bfqq->next_rq = NULL;
+
+@@ -1659,13 +1669,36 @@ static void bfq_remove_request(struct request *rq)
+ bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
+ }
+
+-static enum elv_merge bfq_merge(struct request_queue *q, struct request **req,
+- struct bio *bio)
++static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
++{
++ struct request_queue *q = hctx->queue;
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct request *free = NULL;
++ bool ret;
++
++ spin_lock_irq(&bfqd->lock);
++ ret = blk_mq_sched_try_merge(q, bio, &free);
++
++ /*
++ * XXX Not yet freeing without lock held, to avoid an
++ * inconsistency with respect to the lock-protected invocation
++ * of blk_mq_sched_try_insert_merge in bfq_bio_merge. Waiting
++ * for clarifications from Jens.
++ */
++ if (free)
++ blk_mq_free_request(free);
++ spin_unlock_irq(&bfqd->lock);
++
++ return ret;
++}
++
++static int bfq_request_merge(struct request_queue *q, struct request **req,
++ struct bio *bio)
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct request *__rq;
+
+- __rq = bfq_find_rq_fmerge(bfqd, bio);
++ __rq = bfq_find_rq_fmerge(bfqd, bio, q);
+ if (__rq && elv_bio_merge_ok(__rq, bio)) {
+ *req = __rq;
+ return ELEVATOR_FRONT_MERGE;
+@@ -1674,7 +1707,7 @@ static enum elv_merge bfq_merge(struct request_queue *q, struct request **req,
+ return ELEVATOR_NO_MERGE;
+ }
+
+-static void bfq_merged_request(struct request_queue *q, struct request *req,
++static void bfq_request_merged(struct request_queue *q, struct request *req,
+ enum elv_merge type)
+ {
+ if (type == ELEVATOR_FRONT_MERGE &&
+@@ -1689,6 +1722,8 @@ static void bfq_merged_request(struct request_queue *q, struct request *req,
+ /* Reposition request in its sort_list */
+ elv_rb_del(&bfqq->sort_list, req);
+ elv_rb_add(&bfqq->sort_list, req);
++
++ spin_lock_irq(&bfqd->lock);
+ /* Choose next request to be served for bfqq */
+ prev = bfqq->next_rq;
+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
+@@ -1704,22 +1739,19 @@ static void bfq_merged_request(struct request_queue *q, struct request *req,
+ bfq_updated_next_req(bfqd, bfqq);
+ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
++ spin_unlock_irq(&bfqd->lock);
+ }
+ }
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+-static void bfq_bio_merged(struct request_queue *q, struct request *req,
+- struct bio *bio)
+-{
+- bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf);
+-}
+-#endif
+-
+-static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+ struct request *next)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
+
++ if (!RB_EMPTY_NODE(&rq->rb_node))
++ goto end;
++ spin_lock_irq(&bfqq->bfqd->lock);
++
+ /*
+ * If next and rq belong to the same bfq_queue and next is older
+ * than rq, then reposition rq in the fifo (by substituting next
+@@ -1740,7 +1772,10 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq,
+ if (bfqq->next_rq == next)
+ bfqq->next_rq = rq;
+
+- bfq_remove_request(next);
++ bfq_remove_request(q, next);
++
++ spin_unlock_irq(&bfqq->bfqd->lock);
++end:
+ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
+ }
+
+@@ -1786,7 +1821,7 @@ static void bfq_end_wr(struct bfq_data *bfqd)
+ {
+ struct bfq_queue *bfqq;
+
+- spin_lock_irq(bfqd->queue->queue_lock);
++ spin_lock_irq(&bfqd->lock);
+
+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
+ bfq_bfqq_end_wr(bfqq);
+@@ -1794,7 +1829,7 @@ static void bfq_end_wr(struct bfq_data *bfqd)
+ bfq_bfqq_end_wr(bfqq);
+ bfq_end_wr_async(bfqd);
+
+- spin_unlock_irq(bfqd->queue->queue_lock);
++ spin_unlock_irq(&bfqd->lock);
+ }
+
+ static sector_t bfq_io_struct_pos(void *io_struct, bool request)
+@@ -2184,8 +2219,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+ bfq_put_queue(bfqq);
+ }
+
+-static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+- struct bio *bio)
++static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
++ struct bio *bio)
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ bool is_sync = op_is_sync(bio->bi_opf);
+@@ -2203,7 +2238,7 @@ static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ * merge only if rq is queued there.
+ * Queue lock is held here.
+ */
+- bic = bfq_bic_lookup(bfqd, current->io_context);
++ bic = bfq_bic_lookup(bfqd, current->io_context, q);
+ if (!bic)
+ return false;
+
+@@ -2228,12 +2263,6 @@ static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ return bfqq == RQ_BFQQ(rq);
+ }
+
+-static int bfq_allow_rq_merge(struct request_queue *q, struct request *rq,
+- struct request *next)
+-{
+- return RQ_BFQQ(rq) == RQ_BFQQ(next);
+-}
+-
+ /*
+ * Set the maximum time for the in-service queue to consume its
+ * budget. This prevents seeky processes from lowering the throughput.
+@@ -2264,7 +2293,6 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+ {
+ if (bfqq) {
+ bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
+- bfq_mark_bfqq_must_alloc(bfqq);
+ bfq_clear_bfqq_fifo_expire(bfqq);
+
+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
+@@ -2703,27 +2731,28 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ }
+
+ /*
+- * Move request from internal lists to the dispatch list of the request queue
++ * Remove request from internal lists.
+ */
+-static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+
+ /*
+- * For consistency, the next instruction should have been executed
+- * after removing the request from the queue and dispatching it.
+- * We execute instead this instruction before bfq_remove_request()
+- * (and hence introduce a temporary inconsistency), for efficiency.
+- * In fact, in a forced_dispatch, this prevents two counters related
+- * to bfqq->dispatched to risk to be uselessly decremented if bfqq
+- * is not in service, and then to be incremented again after
+- * incrementing bfqq->dispatched.
++ * For consistency, the next instruction should have been
++ * executed after removing the request from the queue and
++ * dispatching it. We execute instead this instruction before
++ * bfq_remove_request() (and hence introduce a temporary
++ * inconsistency), for efficiency. In fact, should this
++ * dispatch occur for a non in-service bfqq, this anticipated
++ * increment prevents two counters related to bfqq->dispatched
++ * from risking to be, first, uselessly decremented, and then
++ * incremented again when the (new) value of bfqq->dispatched
++ * happens to be taken into account.
+ */
+ bfqq->dispatched++;
+ bfq_update_peak_rate(q->elevator->elevator_data, rq);
+
+- bfq_remove_request(rq);
+- elv_dispatch_sort(q, rq);
++ bfq_remove_request(q, rq);
+ }
+
+ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+@@ -3605,7 +3634,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
+
+ if (bfq_may_expire_for_budg_timeout(bfqq) &&
+- !hrtimer_active(&bfqd->idle_slice_timer) &&
++ !bfq_bfqq_wait_request(bfqq) &&
+ !bfq_bfqq_must_idle(bfqq))
+ goto expire;
+
+@@ -3641,7 +3670,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ * arrives.
+ */
+ if (bfq_bfqq_wait_request(bfqq)) {
+- BUG_ON(!hrtimer_active(&bfqd->idle_slice_timer));
+ /*
+ * If we get here: 1) at least a new request
+ * has arrived but we have not disabled the
+@@ -3668,7 +3696,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ * for a new request, or has requests waiting for a completion and
+ * may idle after their completion, then keep it anyway.
+ */
+- if (hrtimer_active(&bfqd->idle_slice_timer) ||
++ if (bfq_bfqq_wait_request(bfqq) ||
+ (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
+ bfqq = NULL;
+ goto keep_queue;
+@@ -3753,13 +3781,11 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ }
+
+ /*
+- * Dispatch one request from bfqq, moving it to the request queue
+- * dispatch list.
++ * Dispatch next request from bfqq.
+ */
+-static int bfq_dispatch_request(struct bfq_data *bfqd,
+- struct bfq_queue *bfqq)
++static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
+ {
+- int dispatched = 0;
+ struct request *rq = bfqq->next_rq;
+ unsigned long service_to_charge;
+
+@@ -3775,7 +3801,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd,
+
+ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
+
+- bfq_dispatch_insert(bfqd->queue, rq);
++ bfq_dispatch_remove(bfqd->queue, rq);
+
+ /*
+ * If weight raising has to terminate for bfqq, then next
+@@ -3791,86 +3817,61 @@ static int bfq_dispatch_request(struct bfq_data *bfqd,
+ bfq_update_wr_data(bfqd, bfqq);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "dispatched %u sec req (%llu), budg left %d",
++ "dispatched %u sec req (%llu), budg left %d, new disp_nr %d",
+ blk_rq_sectors(rq),
+ (unsigned long long) blk_rq_pos(rq),
+- bfq_bfqq_budget_left(bfqq));
+-
+- dispatched++;
++ bfq_bfqq_budget_left(bfqq),
++ bfqq->dispatched);
+
+ if (!bfqd->in_service_bic) {
+ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
+ bfqd->in_service_bic = RQ_BIC(rq);
+ }
+
++ /*
++ * Expire bfqq, pretending that its budget expired, if bfqq
++ * belongs to CLASS_IDLE and other queues are waiting for
++ * service.
++ */
+ if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
+ goto expire;
+
+- return dispatched;
++ return rq;
+
+ expire:
+ bfq_bfqq_expire(bfqd, bfqq, false, BFQ_BFQQ_BUDGET_EXHAUSTED);
+- return dispatched;
+-}
+-
+-static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
+-{
+- int dispatched = 0;
+-
+- while (bfqq->next_rq) {
+- bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
+- dispatched++;
+- }
+-
+- BUG_ON(!list_empty(&bfqq->fifo));
+- return dispatched;
++ return rq;
+ }
+
+-/*
+- * Drain our current requests.
+- * Used for barriers and when switching io schedulers on-the-fly.
+- */
+-static int bfq_forced_dispatch(struct bfq_data *bfqd)
++static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
+ {
+- struct bfq_queue *bfqq, *n;
+- struct bfq_service_tree *st;
+- int dispatched = 0;
+-
+- bfqq = bfqd->in_service_queue;
+- if (bfqq)
+- __bfq_bfqq_expire(bfqd, bfqq);
++ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+
+ /*
+- * Loop through classes, and be careful to leave the scheduler
+- * in a consistent state, as feedback mechanisms and vtime
+- * updates cannot be disabled during the process.
++ * Avoiding lock: a race on bfqd->busy_queues should cause at
++ * most a call to dispatch for nothing
+ */
+- list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
+- st = bfq_entity_service_tree(&bfqq->entity);
+-
+- dispatched += __bfq_forced_dispatch_bfqq(bfqq);
+-
+- bfqq->max_budget = bfq_max_budget(bfqd);
+- bfq_forget_idle(st);
+- }
+-
+- BUG_ON(bfqd->busy_queues != 0);
+-
+- return dispatched;
++ return !list_empty_careful(&bfqd->dispatch) ||
++ bfqd->busy_queues > 0;
+ }
+
+-static int bfq_dispatch_requests(struct request_queue *q, int force)
++static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+- struct bfq_data *bfqd = q->elevator->elevator_data;
+- struct bfq_queue *bfqq;
++ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
++ struct request *rq = NULL;
++ struct bfq_queue *bfqq = NULL;
++
++ if (!list_empty(&bfqd->dispatch)) {
++ rq = list_first_entry(&bfqd->dispatch, struct request,
++ queuelist);
++ list_del_init(&rq->queuelist);
++ goto exit;
++ }
+
+ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
+
+ if (bfqd->busy_queues == 0)
+- return 0;
+-
+- if (unlikely(force))
+- return bfq_forced_dispatch(bfqd);
++ goto exit;
+
+ /*
+ * Force device to serve one request at a time if
+@@ -3885,25 +3886,39 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
+ * throughput.
+ */
+ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
+- return 0;
++ goto exit;
+
+ bfqq = bfq_select_queue(bfqd);
+ if (!bfqq)
+- return 0;
++ goto exit;
+
+ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
+
+ BUG_ON(bfq_bfqq_wait_request(bfqq));
+
+- if (!bfq_dispatch_request(bfqd, bfqq))
+- return 0;
+-
+- bfq_log_bfqq(bfqd, bfqq, "dispatched %s request",
+- bfq_bfqq_sync(bfqq) ? "sync" : "async");
++ rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
+
+ BUG_ON(bfqq->next_rq == NULL &&
+ bfqq->entity.budget < bfqq->entity.service);
+- return 1;
++exit:
++ if (rq) {
++ rq->rq_flags |= RQF_STARTED;
++ bfqd->rq_in_driver++;
++ }
++
++ return rq;
++}
++
++static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
++{
++ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
++ struct request *rq;
++
++ spin_lock_irq(&bfqd->lock);
++ rq = __bfq_dispatch_request(hctx);
++ spin_unlock_irq(&bfqd->lock);
++
++ return rq;
+ }
+
+ /*
+@@ -3921,13 +3936,14 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+
+ BUG_ON(bfqq->ref <= 0);
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++ if (bfqq->bfqd)
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++
+ bfqq->ref--;
+ if (bfqq->ref)
+ return;
+
+ BUG_ON(rb_first(&bfqq->sort_list));
+- BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+@@ -3942,7 +3958,8 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ */
+ hlist_del_init(&bfqq->burst_list_node);
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
++ if (bfqq->bfqd)
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+
+ kmem_cache_free(bfq_pool, bfqq);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+@@ -3983,29 +4000,52 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_put_queue(bfqq); /* release process reference */
+ }
+
+-static void bfq_exit_icq(struct io_cq *icq)
++static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ {
+- struct bfq_io_cq *bic = icq_to_bic(icq);
+- struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
++ struct bfq_data *bfqd;
+
+- if (bic_to_bfqq(bic, false)) {
+- bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, false));
+- bic_set_bfqq(bic, NULL, false);
+- }
++ if (bfqq)
++ bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
+
+- if (bic_to_bfqq(bic, true)) {
++ if (bfqq && bfqd) {
++ spin_lock_irq(&bfqd->lock);
+ /*
+ * If the bic is using a shared queue, put the reference
+ * taken on the io_context when the bic started using a
+ * shared bfq_queue.
+ */
+- if (bfq_bfqq_coop(bic_to_bfqq(bic, true)))
+- put_io_context(icq->ioc);
+- bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, true));
+- bic_set_bfqq(bic, NULL, true);
++ if (is_sync && bfq_bfqq_coop(bfqq))
++ put_io_context(bic->icq.ioc);
++ bfq_exit_bfqq(bfqd, bfqq);
++ bic_set_bfqq(bic, NULL, is_sync);
++ spin_unlock_irq(&bfqd->lock);
+ }
+ }
+
++static void bfq_exit_icq_body(struct work_struct *work)
++{
++ struct bfq_io_cq *bic =
++ container_of(work, struct bfq_io_cq, exit_icq_work);
++
++ bfq_exit_icq_bfqq(bic, true);
++ bfq_exit_icq_bfqq(bic, false);
++}
++
++static void bfq_init_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++
++ INIT_WORK(&bic->exit_icq_work, bfq_exit_icq_body);
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++
++ kblockd_schedule_work(&bic->exit_icq_work);
++}
++
+ /*
+ * Update the entity prio values; note that the new values will not
+ * be used until the next (re)activation.
+@@ -4015,6 +4055,10 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
+ {
+ struct task_struct *tsk = current;
+ int ioprio_class;
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ if (!bfqd)
++ return;
+
+ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
+ switch (ioprio_class) {
+@@ -4095,6 +4139,8 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ INIT_HLIST_NODE(&bfqq->burst_list_node);
+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
+
++ spin_lock_init(&bfqq->lock);
++
+ bfqq->ref = 0;
+ bfqq->bfqd = bfqd;
+
+@@ -4351,22 +4397,13 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (budget_timeout)
+ bfq_bfqq_expire(bfqd, bfqq, false,
+ BFQ_BFQQ_BUDGET_TIMEOUT);
+-
+- /*
+- * Let the request rip immediately, or let a new queue be
+- * selected if bfqq has just been expired.
+- */
+- __blk_run_queue(bfqd->queue);
+ }
+ }
+
+-static void bfq_insert_request(struct request_queue *q, struct request *rq)
++static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ {
+- struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
+
+- assert_spin_locked(bfqd->queue->queue_lock);
+-
+ /*
+ * An unplug may trigger a requeue of a request from the device
+ * driver: make sure we are in process context while trying to
+@@ -4381,8 +4418,8 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq)
+ * Release the request's reference to the old bfqq
+ * and make sure one is taken to the shared queue.
+ */
+- new_bfqq->allocated[rq_data_dir(rq)]++;
+- bfqq->allocated[rq_data_dir(rq)]--;
++ new_bfqq->allocated++;
++ bfqq->allocated--;
+ new_bfqq->ref++;
+ bfq_clear_bfqq_just_created(bfqq);
+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+@@ -4406,6 +4443,55 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq)
+ bfq_rq_enqueued(bfqd, bfqq, rq);
+ }
+
++static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
++ bool at_head)
++{
++ struct request_queue *q = hctx->queue;
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ spin_lock_irq(&bfqd->lock);
++ if (blk_mq_sched_try_insert_merge(q, rq))
++ goto done;
++ spin_unlock_irq(&bfqd->lock);
++
++ blk_mq_sched_request_inserted(rq);
++
++ spin_lock_irq(&bfqd->lock);
++ if (at_head || blk_rq_is_passthrough(rq)) {
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ if (at_head)
++ list_add(&rq->queuelist, &bfqd->dispatch);
++ else
++ list_add_tail(&rq->queuelist, &bfqd->dispatch);
++
++ if (bfqq)
++ bfqq->dispatched++;
++ } else {
++ __bfq_insert_request(bfqd, rq);
++
++ if (rq_mergeable(rq)) {
++ elv_rqhash_add(q, rq);
++ if (!q->last_merge)
++ q->last_merge = rq;
++ }
++ }
++done:
++ spin_unlock_irq(&bfqd->lock);
++}
++
++static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
++ struct list_head *list, bool at_head)
++{
++ while (!list_empty(list)) {
++ struct request *rq;
++
++ rq = list_first_entry(list, struct request, queuelist);
++ list_del_init(&rq->queuelist);
++ bfq_insert_request(hctx, rq, at_head);
++ }
++}
++
+ static void bfq_update_hw_tag(struct bfq_data *bfqd)
+ {
+ bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
+@@ -4431,27 +4517,17 @@ static void bfq_update_hw_tag(struct bfq_data *bfqd)
+ bfqd->hw_tag_samples = 0;
+ }
+
+-static void bfq_completed_request(struct request_queue *q, struct request *rq)
++static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ {
+- struct bfq_queue *bfqq = RQ_BFQQ(rq);
+- struct bfq_data *bfqd = bfqq->bfqd;
+ u64 now_ns;
+ u32 delta_us;
+
+- bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left",
+- blk_rq_sectors(rq));
+-
+- assert_spin_locked(bfqd->queue->queue_lock);
+ bfq_update_hw_tag(bfqd);
+
+ BUG_ON(!bfqd->rq_in_driver);
+ BUG_ON(!bfqq->dispatched);
+ bfqd->rq_in_driver--;
+ bfqq->dispatched--;
+- bfqg_stats_update_completion(bfqq_group(bfqq),
+- rq_start_time_ns(rq),
+- rq_io_start_time_ns(rq),
+- rq->cmd_flags);
+
+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
+@@ -4477,7 +4553,8 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+ */
+ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
+
+- bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
++ bfq_log_bfqq(bfqd, bfqq,
++ "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
+ (USEC_PER_SEC*
+ (u64)((bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us))
+@@ -4527,7 +4604,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+ if (bfqd->in_service_queue == bfqq) {
+ if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
+ bfq_arm_slice_timer(bfqd);
+- goto out;
++ return;
+ } else if (bfq_may_expire_for_budg_timeout(bfqq))
+ bfq_bfqq_expire(bfqd, bfqq, false,
+ BFQ_BFQQ_BUDGET_TIMEOUT);
+@@ -4537,68 +4614,55 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+ bfq_bfqq_expire(bfqd, bfqq, false,
+ BFQ_BFQQ_NO_MORE_REQUESTS);
+ }
+-
+- if (!bfqd->rq_in_driver)
+- bfq_schedule_dispatch(bfqd);
+-
+-out:
+- return;
+ }
+
+-static int __bfq_may_queue(struct bfq_queue *bfqq)
++static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
+ {
+- if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
+- bfq_clear_bfqq_must_alloc(bfqq);
+- return ELV_MQUEUE_MUST;
+- }
++ bfqq->allocated--;
+
+- return ELV_MQUEUE_MAY;
++ bfq_put_queue(bfqq);
+ }
+
+-static int bfq_may_queue(struct request_queue *q, unsigned int op)
++static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ {
+- struct bfq_data *bfqd = q->elevator->elevator_data;
+- struct task_struct *tsk = current;
+- struct bfq_io_cq *bic;
+- struct bfq_queue *bfqq;
+-
+- /*
+- * Don't force setup of a queue from here, as a call to may_queue
+- * does not necessarily imply that a request actually will be
+- * queued. So just lookup a possibly existing queue, or return
+- * 'may queue' if that fails.
+- */
+- bic = bfq_bic_lookup(bfqd, tsk->io_context);
+- if (!bic)
+- return ELV_MQUEUE_MAY;
+-
+- bfqq = bic_to_bfqq(bic, op_is_sync(op));
+- if (bfqq)
+- return __bfq_may_queue(bfqq);
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
+
+- return ELV_MQUEUE_MAY;
+-}
++ if (rq->rq_flags & RQF_STARTED)
++ bfqg_stats_update_completion(bfqq_group(bfqq),
++ rq_start_time_ns(rq),
++ rq_io_start_time_ns(rq),
++ rq->cmd_flags);
+
+-/*
+- * Queue lock held here.
+- */
+-static void bfq_put_request(struct request *rq)
+-{
+- struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ if (likely(rq->rq_flags & RQF_STARTED)) {
++ unsigned long flags;
+
+- if (bfqq) {
+- const int rw = rq_data_dir(rq);
++ spin_lock_irqsave(&bfqd->lock, flags);
+
+- BUG_ON(!bfqq->allocated[rw]);
+- bfqq->allocated[rw]--;
++ bfq_completed_request(bfqq, bfqd);
++ bfq_put_rq_priv_body(bfqq);
+
+- rq->elv.priv[0] = NULL;
+- rq->elv.priv[1] = NULL;
++ spin_unlock_irqrestore(&bfqd->lock, flags);
++ } else {
++ /*
++ * Request rq may be still/already in the scheduler,
++ * in which case we need to remove it. And we cannot
++ * defer such a check and removal, to avoid
++ * inconsistencies in the time interval from the end
++ * of this function to the start of the deferred work.
++ * Fortunately, this situation occurs only in process
++ * context, so taking the scheduler lock does not
++ * cause any deadlock, even if other locks are already
++ * (correctly) held by this process.
++ */
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
+- bfqq, bfqq->ref);
+- bfq_put_queue(bfqq);
++ if (!RB_EMPTY_NODE(&rq->rb_node))
++ bfq_remove_request(q, rq);
++ bfq_put_rq_priv_body(bfqq);
+ }
++
++ rq->elv.priv[0] = NULL;
++ rq->elv.priv[1] = NULL;
+ }
+
+ /*
+@@ -4630,18 +4694,16 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ /*
+ * Allocate bfq data structures associated with this request.
+ */
+-static int bfq_set_request(struct request_queue *q, struct request *rq,
+- struct bio *bio, gfp_t gfp_mask)
++static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
++ struct bio *bio)
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
+- const int rw = rq_data_dir(rq);
+ const int is_sync = rq_is_sync(rq);
+ struct bfq_queue *bfqq;
+- unsigned long flags;
+ bool bfqq_already_existing = false, split = false;
+
+- spin_lock_irqsave(q->queue_lock, flags);
++ spin_lock_irq(&bfqd->lock);
+
+ if (!bic)
+ goto queue_fail;
+@@ -4661,7 +4723,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ bic_set_bfqq(bic, bfqq, is_sync);
+ if (split && is_sync) {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: was_in_list %d "
++ "get_request: was_in_list %d "
+ "was_in_large_burst %d "
+ "large burst in progress %d",
+ bic->was_in_burst_list,
+@@ -4671,12 +4733,12 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ if ((bic->was_in_burst_list && bfqd->large_burst) ||
+ bic->saved_in_large_burst) {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: marking in "
++ "get_request: marking in "
+ "large burst");
+ bfq_mark_bfqq_in_large_burst(bfqq);
+ } else {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: clearing in "
++ "get_request: clearing in "
+ "large burst");
+ bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list)
+@@ -4703,9 +4765,12 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ }
+ }
+
+- bfqq->allocated[rw]++;
++ bfqq->allocated++;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "get_request: new allocated %d", bfqq->allocated);
++
+ bfqq->ref++;
+- bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_request: bfqq %p, %d", bfqq, bfqq->ref);
+
+ rq->elv.priv[0] = bic;
+ rq->elv.priv[1] = bfqq;
+@@ -4733,26 +4798,53 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ if (unlikely(bfq_bfqq_just_created(bfqq)))
+ bfq_handle_burst(bfqd, bfqq);
+
+- spin_unlock_irqrestore(q->queue_lock, flags);
++ spin_unlock_irq(&bfqd->lock);
+
+ return 0;
+
+ queue_fail:
+- bfq_schedule_dispatch(bfqd);
+- spin_unlock_irqrestore(q->queue_lock, flags);
++ spin_unlock_irq(&bfqd->lock);
+
+ return 1;
+ }
+
+-static void bfq_kick_queue(struct work_struct *work)
++static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+ {
+- struct bfq_data *bfqd =
+- container_of(work, struct bfq_data, unplug_work);
+- struct request_queue *q = bfqd->queue;
++ struct bfq_data *bfqd = bfqq->bfqd;
++ enum bfqq_expiration reason;
++ unsigned long flags;
++
++ spin_lock_irqsave(&bfqd->lock, flags);
++ bfq_clear_bfqq_wait_request(bfqq);
+
+- spin_lock_irq(q->queue_lock);
+- __blk_run_queue(q);
+- spin_unlock_irq(q->queue_lock);
++ if (bfqq != bfqd->in_service_queue) {
++ spin_unlock_irqrestore(&bfqd->lock, flags);
++ return;
++ }
++
++ if (bfq_bfqq_budget_timeout(bfqq))
++ /*
++ * Also here the queue can be safely expired
++ * for budget timeout without wasting
++ * guarantees
++ */
++ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++ /*
++ * The queue may not be empty upon timer expiration,
++ * because we may not disable the timer when the
++ * first request of the in-service queue arrives
++ * during disk idling.
++ */
++ reason = BFQ_BFQQ_TOO_IDLE;
++ else
++ goto schedule_dispatch;
++
++ bfq_bfqq_expire(bfqd, bfqq, true, reason);
++
++schedule_dispatch:
++ spin_unlock_irqrestore(&bfqd->lock, flags);
++ bfq_schedule_dispatch(bfqd);
+ }
+
+ /*
+@@ -4763,59 +4855,22 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
+ {
+ struct bfq_data *bfqd = container_of(timer, struct bfq_data,
+ idle_slice_timer);
+- struct bfq_queue *bfqq;
+- unsigned long flags;
+- enum bfqq_expiration reason;
+-
+- spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
+
+- bfqq = bfqd->in_service_queue;
+ /*
+ * Theoretical race here: the in-service queue can be NULL or
+- * different from the queue that was idling if the timer handler
+- * spins on the queue_lock and a new request arrives for the
+- * current queue and there is a full dispatch cycle that changes
+- * the in-service queue. This can hardly happen, but in the worst
+- * case we just expire a queue too early.
++ * different from the queue that was idling if a new request
++ * arrives for the current queue and there is a full dispatch
++ * cycle that changes the in-service queue. This can hardly
++ * happen, but in the worst case we just expire a queue too
++ * early.
+ */
+- if (bfqq) {
+- bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
+- bfq_clear_bfqq_wait_request(bfqq);
+-
+- if (bfq_bfqq_budget_timeout(bfqq))
+- /*
+- * Also here the queue can be safely expired
+- * for budget timeout without wasting
+- * guarantees
+- */
+- reason = BFQ_BFQQ_BUDGET_TIMEOUT;
+- else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
+- /*
+- * The queue may not be empty upon timer expiration,
+- * because we may not disable the timer when the
+- * first request of the in-service queue arrives
+- * during disk idling.
+- */
+- reason = BFQ_BFQQ_TOO_IDLE;
+- else
+- goto schedule_dispatch;
+-
+- bfq_bfqq_expire(bfqd, bfqq, true, reason);
+- }
+-
+-schedule_dispatch:
+- bfq_schedule_dispatch(bfqd);
++ if (bfqq)
++ bfq_idle_slice_timer_body(bfqq);
+
+- spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
+ return HRTIMER_NORESTART;
+ }
+
+-static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
+-{
+- hrtimer_cancel(&bfqd->idle_slice_timer);
+- cancel_work_sync(&bfqd->unplug_work);
+-}
+-
+ static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
+ struct bfq_queue **bfqq_ptr)
+ {
+@@ -4852,28 +4907,40 @@ static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
+ static void bfq_exit_queue(struct elevator_queue *e)
+ {
+ struct bfq_data *bfqd = e->elevator_data;
+- struct request_queue *q = bfqd->queue;
+ struct bfq_queue *bfqq, *n;
+
+- bfq_shutdown_timer_wq(bfqd);
+-
+- spin_lock_irq(q->queue_lock);
++ hrtimer_cancel(&bfqd->idle_slice_timer);
+
+ BUG_ON(bfqd->in_service_queue);
+- list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
+- bfq_deactivate_bfqq(bfqd, bfqq, false, false);
+
+- spin_unlock_irq(q->queue_lock);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) {
++ if (bfqq->bic) /* bfqqs without bic are handled below */
++ cancel_work_sync(&bfqq->bic->exit_icq_work);
++ }
++
++ spin_lock_irq(&bfqd->lock);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) {
++ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
++ /*
++ * Make sure that deferred exit_icq_work completes
++ * without errors for bfq_queues without bic
++ */
++ if (!bfqq->bic)
++ bfqq->bfqd = NULL;
++ }
++ spin_unlock_irq(&bfqd->lock);
+
+- bfq_shutdown_timer_wq(bfqd);
++ hrtimer_cancel(&bfqd->idle_slice_timer);
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+- blkcg_deactivate_policy(q, &blkcg_policy_bfq);
++ blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
+ #else
++ spin_lock_irq(&bfqd->lock);
+ bfq_put_async_queues(bfqd, bfqd->root_group);
+ kfree(bfqd->root_group);
++ spin_unlock_irq(&bfqd->lock);
+ #endif
+
+ kfree(bfqd);
+@@ -4934,10 +5001,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+
+ bfqd->queue = q;
+
+- spin_lock_irq(q->queue_lock);
+- q->elevator = eq;
+- spin_unlock_irq(q->queue_lock);
+-
+ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
+ if (!bfqd->root_group)
+ goto out_free;
+@@ -4951,8 +5014,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ bfqd->queue_weights_tree = RB_ROOT;
+ bfqd->group_weights_tree = RB_ROOT;
+
+- INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
+-
+ INIT_LIST_HEAD(&bfqd->active_list);
+ INIT_LIST_HEAD(&bfqd->idle_list);
+ INIT_HLIST_HEAD(&bfqd->burst_list);
+@@ -5001,6 +5062,11 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
+ bfqd->device_speed = BFQ_BFQD_FAST;
+
++ spin_lock_init(&bfqd->lock);
++ INIT_LIST_HEAD(&bfqd->dispatch);
++
++ q->elevator = eq;
++
+ return 0;
+
+ out_free:
+@@ -5057,7 +5123,7 @@ static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
+ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
+ bfqd->queued);
+
+- spin_lock_irq(bfqd->queue->queue_lock);
++ spin_lock_irq(&bfqd->lock);
+
+ num_char += sprintf(page + num_char, "Active:\n");
+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
+@@ -5086,7 +5152,7 @@ static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
+ jiffies_to_msecs(bfqq->wr_cur_max_time));
+ }
+
+- spin_unlock_irq(bfqd->queue->queue_lock);
++ spin_unlock_irq(&bfqd->lock);
+
+ return num_char;
+ }
+@@ -5294,35 +5360,31 @@ static struct elv_fs_entry bfq_attrs[] = {
+ __ATTR_NULL
+ };
+
+-static struct elevator_type iosched_bfq = {
+- .ops.sq = {
+- .elevator_merge_fn = bfq_merge,
+- .elevator_merged_fn = bfq_merged_request,
+- .elevator_merge_req_fn = bfq_merged_requests,
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+- .elevator_bio_merged_fn = bfq_bio_merged,
+-#endif
+- .elevator_allow_bio_merge_fn = bfq_allow_bio_merge,
+- .elevator_allow_rq_merge_fn = bfq_allow_rq_merge,
+- .elevator_dispatch_fn = bfq_dispatch_requests,
+- .elevator_add_req_fn = bfq_insert_request,
+- .elevator_activate_req_fn = bfq_activate_request,
+- .elevator_deactivate_req_fn = bfq_deactivate_request,
+- .elevator_completed_req_fn = bfq_completed_request,
+- .elevator_former_req_fn = elv_rb_former_request,
+- .elevator_latter_req_fn = elv_rb_latter_request,
+- .elevator_init_icq_fn = bfq_init_icq,
+- .elevator_exit_icq_fn = bfq_exit_icq,
+- .elevator_set_req_fn = bfq_set_request,
+- .elevator_put_req_fn = bfq_put_request,
+- .elevator_may_queue_fn = bfq_may_queue,
+- .elevator_init_fn = bfq_init_queue,
+- .elevator_exit_fn = bfq_exit_queue,
++static struct elevator_type iosched_bfq_mq = {
++ .ops.mq = {
++ .get_rq_priv = bfq_get_rq_private,
++ .put_rq_priv = bfq_put_rq_private,
++ .init_icq = bfq_init_icq,
++ .exit_icq = bfq_exit_icq,
++ .insert_requests = bfq_insert_requests,
++ .dispatch_request = bfq_dispatch_request,
++ .next_request = elv_rb_latter_request,
++ .former_request = elv_rb_former_request,
++ .allow_merge = bfq_allow_bio_merge,
++ .bio_merge = bfq_bio_merge,
++ .request_merge = bfq_request_merge,
++ .requests_merged = bfq_requests_merged,
++ .request_merged = bfq_request_merged,
++ .has_work = bfq_has_work,
++ .init_sched = bfq_init_queue,
++ .exit_sched = bfq_exit_queue,
+ },
++
++ .uses_mq = true,
+ .icq_size = sizeof(struct bfq_io_cq),
+ .icq_align = __alignof__(struct bfq_io_cq),
+ .elevator_attrs = bfq_attrs,
+- .elevator_name = "bfq-sq",
++ .elevator_name = "bfq-mq",
+ .elevator_owner = THIS_MODULE,
+ };
+
+@@ -5392,7 +5454,7 @@ static int __init bfq_init(void)
+ device_speed_thresh[0] = (4 * R_slow[0]) / 3;
+ device_speed_thresh[1] = (4 * R_slow[1]) / 3;
+
+- ret = elv_register(&iosched_bfq);
++ ret = elv_register(&iosched_bfq_mq);
+ if (ret)
+ goto err_pol_unreg;
+
+@@ -5412,8 +5474,8 @@ static int __init bfq_init(void)
+
+ static void __exit bfq_exit(void)
+ {
+- elv_unregister(&iosched_bfq);
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ elv_unregister(&iosched_bfq_mq);
++#ifdef CONFIG_BFQ_GROUP_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ bfq_slab_kill();
+@@ -5422,5 +5484,6 @@ static void __exit bfq_exit(void)
+ module_init(bfq_init);
+ module_exit(bfq_exit);
+
+-MODULE_AUTHOR("Arianna Avanzini, Fabio Checconi, Paolo Valente");
++MODULE_AUTHOR("Paolo Valente");
+ MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 0f51f270469c..c3fcd5ebd735 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -19,15 +19,8 @@
+ #include <linux/hrtimer.h>
+ #include <linux/blk-cgroup.h>
+
+-/*
+- * Define an alternative macro to compile cgroups support. This is one
+- * of the steps needed to let bfq-mq share the files bfq-sched.c and
+- * bfq-cgroup.c with bfq-sq. For bfq-mq, the macro
+- * BFQ_GROUP_IOSCHED_ENABLED will be defined as a function of whether
+- * the configuration option CONFIG_BFQ_MQ_GROUP_IOSCHED, and not
+- * CONFIG_BFQ_GROUP_IOSCHED, is defined.
+- */
+-#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED
++/* see comments on CONFIG_BFQ_GROUP_IOSCHED in bfq.h */
++#ifdef CONFIG_BFQ_MQ_GROUP_IOSCHED
+ #define BFQ_GROUP_IOSCHED_ENABLED
+ #endif
+
+@@ -259,8 +252,8 @@ struct bfq_queue {
+ struct request *next_rq;
+ /* number of sync and async requests queued */
+ int queued[2];
+- /* number of sync and async requests currently allocated */
+- int allocated[2];
++ /* number of requests currently allocated */
++ int allocated;
+ /* number of pending metadata requests */
+ int meta_pending;
+ /* fifo list of requests in sort_list */
+@@ -345,6 +338,8 @@ struct bfq_queue {
+ unsigned long wr_start_at_switch_to_srt;
+
+ unsigned long split_time; /* time of last split */
++
++ spinlock_t lock;
+ };
+
+ /**
+@@ -361,6 +356,9 @@ struct bfq_io_cq {
+ uint64_t blkcg_serial_nr; /* the current blkcg serial */
+ #endif
+
++	/* delayed work to exec the body of the exit_icq handler */
++ struct work_struct exit_icq_work;
++
+ /*
+ * Snapshot of the has_short_time flag before merging; taken
+ * to remember its value while the queue is merged, so as to
+@@ -402,11 +400,13 @@ enum bfq_device_speed {
+ /**
+ * struct bfq_data - per-device data structure.
+ *
+- * All the fields are protected by the @queue lock.
++ * All the fields are protected by @lock.
+ */
+ struct bfq_data {
+- /* request queue for the device */
++ /* device request queue */
+ struct request_queue *queue;
++ /* dispatch queue */
++ struct list_head dispatch;
+
+ /* root bfq_group for the device */
+ struct bfq_group *root_group;
+@@ -460,8 +460,6 @@ struct bfq_data {
+ * the queue in service.
+ */
+ struct hrtimer idle_slice_timer;
+- /* delayed work to restart dispatching on the request queue */
+- struct work_struct unplug_work;
+
+ /* bfq_queue in service */
+ struct bfq_queue *in_service_queue;
+@@ -612,6 +610,8 @@ struct bfq_data {
+
+ /* fallback dummy bfqq for extreme OOM conditions */
+ struct bfq_queue oom_bfqq;
++
++ spinlock_t lock;
+ };
+
+ enum bfqq_state_flags {
+@@ -622,7 +622,6 @@ enum bfqq_state_flags {
+ * waiting for a request
+ * without idling the device
+ */
+- BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
+ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
+ BFQ_BFQQ_FLAG_has_short_ttime, /* queue has a short think time */
+ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
+@@ -661,7 +660,6 @@ BFQ_BFQQ_FNS(just_created);
+ BFQ_BFQQ_FNS(busy);
+ BFQ_BFQQ_FNS(wait_request);
+ BFQ_BFQQ_FNS(non_blocking_wait_rq);
+-BFQ_BFQQ_FNS(must_alloc);
+ BFQ_BFQQ_FNS(fifo_expire);
+ BFQ_BFQQ_FNS(has_short_ttime);
+ BFQ_BFQQ_FNS(sync);
+@@ -692,7 +690,6 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+- assert_spin_locked((bfqd)->queue->queue_lock); \
+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+ pr_crit("%s bfq%d%c %s " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+@@ -734,7 +731,6 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+- assert_spin_locked((bfqd)->queue->queue_lock); \
+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
+ (bfqq)->pid, \
+@@ -961,7 +957,6 @@ static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
+
+ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio);
+ static void bfq_put_queue(struct bfq_queue *bfqq);
+-static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
+ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
+ struct bio *bio, bool is_sync,
+ struct bfq_io_cq *bic);
+
+From bde5235de2241502c1c00337bd51c96d9b60b6df Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 3 Mar 2017 08:52:40 +0100
+Subject: [PATCH 13/51] Add checks and extra log messages - Part I
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 112 +++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 109 insertions(+), 3 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index c963d92a32c2..40eadb3f7073 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -773,6 +773,8 @@ static int bfqq_process_refs(struct bfq_queue *bfqq)
+ {
+ int process_refs, io_refs;
+
++ lockdep_assert_held(&bfqq->bfqd->lock);
++
+ io_refs = bfqq->allocated;
+ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st;
+ BUG_ON(process_refs < 0);
+@@ -1483,6 +1485,8 @@ static void bfq_add_request(struct request *rq)
+ bfqq->queued[rq_is_sync(rq)]++;
+ bfqd->queued++;
+
++ BUG_ON(!RQ_BFQQ(rq));
++ BUG_ON(RQ_BFQQ(rq) != bfqq);
+ elv_rb_add(&bfqq->sort_list, rq);
+
+ /*
+@@ -1491,6 +1495,8 @@ static void bfq_add_request(struct request *rq)
+ prev = bfqq->next_rq;
+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
+ BUG_ON(!next_rq);
++ BUG_ON(!RQ_BFQQ(next_rq));
++ BUG_ON(RQ_BFQQ(next_rq) != bfqq);
+ bfqq->next_rq = next_rq;
+
+ /*
+@@ -1615,6 +1621,19 @@ static void bfq_remove_request(struct request_queue *q,
+
+ if (bfqq->next_rq == rq) {
+ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++ if (bfqq->next_rq && !RQ_BFQQ(bfqq->next_rq)) {
++ pr_crit("no bfqq! for next rq %p bfqq %p\n",
++ bfqq->next_rq, bfqq);
++ }
++
++ BUG_ON(bfqq->next_rq && !RQ_BFQQ(bfqq->next_rq));
++ if (bfqq->next_rq && RQ_BFQQ(bfqq->next_rq) != bfqq) {
++ pr_crit(
++ "wrong bfqq! for next rq %p, rq_bfqq %p bfqq %p\n",
++ bfqq->next_rq, RQ_BFQQ(bfqq->next_rq), bfqq);
++ }
++ BUG_ON(bfqq->next_rq && RQ_BFQQ(bfqq->next_rq) != bfqq);
++
+ bfq_updated_next_req(bfqd, bfqq);
+ }
+
+@@ -1701,6 +1720,8 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
+ __rq = bfq_find_rq_fmerge(bfqd, bio, q);
+ if (__rq && elv_bio_merge_ok(__rq, bio)) {
+ *req = __rq;
++ bfq_log(bfqd, "request_merge: req %p", __rq);
++
+ return ELEVATOR_FRONT_MERGE;
+ }
+
+@@ -1721,6 +1742,8 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+
+ /* Reposition request in its sort_list */
+ elv_rb_del(&bfqq->sort_list, req);
++ BUG_ON(!RQ_BFQQ(req));
++ BUG_ON(RQ_BFQQ(req) != bfqq);
+ elv_rb_add(&bfqq->sort_list, req);
+
+ spin_lock_irq(&bfqd->lock);
+@@ -1729,7 +1752,13 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
+ bfqd->last_position);
+ BUG_ON(!next_rq);
++
+ bfqq->next_rq = next_rq;
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "requests_merged: req %p prev %p next_rq %p bfqq %p",
++ req, prev, next_rq, bfqq);
++
+ /*
+ * If next_rq changes, update both the queue's budget to
+ * fit the new request and the queue's position in its
+@@ -1748,8 +1777,16 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
+
++ BUG_ON(!RQ_BFQQ(rq));
++ BUG_ON(!RQ_BFQQ(next));
++
+ if (!RB_EMPTY_NODE(&rq->rb_node))
+ goto end;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "requests_merged: rq %p next %p bfqq %p next_bfqq %p",
++ rq, next, bfqq, next_bfqq);
++
+ spin_lock_irq(&bfqq->bfqd->lock);
+
+ /*
+@@ -3847,6 +3884,9 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+
++ bfq_log(bfqd, "has_work, dispatch_non_empty %d busy_queues %d",
++ !list_empty_careful(&bfqd->dispatch), bfqd->busy_queues > 0);
++
+ /*
+ * Avoiding lock: a race on bfqd->busy_queues should cause at
+ * most a call to dispatch for nothing
+@@ -3865,6 +3905,8 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ rq = list_first_entry(&bfqd->dispatch, struct request,
+ queuelist);
+ list_del_init(&rq->queuelist);
++ bfq_log(bfqd,
++ "dispatch requests: picked %p from dispatch list", rq);
+ goto exit;
+ }
+
+@@ -3904,7 +3946,20 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ if (rq) {
+ rq->rq_flags |= RQF_STARTED;
+ bfqd->rq_in_driver++;
+- }
++ if (bfqq)
++ bfq_log_bfqq(bfqd, bfqq,
++ "dispatched %s request %p, rq_in_driver %d",
++ bfq_bfqq_sync(bfqq) ? "sync" : "async",
++ rq,
++ bfqd->rq_in_driver);
++ else
++ bfq_log(bfqd,
++ "dispatched request %p from dispatch list, rq_in_driver %d",
++ rq, bfqd->rq_in_driver);
++ } else
++ bfq_log(bfqd,
++ "returned NULL request, rq_in_driver %d",
++ bfqd->rq_in_driver);
+
+ return rq;
+ }
+@@ -3944,6 +3999,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ return;
+
+ BUG_ON(rb_first(&bfqq->sort_list));
++ BUG_ON(bfqq->allocated != 0);
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+@@ -4043,6 +4099,7 @@ static void bfq_exit_icq(struct io_cq *icq)
+ {
+ struct bfq_io_cq *bic = icq_to_bic(icq);
+
++ BUG_ON(!bic);
+ kblockd_schedule_work(&bic->exit_icq_work);
+ }
+
+@@ -4057,6 +4114,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
+ int ioprio_class;
+ struct bfq_data *bfqd = bfqq->bfqd;
+
++ WARN_ON(!bfqd);
+ if (!bfqd)
+ return;
+
+@@ -4404,6 +4462,10 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
+
++ assert_spin_locked(&bfqd->lock);
++
++ bfq_log_bfqq(bfqd, bfqq, "__insert_req: rq %p bfqq %p", rq, bfqq);
++
+ /*
+ * An unplug may trigger a requeue of a request from the device
+ * driver: make sure we are in process context while trying to
+@@ -4420,6 +4482,12 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ */
+ new_bfqq->allocated++;
+ bfqq->allocated--;
++ bfq_log_bfqq(bfqd, bfqq,
++ "insert_request: new allocated %d", bfqq->allocated);
++ bfq_log_bfqq(bfqd, new_bfqq,
++ "insert_request: new_bfqq new allocated %d",
++		     new_bfqq->allocated);
++
+ new_bfqq->ref++;
+ bfq_clear_bfqq_just_created(bfqq);
+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+@@ -4529,6 +4597,10 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ bfqd->rq_in_driver--;
+ bfqq->dispatched--;
+
++ bfq_log_bfqq(bfqd, bfqq,
++ "completed_requests: new disp %d, new rq_in_driver %d",
++ bfqq->dispatched, bfqd->rq_in_driver);
++
+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
+ /*
+@@ -4618,6 +4690,9 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+
+ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
+ {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "put_request_body: allocated %d", bfqq->allocated);
++ BUG_ON(!bfqq->allocated);
+ bfqq->allocated--;
+
+ bfq_put_queue(bfqq);
+@@ -4625,8 +4700,27 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
+
+ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ {
+- struct bfq_queue *bfqq = RQ_BFQQ(rq);
+- struct bfq_data *bfqd = bfqq->bfqd;
++ struct bfq_queue *bfqq;
++ struct bfq_data *bfqd;
++ struct bfq_io_cq *bic;
++
++ BUG_ON(!rq);
++ bfqq = RQ_BFQQ(rq);
++ BUG_ON(!bfqq);
++
++ bic = RQ_BIC(rq);
++ BUG_ON(!bic);
++
++ bfqd = bfqq->bfqd;
++ BUG_ON(!bfqd);
++
++ BUG_ON(rq->rq_flags & RQF_QUEUED);
++ BUG_ON(!(rq->rq_flags & RQF_ELVPRIV));
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "putting rq %p with %u sects left, STARTED %d",
++ rq, blk_rq_sectors(rq),
++ rq->rq_flags & RQF_STARTED);
+
+ if (rq->rq_flags & RQF_STARTED)
+ bfqg_stats_update_completion(bfqq_group(bfqq),
+@@ -4634,6 +4728,8 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ rq_io_start_time_ns(rq),
+ rq->cmd_flags);
+
++ BUG_ON(blk_rq_sectors(rq) == 0 && !(rq->rq_flags & RQF_STARTED));
++
+ if (likely(rq->rq_flags & RQF_STARTED)) {
+ unsigned long flags;
+
+@@ -4655,7 +4751,9 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ * cause any deadlock, even if other locks are already
+ * (correctly) held by this process.
+ */
++ BUG_ON(in_interrupt());
+
++ assert_spin_locked(&bfqd->lock);
+ if (!RB_EMPTY_NODE(&rq->rb_node))
+ bfq_remove_request(q, rq);
+ bfq_put_rq_priv_body(bfqq);
+@@ -4814,7 +4912,9 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+ enum bfqq_expiration reason;
+ unsigned long flags;
+
++ BUG_ON(!bfqd);
+ spin_lock_irqsave(&bfqd->lock, flags);
++ bfq_log_bfqq(bfqd, bfqq, "handling slice_timer expiration");
+ bfq_clear_bfqq_wait_request(bfqq);
+
+ if (bfqq != bfqd->in_service_queue) {
+@@ -4857,6 +4957,8 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
+ idle_slice_timer);
+ struct bfq_queue *bfqq = bfqd->in_service_queue;
+
++ bfq_log(bfqd, "slice_timer expired");
++
+ /*
+ * Theoretical race here: the in-service queue can be NULL or
+ * different from the queue that was idling if a new request
+@@ -4909,9 +5011,12 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ struct bfq_data *bfqd = e->elevator_data;
+ struct bfq_queue *bfqq, *n;
+
++ bfq_log(bfqd, "exit_queue: starting ...");
++
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+
+ BUG_ON(bfqd->in_service_queue);
++ BUG_ON(!list_empty(&bfqd->active_list));
+
+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) {
+ if (bfqq->bic) /* bfqqs without bic are handled below */
+@@ -4943,6 +5048,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ spin_unlock_irq(&bfqd->lock);
+ #endif
+
++ bfq_log(bfqd, "exit_queue: finished ...");
+ kfree(bfqd);
+ }
+
+
+From 7f59486861e368d25f59d4136cf8e51a75b7edf9 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 9 Feb 2017 10:36:27 +0100
+Subject: [PATCH 14/51] Add lock check in bfq_allow_bio_merge
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 40eadb3f7073..21b876aeba16 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -2279,6 +2279,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ if (!bic)
+ return false;
+
++ assert_spin_locked(&bfqd->lock);
+ bfqq = bic_to_bfqq(bic, is_sync);
+ /*
+ * We take advantage of this function to perform an early merge
+
+From a2dd19a4d95cf401268c144c79ce549c7fc4bbca Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 7 Feb 2017 15:14:29 +0100
+Subject: [PATCH 15/51] bfq-mq: execute exit_icq operations immediately
+
+Exploiting Omar's patch that removes the taking of the queue lock in
+put_io_context_active, this patch moves the operations of the bfq_exit_icq
+hook back from a deferred work item to the body of the hook itself.
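+
+The work_struct indirection being dropped here is the standard "defer the
+teardown to a work item" pattern, used when a hook may be entered while a
+problematic lock is held. A minimal sketch of that pattern, with hypothetical
+names (my_icq, my_exit_body) and the generic schedule_work() instead of
+kblockd_schedule_work(), purely for illustration:
+
+	#include <linux/kernel.h>
+	#include <linux/workqueue.h>
+	#include <linux/slab.h>
+
+	/* Hypothetical per-icq state embedding the deferred work item. */
+	struct my_icq {
+		struct work_struct exit_work;
+		/* ... per-queue state to tear down ... */
+	};
+
+	/* Runs later in process context, outside whatever locks the caller held. */
+	static void my_exit_body(struct work_struct *work)
+	{
+		struct my_icq *mic = container_of(work, struct my_icq, exit_work);
+
+		/* ... teardown that may need locks the original caller was holding ... */
+		kfree(mic);
+	}
+
+	/* The hook itself only queues the work and returns immediately. */
+	static void my_exit_hook(struct my_icq *mic)
+	{
+		INIT_WORK(&mic->exit_work, my_exit_body);
+		schedule_work(&mic->exit_work);
+	}
+
+Once put_io_context_active no longer takes the queue lock, the hook is not
+entered in a constrained context anymore, so the queueing step above can be
+dropped and the body called directly, which is what the diff below does.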
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 34 +++-------------------------------
+ block/bfq-mq.h | 3 ---
+ 2 files changed, 3 insertions(+), 34 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 21b876aeba16..1deb79a47181 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4080,28 +4080,13 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ }
+ }
+
+-static void bfq_exit_icq_body(struct work_struct *work)
+-{
+- struct bfq_io_cq *bic =
+- container_of(work, struct bfq_io_cq, exit_icq_work);
+-
+- bfq_exit_icq_bfqq(bic, true);
+- bfq_exit_icq_bfqq(bic, false);
+-}
+-
+-static void bfq_init_icq(struct io_cq *icq)
+-{
+- struct bfq_io_cq *bic = icq_to_bic(icq);
+-
+- INIT_WORK(&bic->exit_icq_work, bfq_exit_icq_body);
+-}
+-
+ static void bfq_exit_icq(struct io_cq *icq)
+ {
+ struct bfq_io_cq *bic = icq_to_bic(icq);
+
+ BUG_ON(!bic);
+- kblockd_schedule_work(&bic->exit_icq_work);
++ bfq_exit_icq_bfqq(bic, true);
++ bfq_exit_icq_bfqq(bic, false);
+ }
+
+ /*
+@@ -5019,21 +5004,9 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ BUG_ON(bfqd->in_service_queue);
+ BUG_ON(!list_empty(&bfqd->active_list));
+
+- list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) {
+- if (bfqq->bic) /* bfqqs without bic are handled below */
+- cancel_work_sync(&bfqq->bic->exit_icq_work);
+- }
+-
+ spin_lock_irq(&bfqd->lock);
+- list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) {
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
+ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
+- /*
+- * Make sure that deferred exit_icq_work completes
+- * without errors for bfq_queues without bic
+- */
+- if (!bfqq->bic)
+- bfqq->bfqd = NULL;
+- }
+ spin_unlock_irq(&bfqd->lock);
+
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+@@ -5471,7 +5444,6 @@ static struct elevator_type iosched_bfq_mq = {
+ .ops.mq = {
+ .get_rq_priv = bfq_get_rq_private,
+ .put_rq_priv = bfq_put_rq_private,
+- .init_icq = bfq_init_icq,
+ .exit_icq = bfq_exit_icq,
+ .insert_requests = bfq_insert_requests,
+ .dispatch_request = bfq_dispatch_request,
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index c3fcd5ebd735..23744b246db6 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -356,9 +356,6 @@ struct bfq_io_cq {
+ uint64_t blkcg_serial_nr; /* the current blkcg serial */
+ #endif
+
+-	/* delayed work to exec the body of the exit_icq handler */
+- struct work_struct exit_icq_work;
+-
+ /*
+ * Snapshot of the has_short_time flag before merging; taken
+ * to remember its value while the queue is merged, so as to
+
+From ab7e78a0ff095101de74e700f8743295a500bb20 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 21 Feb 2017 10:26:22 +0100
+Subject: [PATCH 16/51] Unnest request-queue and ioc locks from scheduler locks
+
+In some bio-merging functions, the request-queue lock needs to be
+taken, to look up the bic associated with the process that issued
+the bio that may need to be merged. In addition, put_io_context must
+be invoked in some other functions, and put_io_context may cause the
+lock of the involved ioc to be taken. In both cases, these extra
+request-queue or ioc locks are taken, or might be taken, while the
+scheduler lock is being held. In this respect, there are other code
+paths, in part external to bfq-mq, in which the same locks are taken
+(nested) in the opposite order, i.e., it is the scheduler lock to be
+taken while the request-queue or the ioc lock is being held. This
+leads to circular deadlocks.
+
+This commit addresses this issue by modifying the logic of the above
+functions, so as to let the lookup and put_io_context be performed,
+and thus the extra locks be taken, outside the critical sections
+protected by the scheduler lock.
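+
+The deadlock described above is the classic ABBA inversion: one path nests
+lock B inside lock A while another nests A inside B, and the two paths can
+block each other forever. The cure applied here is to keep the second lock out
+of the first lock's critical section: record what has to be released while
+still holding the scheduler lock, then perform the release after unlocking. A
+self-contained sketch with generic spinlocks and a hypothetical
+release_object() (in bfq-mq the object is the io_context and the release is
+put_io_context()):
+
+	#include <linux/spinlock.h>
+
+	static DEFINE_SPINLOCK(sched_lock);	/* stands in for bfqd->lock */
+	static DEFINE_SPINLOCK(other_lock);	/* stands in for the queue/ioc lock */
+
+	struct object { int refcount; };
+
+	static void release_object(struct object *obj)	/* hypothetical helper */
+	{
+		spin_lock(&other_lock);
+		obj->refcount--;
+		spin_unlock(&other_lock);
+	}
+
+	/* Deadlock-prone: other_lock is taken (via release_object) while
+	 * sched_lock is held; any path that takes sched_lock under other_lock
+	 * completes the ABBA cycle.
+	 */
+	static void bad_path(struct object *obj)
+	{
+		spin_lock(&sched_lock);
+		/* ... bookkeeping decides obj must be released ... */
+		release_object(obj);
+		spin_unlock(&sched_lock);
+	}
+
+	/* Fixed: remember the object while locked, release after unlocking,
+	 * so the two locks are never held together on this path.
+	 */
+	static void good_path(struct object *obj)
+	{
+		struct object *to_put = NULL;
+
+		spin_lock(&sched_lock);
+		/* ... same bookkeeping ... */
+		to_put = obj;
+		spin_unlock(&sched_lock);
+
+		if (to_put)
+			release_object(to_put);
+	}
+
+The bfq_unlock_put_ioc() and bfq_unlock_put_ioc_restore() helpers introduced
+below implement the good_path() half of this sketch for bfqd->ioc_to_put.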
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 9 ++
+ block/bfq-mq-iosched.c | 264 ++++++++++++++++++++++++++++----------------
+ block/bfq-mq.h | 25 ++++-
+ block/bfq-sched.c | 11 ++
+ 4 files changed, 213 insertions(+), 96 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 8a73de76f32b..cf59eeb7f08e 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -716,6 +716,9 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ struct bfq_group *bfqg;
+ struct bfq_data *bfqd;
+ struct bfq_entity *entity;
++#ifdef BFQ_MQ
++ unsigned long flags;
++#endif
+ int i;
+
+ BUG_ON(!pd);
+@@ -729,6 +732,9 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ if (!entity) /* root group */
+ return;
+
++#ifdef BFQ_MQ
++ spin_lock_irqsave(&bfqd->lock, flags);
++#endif
+ /*
+ * Empty all service_trees belonging to this group before
+ * deactivating the group itself.
+@@ -766,6 +772,9 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ __bfq_deactivate_entity(entity, false);
+ bfq_put_async_queues(bfqd, bfqg);
+
++#ifdef BFQ_MQ
++ bfq_unlock_put_ioc_restore(bfqd, flags);
++#endif
+ /*
+ * @blkg is going offline and will be ignored by
+ * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 1deb79a47181..69ef3761c95d 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -233,6 +233,7 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
+ return NULL;
+ }
+
++#define BFQ_MQ
+ #include "bfq-sched.c"
+ #include "bfq-cgroup-included.c"
+
+@@ -1564,15 +1565,9 @@ static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
+ struct bio *bio,
+ struct request_queue *q)
+ {
+- struct task_struct *tsk = current;
+- struct bfq_io_cq *bic;
+- struct bfq_queue *bfqq;
++ struct bfq_queue *bfqq = bfqd->bio_bfqq;
+
+- bic = bfq_bic_lookup(bfqd, tsk->io_context, q);
+- if (!bic)
+- return NULL;
+
+- bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
+ if (bfqq)
+ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
+
+@@ -1693,9 +1688,26 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct request *free = NULL;
++ /*
++ * bfq_bic_lookup grabs the queue_lock: invoke it now and
++ * store its return value for later use, to avoid nesting
++ * queue_lock inside the bfqd->lock. We assume that the bic
++ * returned by bfq_bic_lookup does not go away before
++ * bfqd->lock is taken.
++ */
++ struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
+ bool ret;
+
+ spin_lock_irq(&bfqd->lock);
++
++ if (bic)
++ bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
++ else
++ bfqd->bio_bfqq = NULL;
++ bfqd->bio_bic = bic;
++ /* Set next flag just for testing purposes */
++ bfqd->bio_bfqq_set = true;
++
+ ret = blk_mq_sched_try_merge(q, bio, &free);
+
+ /*
+@@ -1706,6 +1718,7 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+ */
+ if (free)
+ blk_mq_free_request(free);
++ bfqd->bio_bfqq_set = false;
+ spin_unlock_irq(&bfqd->lock);
+
+ return ret;
+@@ -2261,8 +2274,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ bool is_sync = op_is_sync(bio->bi_opf);
+- struct bfq_io_cq *bic;
+- struct bfq_queue *bfqq, *new_bfqq;
++ struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
+
+ /*
+ * Disallow merge of a sync bio into an async request.
+@@ -2273,31 +2285,40 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ /*
+ * Lookup the bfqq that this bio will be queued with. Allow
+ * merge only if rq is queued there.
+- * Queue lock is held here.
+ */
+- bic = bfq_bic_lookup(bfqd, current->io_context, q);
+- if (!bic)
++ if (!bfqq)
+ return false;
+
+- assert_spin_locked(&bfqd->lock);
+- bfqq = bic_to_bfqq(bic, is_sync);
+ /*
+ * We take advantage of this function to perform an early merge
+ * of the queues of possible cooperating processes.
+ */
+- if (bfqq) {
+- new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
+- if (new_bfqq) {
+- bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
+- /*
+- * If we get here, the bio will be queued in the
+- * shared queue, i.e., new_bfqq, so use new_bfqq
+- * to decide whether bio and rq can be merged.
+- */
+- bfqq = new_bfqq;
+- }
+- }
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++ if (new_bfqq) {
++ /*
++ * bic still points to bfqq, then it has not yet been
++ * redirected to some other bfq_queue, and a queue
++ * merge between bfqq and new_bfqq can be safely
++ * fulfilled, i.e., bic can be redirected to new_bfqq
++ * and bfqq can be put.
++ */
++ bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
++ new_bfqq);
++ /*
++ * If we get here, bio will be queued into new_queue,
++ * so use new_bfqq to decide whether bio and rq can be
++ * merged.
++ */
++ bfqq = new_bfqq;
+
++ /*
++ * Change also bfqd->bio_bfqq, as
++ * bfqd->bio_bic now points to new_bfqq, and
++ * this function may be invoked again (and then may
++ * use again bfqd->bio_bfqq).
++ */
++ bfqd->bio_bfqq = bfqq;
++ }
+ return bfqq == RQ_BFQQ(rq);
+ }
+
+@@ -3965,14 +3986,43 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ return rq;
+ }
+
++/*
++ * Next two functions release bfqd->lock and put the io context
++ * pointed by bfqd->ioc_to_put. This delayed put is used to not risk
++ * to take an ioc->lock while the scheduler lock is being held.
++ */
++static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
++{
++ struct io_context *ioc_to_put = bfqd->ioc_to_put;
++
++ bfqd->ioc_to_put = NULL;
++ spin_unlock_irq(&bfqd->lock);
++
++ if (ioc_to_put)
++ put_io_context(ioc_to_put);
++}
++
++static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd,
++ unsigned long flags)
++{
++ struct io_context *ioc_to_put = bfqd->ioc_to_put;
++
++ bfqd->ioc_to_put = NULL;
++ spin_unlock_irqrestore(&bfqd->lock, flags);
++
++ if (ioc_to_put)
++ put_io_context(ioc_to_put);
++}
++
+ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+ struct request *rq;
+
+ spin_lock_irq(&bfqd->lock);
++
+ rq = __bfq_dispatch_request(hctx);
+- spin_unlock_irq(&bfqd->lock);
++ bfq_unlock_put_ioc(bfqd);
+
+ return rq;
+ }
+@@ -3981,7 +4031,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ * Task holds one reference to the queue, dropped when task exits. Each rq
+ * in-flight on this queue also holds a reference, dropped when rq is freed.
+ *
+- * Queue lock must be held here. Recall not to use bfqq after calling
++ * Scheduler lock must be held here. Recall not to use bfqq after calling
+ * this function on it.
+ */
+ static void bfq_put_queue(struct bfq_queue *bfqq)
+@@ -4066,17 +4116,23 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
+
+ if (bfqq && bfqd) {
+- spin_lock_irq(&bfqd->lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&bfqd->lock, flags);
+ /*
+- * If the bic is using a shared queue, put the reference
+- * taken on the io_context when the bic started using a
+- * shared bfq_queue.
++ * If the bic is using a shared queue, put the
++ * reference taken on the io_context when the bic
++ * started using a shared bfq_queue. This put cannot
++ * make ioc->ref_count reach 0, so no ioc->lock
++ * risks being taken (which could lead to deadlock
++ * scenarios).
+ */
+ if (is_sync && bfq_bfqq_coop(bfqq))
+ put_io_context(bic->icq.ioc);
++
+ bfq_exit_bfqq(bfqd, bfqq);
+ bic_set_bfqq(bic, NULL, is_sync);
+- spin_unlock_irq(&bfqd->lock);
++ bfq_unlock_put_ioc_restore(bfqd, flags);
+ }
+ }
+
+@@ -4183,8 +4239,6 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ INIT_HLIST_NODE(&bfqq->burst_list_node);
+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
+
+- spin_lock_init(&bfqq->lock);
+-
+ bfqq->ref = 0;
+ bfqq->bfqd = bfqd;
+
+@@ -4476,6 +4530,14 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+
+ new_bfqq->ref++;
+ bfq_clear_bfqq_just_created(bfqq);
++ /*
++ * If the bic associated with the process
++ * issuing this request still points to bfqq
++ * (and thus has not been already redirected
++ * to new_bfqq or even some other bfq_queue),
++ * then complete the merge and redirect it to
++ * new_bfqq.
++ */
+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
+ bfqq, new_bfqq);
+@@ -4498,14 +4560,17 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ }
+
+ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+- bool at_head)
++ bool at_head)
+ {
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+
+ spin_lock_irq(&bfqd->lock);
+- if (blk_mq_sched_try_insert_merge(q, rq))
+- goto done;
++ if (blk_mq_sched_try_insert_merge(q, rq)) {
++ spin_unlock_irq(&bfqd->lock);
++ return;
++ }
++
+ spin_unlock_irq(&bfqd->lock);
+
+ blk_mq_sched_request_inserted(rq);
+@@ -4530,8 +4595,8 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ q->last_merge = rq;
+ }
+ }
+-done:
+- spin_unlock_irq(&bfqd->lock);
++
++ bfq_unlock_put_ioc(bfqd);
+ }
+
+ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
+@@ -4724,7 +4789,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ bfq_completed_request(bfqq, bfqd);
+ bfq_put_rq_priv_body(bfqq);
+
+- spin_unlock_irqrestore(&bfqd->lock, flags);
++ bfq_unlock_put_ioc_restore(bfqd, flags);
+ } else {
+ /*
+ * Request rq may be still/already in the scheduler,
+@@ -4732,10 +4797,10 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ * defer such a check and removal, to avoid
+ * inconsistencies in the time interval from the end
+ * of this function to the start of the deferred work.
+- * Fortunately, this situation occurs only in process
+- * context, so taking the scheduler lock does not
+- * cause any deadlock, even if other locks are already
+- * (correctly) held by this process.
++ * This situation seems to occur only in process
++ * context, as a consequence of a merge. In the
++ * current version of the code, this implies that the
++ * lock is held.
+ */
+ BUG_ON(in_interrupt());
+
+@@ -4758,8 +4823,6 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
+
+- put_io_context(bic->icq.ioc);
+-
+ if (bfqq_process_refs(bfqq) == 1) {
+ bfqq->pid = current->pid;
+ bfq_clear_bfqq_coop(bfqq);
+@@ -4775,6 +4838,41 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ return NULL;
+ }
+
++static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic,
++ struct bio *bio,
++ bool split, bool is_sync,
++ bool *new_queue)
++{
++ struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
++
++ if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
++ return bfqq;
++
++ if (new_queue)
++ *new_queue = true;
++
++ if (bfqq)
++ bfq_put_queue(bfqq);
++ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
++
++ bic_set_bfqq(bic, bfqq, is_sync);
++ if (split && is_sync) {
++ if ((bic->was_in_burst_list && bfqd->large_burst) ||
++ bic->saved_in_large_burst)
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ else {
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ if (bic->was_in_burst_list)
++ hlist_add_head(&bfqq->burst_list_node,
++ &bfqd->burst_list);
++ }
++ bfqq->split_time = jiffies;
++ }
++
++ return bfqq;
++}
++
+ /*
+ * Allocate bfq data structures associated with this request.
+ */
+@@ -4786,6 +4884,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ const int is_sync = rq_is_sync(rq);
+ struct bfq_queue *bfqq;
+ bool bfqq_already_existing = false, split = false;
++ bool new_queue = false;
+
+ spin_lock_irq(&bfqd->lock);
+
+@@ -4796,42 +4895,10 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ bfq_bic_update_cgroup(bic, bio);
+
+-new_queue:
+- bfqq = bic_to_bfqq(bic, is_sync);
+- if (!bfqq || bfqq == &bfqd->oom_bfqq) {
+- if (bfqq)
+- bfq_put_queue(bfqq);
+- bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
+- BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
++ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
++ &new_queue);
+
+- bic_set_bfqq(bic, bfqq, is_sync);
+- if (split && is_sync) {
+- bfq_log_bfqq(bfqd, bfqq,
+- "get_request: was_in_list %d "
+- "was_in_large_burst %d "
+- "large burst in progress %d",
+- bic->was_in_burst_list,
+- bic->saved_in_large_burst,
+- bfqd->large_burst);
+-
+- if ((bic->was_in_burst_list && bfqd->large_burst) ||
+- bic->saved_in_large_burst) {
+- bfq_log_bfqq(bfqd, bfqq,
+- "get_request: marking in "
+- "large burst");
+- bfq_mark_bfqq_in_large_burst(bfqq);
+- } else {
+- bfq_log_bfqq(bfqd, bfqq,
+- "get_request: clearing in "
+- "large burst");
+- bfq_clear_bfqq_in_large_burst(bfqq);
+- if (bic->was_in_burst_list)
+- hlist_add_head(&bfqq->burst_list_node,
+- &bfqd->burst_list);
+- }
+- bfqq->split_time = jiffies;
+- }
+- } else {
++ if (unlikely(!new_queue)) {
+ /* If the queue was seeky for too long, break it apart. */
+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
+ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
+@@ -4841,9 +4908,19 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ bic->saved_in_large_burst = true;
+
+ bfqq = bfq_split_bfqq(bic, bfqq);
+- split = true;
++ /*
++ * A reference to bic->icq.ioc needs to be
++ * released after a queue split. Do not do it
++ * immediately, to not risk to possibly take
++ * an ioc->lock while holding the scheduler
++ * lock.
++ */
++ bfqd->ioc_to_put = bic->icq.ioc;
++
+ if (!bfqq)
+- goto new_queue;
++ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
++ true, is_sync,
++ NULL);
+ else
+ bfqq_already_existing = true;
+ }
+@@ -4861,18 +4938,17 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ /*
+ * If a bfq_queue has only one process reference, it is owned
+- * by only one bfq_io_cq: we can set the bic field of the
+- * bfq_queue to the address of that structure. Also, if the
+- * queue has just been split, mark a flag so that the
+- * information is available to the other scheduler hooks.
++ * by only this bic: we can then set bfqq->bic = bic. In
++ * addition, if the queue has also just been split, we have to
++ * resume its state.
+ */
+ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
+ bfqq->bic = bic;
+- if (split) {
++ if (bfqd->ioc_to_put) { /* if true, then there has been a split */
+ /*
+- * If the queue has just been split from a shared
+- * queue, restore the idle window and the possible
+- * weight raising period.
++ * The queue has just been split from a shared
++ * queue: restore the idle window and the
++ * possible weight raising period.
+ */
+ bfq_bfqq_resume_state(bfqq, bfqd, bic,
+ bfqq_already_existing);
+@@ -4882,7 +4958,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ if (unlikely(bfq_bfqq_just_created(bfqq)))
+ bfq_handle_burst(bfqd, bfqq);
+
+- spin_unlock_irq(&bfqd->lock);
++ bfq_unlock_put_ioc(bfqd);
+
+ return 0;
+
+@@ -4929,7 +5005,7 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+ bfq_bfqq_expire(bfqd, bfqq, true, reason);
+
+ schedule_dispatch:
+- spin_unlock_irqrestore(&bfqd->lock, flags);
++ bfq_unlock_put_ioc_restore(bfqd, flags);
+ bfq_schedule_dispatch(bfqd);
+ }
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 23744b246db6..bd83f1c02573 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -338,8 +338,6 @@ struct bfq_queue {
+ unsigned long wr_start_at_switch_to_srt;
+
+ unsigned long split_time; /* time of last split */
+-
+- spinlock_t lock;
+ };
+
+ /**
+@@ -609,6 +607,29 @@ struct bfq_data {
+ struct bfq_queue oom_bfqq;
+
+ spinlock_t lock;
++
++ /*
++ * bic associated with the task issuing current bio for
++ * merging. This and the next field are used as a support to
++ * be able to perform the bic lookup, needed by bio-merge
++ * functions, before the scheduler lock is taken, and thus
++ * avoid taking the request-queue lock while the scheduler
++ * lock is being held.
++ */
++ struct bfq_io_cq *bio_bic;
++ /* bfqq associated with the task issuing current bio for merging */
++ struct bfq_queue *bio_bfqq;
++ /* Extra flag used only for TESTING */
++ bool bio_bfqq_set;
++
++ /*
++ * io context to put right after bfqd->lock is released. This
++ * field is used to perform put_io_context, when needed, only
++ * after the scheduler lock has been released, and thus
++ * prevent an ioc->lock from being possibly taken while the
++ * scheduler lock is being held.
++ */
++ struct io_context *ioc_to_put;
+ };
+
+ enum bfqq_state_flags {
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index b54a638186e3..a5c8b4acd33c 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1905,7 +1905,18 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+ struct bfq_entity *entity = in_serv_entity;
+
+ if (bfqd->in_service_bic) {
++#ifdef BFQ_MQ
++ /*
++ * Schedule the release of a reference to
++ * bfqd->in_service_bic->icq.ioc to right after the
++ * scheduler lock is released. This ioc is not
++ * released immediately, to not risk to possibly take
++ * an ioc->lock while holding the scheduler lock.
++ */
++ bfqd->ioc_to_put = bfqd->in_service_bic->icq.ioc;
++#else
+ put_io_context(bfqd->in_service_bic->icq.ioc);
++#endif
+ bfqd->in_service_bic = NULL;
+ }
+
+
+From 84cc7140cb4f0574710625f51abbb076a1dd2920 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 3 Mar 2017 09:31:14 +0100
+Subject: [PATCH 17/51] Add checks and extra log messages - Part II
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 42 ++++++++++++++++++++++++++++++++++++++++--
+ block/bfq-sched.c | 1 +
+ 2 files changed, 41 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 69ef3761c95d..5707d42b160d 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1567,6 +1567,7 @@ static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
+ {
+ struct bfq_queue *bfqq = bfqd->bio_bfqq;
+
++ BUG_ON(!bfqd->bio_bfqq_set);
+
+ if (bfqq)
+ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
+@@ -1719,6 +1720,7 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+ if (free)
+ blk_mq_free_request(free);
+ bfqd->bio_bfqq_set = false;
++ BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ return ret;
+@@ -1781,6 +1783,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ bfq_updated_next_req(bfqd, bfqq);
+ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
++ BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+ }
+ }
+@@ -1824,6 +1827,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+
+ bfq_remove_request(q, next);
+
++ BUG_ON(bfqq->bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqq->bfqd->lock);
+ end:
+ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
+@@ -2195,9 +2199,11 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+ {
+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
+ (unsigned long) new_bfqq->pid);
++ BUG_ON(bfqq->bic && bfqq->bic == new_bfqq->bic);
+ /* Save weight raising and idle window of the merged queues */
+ bfq_bfqq_save_state(bfqq);
+ bfq_bfqq_save_state(new_bfqq);
++
+ if (bfq_bfqq_IO_bound(bfqq))
+ bfq_mark_bfqq_IO_bound(new_bfqq);
+ bfq_clear_bfqq_IO_bound(bfqq);
+@@ -2276,6 +2282,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ bool is_sync = op_is_sync(bio->bi_opf);
+ struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
+
++ assert_spin_locked(&bfqd->lock);
+ /*
+ * Disallow merge of a sync bio into an async request.
+ */
+@@ -2286,6 +2293,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ * Lookup the bfqq that this bio will be queued with. Allow
+ * merge only if rq is queued there.
+ */
++ BUG_ON(!bfqd->bio_bfqq_set);
+ if (!bfqq)
+ return false;
+
+@@ -2294,6 +2302,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ * of the queues of possible cooperating processes.
+ */
+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++ BUG_ON(new_bfqq == bfqq);
+ if (new_bfqq) {
+ /*
+ * bic still points to bfqq, then it has not yet been
+@@ -4040,6 +4049,8 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ struct bfq_group *bfqg = bfqq_group(bfqq);
+ #endif
+
++ assert_spin_locked(&bfqq->bfqd->lock);
++
+ BUG_ON(bfqq->ref <= 0);
+
+ if (bfqq->bfqd)
+@@ -4119,6 +4130,7 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
++ BUG_ON(bfqd->ioc_to_put);
+ /*
+ * If the bic is using a shared queue, put the
+ * reference taken on the io_context when the bic
+@@ -4567,10 +4579,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ spin_lock_irq(&bfqd->lock);
+ if (blk_mq_sched_try_insert_merge(q, rq)) {
++ BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+ return;
+ }
+
++ BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ blk_mq_sched_request_inserted(rq);
+@@ -4785,6 +4799,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
++ BUG_ON(bfqd->ioc_to_put);
+
+ bfq_completed_request(bfqq, bfqd);
+ bfq_put_rq_priv_body(bfqq);
+@@ -4855,13 +4870,28 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
+ if (bfqq)
+ bfq_put_queue(bfqq);
+ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
++ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node));
+
+ bic_set_bfqq(bic, bfqq, is_sync);
+ if (split && is_sync) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "get_request: was_in_list %d "
++ "was_in_large_burst %d "
++ "large burst in progress %d",
++ bic->was_in_burst_list,
++ bic->saved_in_large_burst,
++ bfqd->large_burst);
++
+ if ((bic->was_in_burst_list && bfqd->large_burst) ||
+- bic->saved_in_large_burst)
++ bic->saved_in_large_burst) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "get_request: marking in "
++ "large burst");
+ bfq_mark_bfqq_in_large_burst(bfqq);
+- else {
++ } else {
++ bfq_log_bfqq(bfqd, bfqq,
++ "get_request: clearing in "
++ "large burst");
+ bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list)
+ hlist_add_head(&bfqq->burst_list_node,
+@@ -4897,10 +4927,12 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
+ &new_queue);
++ BUG_ON(bfqd->ioc_to_put);
+
+ if (unlikely(!new_queue)) {
+ /* If the queue was seeky for too long, break it apart. */
+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++ BUG_ON(!is_sync);
+ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
+
+ /* Update bic before losing reference to bfqq */
+@@ -4923,6 +4955,9 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ NULL);
+ else
+ bfqq_already_existing = true;
++
++ BUG_ON(!bfqq);
++ BUG_ON(bfqq == &bfqd->oom_bfqq);
+ }
+ }
+
+@@ -4976,6 +5011,8 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+
+ BUG_ON(!bfqd);
+ spin_lock_irqsave(&bfqd->lock, flags);
++ BUG_ON(bfqd->ioc_to_put);
++
+ bfq_log_bfqq(bfqd, bfqq, "handling slice_timer expiration");
+ bfq_clear_bfqq_wait_request(bfqq);
+
+@@ -5083,6 +5120,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ spin_lock_irq(&bfqd->lock);
+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
+ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
++ BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index a5c8b4acd33c..85e59eeb3569 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1906,6 +1906,7 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+
+ if (bfqd->in_service_bic) {
+ #ifdef BFQ_MQ
++ BUG_ON(bfqd->ioc_to_put);
+ /*
+ * Schedule the release of a reference to
+ * bfqd->in_service_bic->icq.ioc to right after the
+
+From 3d54cb804f1db2e08ce4a6cc335868538542f587 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 22 Feb 2017 11:30:01 +0100
+Subject: [PATCH 18/51] Fix unbalanced increment of rq_in_driver
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 52 +++++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 43 insertions(+), 9 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 5707d42b160d..9cbcb8d43d81 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -3936,9 +3936,45 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ rq = list_first_entry(&bfqd->dispatch, struct request,
+ queuelist);
+ list_del_init(&rq->queuelist);
++
+ bfq_log(bfqd,
+ "dispatch requests: picked %p from dispatch list", rq);
+- goto exit;
++ bfqq = RQ_BFQQ(rq);
++
++ if (bfqq) {
++ /*
++ * Increment counters here, because this
++ * dispatch does not follow the standard
++ * dispatch flow (where counters are
++ * incremented)
++ */
++ bfqq->dispatched++;
++
++ goto inc_in_driver_start_rq;
++ }
++
++ /*
++ * We exploit the put_rq_private hook to decrement
++ * rq_in_driver, but put_rq_private will not be
++ * invoked on this request. So, to avoid unbalance,
++ * just start this request, without incrementing
++ * rq_in_driver. As a negative consequence,
++ * rq_in_driver is deceptively lower than it should be
++ * while this request is in service. This may cause
++ * bfq_schedule_dispatch to be invoked uselessly.
++ *
++ * As for implementing an exact solution, the
++ * put_request hook, if defined, is probably invoked
++ * also on this request. So, by exploiting this hook,
++ * we could 1) increment rq_in_driver here, and 2)
++ * decrement it in put_request. Such a solution would
++ * let the value of the counter be always accurate,
++ * but it would entail using an extra interface
++ * function. This cost seems higher than the benefit,
++ * since the frequency of non-elevator-private
++ * requests is very low.
++ */
++ goto start_rq;
+ }
+
+ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
+@@ -3973,10 +4009,12 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+
+ BUG_ON(bfqq->next_rq == NULL &&
+ bfqq->entity.budget < bfqq->entity.service);
+-exit:
++
+ if (rq) {
+- rq->rq_flags |= RQF_STARTED;
++ inc_in_driver_start_rq:
+ bfqd->rq_in_driver++;
++ start_rq:
++ rq->rq_flags |= RQF_STARTED;
+ if (bfqq)
+ bfq_log_bfqq(bfqd, bfqq,
+ "dispatched %s request %p, rq_in_driver %d",
+@@ -3992,6 +4030,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ "returned NULL request, rq_in_driver %d",
+ bfqd->rq_in_driver);
+
++exit:
+ return rq;
+ }
+
+@@ -4591,15 +4630,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ spin_lock_irq(&bfqd->lock);
+ if (at_head || blk_rq_is_passthrough(rq)) {
+- struct bfq_queue *bfqq = RQ_BFQQ(rq);
+-
+ if (at_head)
+ list_add(&rq->queuelist, &bfqd->dispatch);
+ else
+ list_add_tail(&rq->queuelist, &bfqd->dispatch);
+-
+- if (bfqq)
+- bfqq->dispatched++;
+ } else {
+ __bfq_insert_request(bfqd, rq);
+
+@@ -4966,7 +5000,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ "get_request: new allocated %d", bfqq->allocated);
+
+ bfqq->ref++;
+- bfq_log_bfqq(bfqd, bfqq, "get_request: bfqq %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", rq, bfqq, bfqq->ref);
+
+ rq->elv.priv[0] = bic;
+ rq->elv.priv[1] = bfqq;
+
+From 7ba977d696b239569b4cd233aebc99e136ecf487 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 3 Mar 2017 09:39:35 +0100
+Subject: [PATCH 19/51] Add checks and extra log messages - Part III
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 9cbcb8d43d81..24b529a2edc7 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4630,10 +4630,21 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ spin_lock_irq(&bfqd->lock);
+ if (at_head || blk_rq_is_passthrough(rq)) {
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
+ if (at_head)
+ list_add(&rq->queuelist, &bfqd->dispatch);
+ else
+ list_add_tail(&rq->queuelist, &bfqd->dispatch);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqd, bfqq,
++ "insert_request %p in disp: at_head %d",
++ rq, at_head);
++ else
++ bfq_log(bfqd,
++ "insert_request %p in disp: at_head %d",
++ rq, at_head);
+ } else {
+ __bfq_insert_request(bfqd, rq);
+
+
+From c94e47b2908600b8ba89f84b0ac7febddd313141 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 17 Feb 2017 14:28:02 +0100
+Subject: [PATCH 20/51] TESTING: Check wrong invocation of merge and
+ put_rq_priv functions
+
+Check that merge functions are not invoked on requests queued in the
+dispatch queue, and that put_rq_private is not invoked on these requests
+either, unless they have also passed through get_rq_private.
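+
+Roughly, the checking scheme works as follows (illustrative sketch only;
+the actual flag and checks are in the diff below):
+
+        /* on insertion into the dispatch list */
+        rq->rq_flags |= RQF_DISP_LIST;
+        /* when the request is picked from the dispatch list, and in
+         * get_rq_private */
+        rq->rq_flags &= ~RQF_DISP_LIST;
+        /* in the merge hooks and in put_rq_private */
+        BUG_ON(rq->rq_flags & RQF_DISP_LIST);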
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 22 ++++++++++++++++++++++
+ include/linux/blkdev.h | 2 ++
+ 2 files changed, 24 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 24b529a2edc7..b4d40bb712d2 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1746,6 +1746,8 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
+ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ enum elv_merge type)
+ {
++ BUG_ON(req->rq_flags & RQF_DISP_LIST);
++
+ if (type == ELEVATOR_FRONT_MERGE &&
+ rb_prev(&req->rb_node) &&
+ blk_rq_pos(req) <
+@@ -1795,6 +1797,8 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+
+ BUG_ON(!RQ_BFQQ(rq));
+ BUG_ON(!RQ_BFQQ(next));
++ BUG_ON(rq->rq_flags & RQF_DISP_LIST);
++ BUG_ON(next->rq_flags & RQF_DISP_LIST);
+
+ if (!RB_EMPTY_NODE(&rq->rb_node))
+ goto end;
+@@ -3936,6 +3940,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ rq = list_first_entry(&bfqd->dispatch, struct request,
+ queuelist);
+ list_del_init(&rq->queuelist);
++ rq->rq_flags &= ~RQF_DISP_LIST;
+
+ bfq_log(bfqd,
+ "dispatch requests: picked %p from dispatch list", rq);
+@@ -3950,6 +3955,17 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ */
+ bfqq->dispatched++;
+
++ /*
++ * TESTING: reset DISP_LIST flag, because: 1)
++ * this request has passed through
++ * get_rq_private, 2) then it will have
++ * put_rq_private invoked on it, and 3) in
++ * put_rq_private we use this flag to check
++ * that put_rq_private is not invoked on
++ * requests for which get_rq_private has been
++ * invoked.
++ */
++ rq->rq_flags &= ~RQF_DISP_LIST;
+ goto inc_in_driver_start_rq;
+ }
+
+@@ -4637,6 +4653,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ else
+ list_add_tail(&rq->queuelist, &bfqd->dispatch);
+
++ rq->rq_flags |= RQF_DISP_LIST;
+ if (bfqq)
+ bfq_log_bfqq(bfqd, bfqq,
+ "insert_request %p in disp: at_head %d",
+@@ -4824,6 +4841,10 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ bfqd = bfqq->bfqd;
+ BUG_ON(!bfqd);
+
++ if (rq->rq_flags & RQF_DISP_LIST) {
++ pr_crit("putting disp rq %p for %d", rq, bfqq->pid);
++ BUG();
++ }
+ BUG_ON(rq->rq_flags & RQF_QUEUED);
+ BUG_ON(!(rq->rq_flags & RQF_ELVPRIV));
+
+@@ -5015,6 +5036,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ rq->elv.priv[0] = bic;
+ rq->elv.priv[1] = bfqq;
++ rq->rq_flags &= ~RQF_DISP_LIST;
+
+ /*
+ * If a bfq_queue has only one process reference, it is owned
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 10f892ca585d..0048e59e6d07 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -121,6 +121,8 @@ typedef __u32 __bitwise req_flags_t;
+ /* Look at ->special_vec for the actual data payload instead of the
+ bio chain. */
+ #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
++/* DEBUG: rq in bfq-mq dispatch list */
++#define RQF_DISP_LIST ((__force req_flags_t)(1 << 19))
+
+ /* flags that prevent us from merging requests: */
+ #define RQF_NOMERGE_FLAGS \
+
+From 49206f9052d13c96d49dbc36c612bed41b2d6552 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Sat, 25 Feb 2017 17:38:05 +0100
+Subject: [PATCH 21/51] Complete support for cgroups
+
+This commit completes cgroups support for bfq-mq. In particular, it deals with
+a sort of circular dependency introduced in blk-mq: the function
+blkcg_activate_policy, invoked during scheduler initialization, triggers the
+invocation of the has_work scheduler hook (before the init function is
+finished). To address this issue, this commit moves the invocation of
+blkcg_activate_policy to after the initialization of all the fields that can be
+initialized before invoking blkcg_activate_policy itself. This enables has_work
+to correctly return false, and thus to prevent the blk-mq stack from invoking
+further scheduler hooks before the init function is finished.
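+
+As a rough sketch (for illustration only, using the names in the diff
+below), the resulting initialization order in bfq_init_queue becomes:
+
+        q->elevator = eq;
+        INIT_LIST_HEAD(&bfqd->dispatch);
+        spin_lock_init(&bfqd->lock);
+        /* ... all the other fields that the hooks may read ... */
+        bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
+        /* may trigger has_work, which can now safely return false */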
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/Kconfig.iosched | 9 +++++
+ block/bfq-mq-iosched.c | 108 ++++++++++++++++++++++++++++---------------------
+ block/bfq-mq.h | 2 +-
+ 3 files changed, 72 insertions(+), 47 deletions(-)
+
+diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
+index 2d94af3d8b0a..299a6861fb90 100644
+--- a/block/Kconfig.iosched
++++ b/block/Kconfig.iosched
+@@ -106,6 +106,15 @@ config MQ_IOSCHED_BFQ
+ guarantees a low latency to interactive and soft real-time
+ applications. Details in Documentation/block/bfq-iosched.txt
+
++config MQ_BFQ_GROUP_IOSCHED
++ bool "BFQ-MQ hierarchical scheduling support"
++ depends on MQ_IOSCHED_BFQ && BLK_CGROUP
++ default n
++ ---help---
++
++ Enable hierarchical scheduling in BFQ-MQ, using the blkio
++ (cgroups-v1) or io (cgroups-v2) controller.
++
+ config MQ_IOSCHED_DEADLINE
+ tristate "MQ deadline I/O scheduler"
+ default y
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index b4d40bb712d2..02a1e7fd0ea4 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -88,7 +88,6 @@
+ #include "blk-mq.h"
+ #include "blk-mq-tag.h"
+ #include "blk-mq-sched.h"
+-#undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */
+ #include "bfq-mq.h"
+
+ /* Expiration time of sync (0) and async (1) requests, in ns. */
+@@ -233,15 +232,6 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
+ return NULL;
+ }
+
+-#define BFQ_MQ
+-#include "bfq-sched.c"
+-#include "bfq-cgroup-included.c"
+-
+-#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
+-#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
+-
+-#define bfq_sample_valid(samples) ((samples) > 80)
+-
+ /*
+ * Scheduler run of queue, if there are requests pending and no one in the
+ * driver that will restart queueing.
+@@ -255,6 +245,43 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ }
+
+ /*
++ * Next two functions release bfqd->lock and put the io context
++ * pointed by bfqd->ioc_to_put. This delayed put is used to not risk
++ * to take an ioc->lock while the scheduler lock is being held.
++ */
++static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
++{
++ struct io_context *ioc_to_put = bfqd->ioc_to_put;
++
++ bfqd->ioc_to_put = NULL;
++ spin_unlock_irq(&bfqd->lock);
++
++ if (ioc_to_put)
++ put_io_context(ioc_to_put);
++}
++
++static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd,
++ unsigned long flags)
++{
++ struct io_context *ioc_to_put = bfqd->ioc_to_put;
++
++ bfqd->ioc_to_put = NULL;
++ spin_unlock_irqrestore(&bfqd->lock, flags);
++
++ if (ioc_to_put)
++ put_io_context(ioc_to_put);
++}
++
++#define BFQ_MQ
++#include "bfq-sched.c"
++#include "bfq-cgroup-included.c"
++
++#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples) ((samples) > 80)
++
++/*
+ * Lifted from AS - choose which of rq1 and rq2 that is best served now.
+ * We choose the request that is closesr to the head right now. Distance
+ * behind the head is penalized and only allowed to a certain extent.
+@@ -4050,34 +4077,6 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ return rq;
+ }
+
+-/*
+- * Next two functions release bfqd->lock and put the io context
+- * pointed by bfqd->ioc_to_put. This delayed put is used to not risk
+- * to take an ioc->lock while the scheduler lock is being held.
+- */
+-static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
+-{
+- struct io_context *ioc_to_put = bfqd->ioc_to_put;
+-
+- bfqd->ioc_to_put = NULL;
+- spin_unlock_irq(&bfqd->lock);
+-
+- if (ioc_to_put)
+- put_io_context(ioc_to_put);
+-}
+-
+-static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd,
+- unsigned long flags)
+-{
+- struct io_context *ioc_to_put = bfqd->ioc_to_put;
+-
+- bfqd->ioc_to_put = NULL;
+- spin_unlock_irqrestore(&bfqd->lock, flags);
+-
+- if (ioc_to_put)
+- put_io_context(ioc_to_put);
+-}
+-
+ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+@@ -5239,6 +5238,10 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ }
+ eq->elevator_data = bfqd;
+
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
++
+ /*
+ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
+ * Grab a permanent reference to it, so that the normal code flow
+@@ -5261,12 +5264,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ bfqd->oom_bfqq.entity.prio_changed = 1;
+
+ bfqd->queue = q;
+-
+- bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
+- if (!bfqd->root_group)
+- goto out_free;
+- bfq_init_root_group(bfqd->root_group, bfqd);
+- bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
++ INIT_LIST_HEAD(&bfqd->dispatch);
+
+ hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+@@ -5324,9 +5322,27 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ bfqd->device_speed = BFQ_BFQD_FAST;
+
+ spin_lock_init(&bfqd->lock);
+- INIT_LIST_HEAD(&bfqd->dispatch);
+
+- q->elevator = eq;
++ /*
++ * The invocation of the next bfq_create_group_hierarchy
++ * function is the head of a chain of function calls
++ * (bfq_create_group_hierarchy->blkcg_activate_policy->
++ * blk_mq_freeze_queue) that may lead to the invocation of the
++ * has_work hook function. For this reason,
++ * bfq_create_group_hierarchy is invoked only after all
++ * scheduler data has been initialized, apart from the fields
++ * that can be initialized only after invoking
++ * bfq_create_group_hierarchy. This, in particular, enables
++ * has_work to correctly return false. Of course, to avoid
++ * other inconsistencies, the blk-mq stack must then refrain
++ * from invoking further scheduler hooks before this init
++ * function is finished.
++ */
++ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
++ if (!bfqd->root_group)
++ goto out_free;
++ bfq_init_root_group(bfqd->root_group, bfqd);
++ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
+
+ return 0;
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index bd83f1c02573..2c81c02bccc4 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -20,7 +20,7 @@
+ #include <linux/blk-cgroup.h>
+
+ /* see comments on CONFIG_BFQ_GROUP_IOSCHED in bfq.h */
+-#ifdef CONFIG_BFQ_MQ_GROUP_IOSCHED
++#ifdef CONFIG_MQ_BFQ_GROUP_IOSCHED
+ #define BFQ_GROUP_IOSCHED_ENABLED
+ #endif
+
+
+From 62d12db23ce14d2716b5cff7d2635fbc817b96d0 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 17 Mar 2017 06:15:18 +0100
+Subject: [PATCH 22/51] Remove all get and put of I/O contexts
+
+When a bfq queue is set in service and when it is merged, a reference
+to the I/O context associated with the queue is taken. This reference
+is then released when the queue is deselected from service or
+split. More precisely, the release of the reference is postponed to
+when the scheduler lock is released, to avoid nesting between the
+scheduler and the I/O-context lock. In fact, such nesting would lead
+to deadlocks, because of other code paths that take the same locks in
+the opposite order. This postponing of I/O-context releases does
+complicate the code.
+
+This commit addresses this issue by modifying the involved operations so
+that they no longer need to take the above I/O-context references. It
+then also removes all gets and releases of these references.
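+
+For reference, the nesting that both the old postponement and this change
+avoid is, schematically (illustration only):
+
+        spin_lock_irq(&bfqd->lock);     /* scheduler lock */
+        ...
+        put_io_context(ioc);            /* may take ioc->lock, i.e., the
+                                         * opposite order w.r.t. other paths */
+        spin_unlock_irq(&bfqd->lock);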
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 2 +-
+ block/bfq-mq-iosched.c | 127 ++++++++------------------------------------
+ block/bfq-mq.h | 11 ----
+ block/bfq-sched.c | 17 ------
+ 4 files changed, 22 insertions(+), 135 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index cf59eeb7f08e..dfacca799b5e 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -773,7 +773,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ bfq_put_async_queues(bfqd, bfqg);
+
+ #ifdef BFQ_MQ
+- bfq_unlock_put_ioc_restore(bfqd, flags);
++ spin_unlock_irqrestore(&bfqd->lock, flags);
+ #endif
+ /*
+ * @blkg is going offline and will be ignored by
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 02a1e7fd0ea4..8e7589d3280f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -244,34 +244,6 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ }
+ }
+
+-/*
+- * Next two functions release bfqd->lock and put the io context
+- * pointed by bfqd->ioc_to_put. This delayed put is used to not risk
+- * to take an ioc->lock while the scheduler lock is being held.
+- */
+-static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
+-{
+- struct io_context *ioc_to_put = bfqd->ioc_to_put;
+-
+- bfqd->ioc_to_put = NULL;
+- spin_unlock_irq(&bfqd->lock);
+-
+- if (ioc_to_put)
+- put_io_context(ioc_to_put);
+-}
+-
+-static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd,
+- unsigned long flags)
+-{
+- struct io_context *ioc_to_put = bfqd->ioc_to_put;
+-
+- bfqd->ioc_to_put = NULL;
+- spin_unlock_irqrestore(&bfqd->lock, flags);
+-
+- if (ioc_to_put)
+- put_io_context(ioc_to_put);
+-}
+-
+ #define BFQ_MQ
+ #include "bfq-sched.c"
+ #include "bfq-cgroup-included.c"
+@@ -1747,7 +1719,6 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+ if (free)
+ blk_mq_free_request(free);
+ bfqd->bio_bfqq_set = false;
+- BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ return ret;
+@@ -1812,7 +1783,6 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ bfq_updated_next_req(bfqd, bfqq);
+ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
+- BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+ }
+ }
+@@ -1858,7 +1828,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+
+ bfq_remove_request(q, next);
+
+- BUG_ON(bfqq->bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqq->bfqd->lock);
+ end:
+ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
+@@ -2035,20 +2004,18 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ * first time that the requests of some process are redirected to
+ * it.
+ *
+- * We redirect bfqq to new_bfqq and not the opposite, because we
+- * are in the context of the process owning bfqq, hence we have
+- * the io_cq of this process. So we can immediately configure this
+- * io_cq to redirect the requests of the process to new_bfqq.
++ * We redirect bfqq to new_bfqq and not the opposite, because
++ * we are in the context of the process owning bfqq, thus we
++ * have the io_cq of this process. So we can immediately
++ * configure this io_cq to redirect the requests of the
++ * process to new_bfqq. In contrast, the io_cq of new_bfqq is
++ * not available any more (new_bfqq->bic == NULL).
+ *
+- * NOTE, even if new_bfqq coincides with the in-service queue, the
+- * io_cq of new_bfqq is not available, because, if the in-service
+- * queue is shared, bfqd->in_service_bic may not point to the
+- * io_cq of the in-service queue.
+- * Redirecting the requests of the process owning bfqq to the
+- * currently in-service queue is in any case the best option, as
+- * we feed the in-service queue with new requests close to the
+- * last request served and, by doing so, hopefully increase the
+- * throughput.
++ * Anyway, even if new_bfqq coincides with the in-service
++ * queue, redirecting requests to the in-service queue is the
++ * best option, as we feed the in-service queue with new
++ * requests close to the last request served and, by doing so,
++ * are likely to increase the throughput.
+ */
+ bfqq->new_bfqq = new_bfqq;
+ new_bfqq->ref += process_refs;
+@@ -2147,13 +2114,13 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ in_service_bfqq = bfqd->in_service_queue;
+
+ if (in_service_bfqq && in_service_bfqq != bfqq &&
+- bfqd->in_service_bic && wr_from_too_long(in_service_bfqq)
++ wr_from_too_long(in_service_bfqq)
+ && likely(in_service_bfqq == &bfqd->oom_bfqq))
+ bfq_log_bfqq(bfqd, bfqq,
+ "would have tried merge with in-service-queue, but wr");
+
+- if (!in_service_bfqq || in_service_bfqq == bfqq ||
+- !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) ||
++ if (!in_service_bfqq || in_service_bfqq == bfqq
++ || wr_from_too_long(in_service_bfqq) ||
+ unlikely(in_service_bfqq == &bfqd->oom_bfqq))
+ goto check_scheduled;
+
+@@ -2214,16 +2181,6 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+ }
+
+-static void bfq_get_bic_reference(struct bfq_queue *bfqq)
+-{
+- /*
+- * If bfqq->bic has a non-NULL value, the bic to which it belongs
+- * is about to begin using a shared bfq_queue.
+- */
+- if (bfqq->bic)
+- atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
+-}
+-
+ static void
+ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+@@ -2280,12 +2237,6 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+ bfqd->wr_busy_queues);
+
+ /*
+- * Grab a reference to the bic, to prevent it from being destroyed
+- * before being possibly touched by a bfq_split_bfqq().
+- */
+- bfq_get_bic_reference(bfqq);
+- bfq_get_bic_reference(new_bfqq);
+- /*
+ * Merge queues (that is, let bic redirect its requests to new_bfqq)
+ */
+ bic_set_bfqq(bic, new_bfqq, 1);
+@@ -2472,16 +2423,10 @@ static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
+ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
+ {
+ struct bfq_queue *bfqq = bfqd->in_service_queue;
+- struct bfq_io_cq *bic;
+ u32 sl;
+
+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
+
+- /* Processes have exited, don't wait. */
+- bic = bfqd->in_service_bic;
+- if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0)
+- return;
+-
+ bfq_mark_bfqq_wait_request(bfqq);
+
+ /*
+@@ -3922,11 +3867,6 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
+ bfq_bfqq_budget_left(bfqq),
+ bfqq->dispatched);
+
+- if (!bfqd->in_service_bic) {
+- atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
+- bfqd->in_service_bic = RQ_BIC(rq);
+- }
+-
+ /*
+ * Expire bfqq, pretending that its budget expired, if bfqq
+ * belongs to CLASS_IDLE and other queues are waiting for
+@@ -4085,7 +4025,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ spin_lock_irq(&bfqd->lock);
+
+ rq = __bfq_dispatch_request(hctx);
+- bfq_unlock_put_ioc(bfqd);
++ spin_unlock_irq(&bfqd->lock);
+
+ return rq;
+ }
+@@ -4184,21 +4124,10 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
+- BUG_ON(bfqd->ioc_to_put);
+- /*
+- * If the bic is using a shared queue, put the
+- * reference taken on the io_context when the bic
+- * started using a shared bfq_queue. This put cannot
+- * make ioc->ref_count reach 0, so no ioc->lock
+- * risks being taken (which could lead to deadlock
+- * scenarios).
+- */
+- if (is_sync && bfq_bfqq_coop(bfqq))
+- put_io_context(bic->icq.ioc);
+
+ bfq_exit_bfqq(bfqd, bfqq);
+ bic_set_bfqq(bic, NULL, is_sync);
+- bfq_unlock_put_ioc_restore(bfqd, flags);
++ spin_unlock_irqrestore(&bfqd->lock, flags);
+ }
+ }
+
+@@ -4633,12 +4562,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ spin_lock_irq(&bfqd->lock);
+ if (blk_mq_sched_try_insert_merge(q, rq)) {
+- BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+ return;
+ }
+
+- BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ blk_mq_sched_request_inserted(rq);
+@@ -4671,7 +4598,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ }
+ }
+
+- bfq_unlock_put_ioc(bfqd);
++ spin_unlock_irq(&bfqd->lock);
+ }
+
+ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
+@@ -4864,12 +4791,11 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
+- BUG_ON(bfqd->ioc_to_put);
+
+ bfq_completed_request(bfqq, bfqd);
+ bfq_put_rq_priv_body(bfqq);
+
+- bfq_unlock_put_ioc_restore(bfqd, flags);
++ spin_unlock_irqrestore(&bfqd->lock, flags);
+ } else {
+ /*
+ * Request rq may be still/already in the scheduler,
+@@ -4992,7 +4918,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
+ &new_queue);
+- BUG_ON(bfqd->ioc_to_put);
+
+ if (unlikely(!new_queue)) {
+ /* If the queue was seeky for too long, break it apart. */
+@@ -5005,14 +4930,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ bic->saved_in_large_burst = true;
+
+ bfqq = bfq_split_bfqq(bic, bfqq);
+- /*
+- * A reference to bic->icq.ioc needs to be
+- * released after a queue split. Do not do it
+- * immediately, to not risk to possibly take
+- * an ioc->lock while holding the scheduler
+- * lock.
+- */
+- bfqd->ioc_to_put = bic->icq.ioc;
+
+ if (!bfqq)
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
+@@ -5045,7 +4962,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ */
+ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
+ bfqq->bic = bic;
+- if (bfqd->ioc_to_put) { /* if true, then there has been a split */
++ if (split) {
+ /*
+ * The queue has just been split from a shared
+ * queue: restore the idle window and the
+@@ -5059,7 +4976,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ if (unlikely(bfq_bfqq_just_created(bfqq)))
+ bfq_handle_burst(bfqd, bfqq);
+
+- bfq_unlock_put_ioc(bfqd);
++ spin_unlock_irq(&bfqd->lock);
+
+ return 0;
+
+@@ -5077,7 +4994,6 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+
+ BUG_ON(!bfqd);
+ spin_lock_irqsave(&bfqd->lock, flags);
+- BUG_ON(bfqd->ioc_to_put);
+
+ bfq_log_bfqq(bfqd, bfqq, "handling slice_timer expiration");
+ bfq_clear_bfqq_wait_request(bfqq);
+@@ -5108,7 +5024,7 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+ bfq_bfqq_expire(bfqd, bfqq, true, reason);
+
+ schedule_dispatch:
+- bfq_unlock_put_ioc_restore(bfqd, flags);
++ spin_unlock_irqrestore(&bfqd->lock, flags);
+ bfq_schedule_dispatch(bfqd);
+ }
+
+@@ -5186,7 +5102,6 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ spin_lock_irq(&bfqd->lock);
+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
+ bfq_deactivate_bfqq(bfqd, bfqq, false, false);
+- BUG_ON(bfqd->ioc_to_put);
+ spin_unlock_irq(&bfqd->lock);
+
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 2c81c02bccc4..36ee24a87dda 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -458,8 +458,6 @@ struct bfq_data {
+
+ /* bfq_queue in service */
+ struct bfq_queue *in_service_queue;
+- /* bfq_io_cq (bic) associated with the @in_service_queue */
+- struct bfq_io_cq *in_service_bic;
+
+ /* on-disk position of the last served request */
+ sector_t last_position;
+@@ -621,15 +619,6 @@ struct bfq_data {
+ struct bfq_queue *bio_bfqq;
+ /* Extra flag used only for TESTING */
+ bool bio_bfqq_set;
+-
+- /*
+- * io context to put right after bfqd->lock is released. This
+- * filed is used to perform put_io_context, when needed, to
+- * after the scheduler lock has been released, and thus
+- * prevent an ioc->lock from being possibly taken while the
+- * scheduler lock is being held.
+- */
+- struct io_context *ioc_to_put;
+ };
+
+ enum bfqq_state_flags {
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 85e59eeb3569..9c4e6797d8c9 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1904,23 +1904,6 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+ struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
+ struct bfq_entity *entity = in_serv_entity;
+
+- if (bfqd->in_service_bic) {
+-#ifdef BFQ_MQ
+- BUG_ON(bfqd->ioc_to_put);
+- /*
+- * Schedule the release of a reference to
+- * bfqd->in_service_bic->icq.ioc to right after the
+- * scheduler lock is released. This ioc is not
+- * released immediately, to not risk to possibly take
+- * an ioc->lock while holding the scheduler lock.
+- */
+- bfqd->ioc_to_put = bfqd->in_service_bic->icq.ioc;
+-#else
+- put_io_context(bfqd->in_service_bic->icq.ioc);
+-#endif
+- bfqd->in_service_bic = NULL;
+- }
+-
+ bfq_clear_bfqq_wait_request(in_serv_bfqq);
+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
+ bfqd->in_service_queue = NULL;
+
+From 1521ad11f8684cf0a1b7249249cd406fee50da6d Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 29 Mar 2017 18:41:46 +0200
+Subject: [PATCH 23/51] BUGFIX: Remove unneeded and deadlock-causing lock in
+ request_merged
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 8e7589d3280f..bb046335ff4f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1761,7 +1761,6 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ BUG_ON(RQ_BFQQ(req) != bfqq);
+ elv_rb_add(&bfqq->sort_list, req);
+
+- spin_lock_irq(&bfqd->lock);
+ /* Choose next request to be served for bfqq */
+ prev = bfqq->next_rq;
+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
+@@ -1783,7 +1782,6 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ bfq_updated_next_req(bfqd, bfqq);
+ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
+- spin_unlock_irq(&bfqd->lock);
+ }
+ }
+
+
+From 9136b4c953918ea937254c57cfb787b55b5bc2c6 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 29 Mar 2017 18:55:30 +0200
+Subject: [PATCH 24/51] Fix wrong unlikely
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index bb046335ff4f..3ae9bd424b3f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4917,7 +4917,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
+ &new_queue);
+
+- if (unlikely(!new_queue)) {
++ if (likely(!new_queue)) {
+ /* If the queue was seeky for too long, break it apart. */
+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
+ BUG_ON(!is_sync);
+
+From 8e05f722f19645f2278f6962368ca3b7c2a81c9c Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 12 May 2017 09:51:18 +0200
+Subject: [PATCH 25/51] Change cgroup params prefix to bfq-mq for bfq-mq
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 54 ++++++++++++++++++++++++++-------------------
+ 1 file changed, 31 insertions(+), 23 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index dfacca799b5e..9e9b0a09e26f 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -995,9 +995,15 @@ bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
+ return blkg_to_bfqg(bfqd->queue->root_blkg);
+ }
+
++#ifdef BFQ_MQ
++#define BFQ_CGROUP_FNAME(param) "bfq-mq."#param
++#else
++#define BFQ_CGROUP_FNAME(param) "bfq."#param
++#endif
++
+ static struct cftype bfq_blkcg_legacy_files[] = {
+ {
+- .name = "bfq.weight",
++ .name = BFQ_CGROUP_FNAME(weight),
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = bfq_io_show_weight,
+ .write_u64 = bfq_io_set_weight_legacy,
+@@ -1005,106 +1011,106 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+
+ /* statistics, covers only the tasks in the bfqg */
+ {
+- .name = "bfq.time",
++ .name = BFQ_CGROUP_FNAME(time),
+ .private = offsetof(struct bfq_group, stats.time),
+ .seq_show = bfqg_print_stat,
+ },
+ {
+- .name = "bfq.sectors",
++ .name = BFQ_CGROUP_FNAME(sectors),
+ .seq_show = bfqg_print_stat_sectors,
+ },
+ {
+- .name = "bfq.io_service_bytes",
++ .name = BFQ_CGROUP_FNAME(io_service_bytes),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_bytes,
+ },
+ {
+- .name = "bfq.io_serviced",
++ .name = BFQ_CGROUP_FNAME(io_serviced),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_ios,
+ },
+ {
+- .name = "bfq.io_service_time",
++ .name = BFQ_CGROUP_FNAME(io_service_time),
+ .private = offsetof(struct bfq_group, stats.service_time),
+ .seq_show = bfqg_print_rwstat,
+ },
+ {
+- .name = "bfq.io_wait_time",
++ .name = BFQ_CGROUP_FNAME(io_wait_time),
+ .private = offsetof(struct bfq_group, stats.wait_time),
+ .seq_show = bfqg_print_rwstat,
+ },
+ {
+- .name = "bfq.io_merged",
++ .name = BFQ_CGROUP_FNAME(io_merged),
+ .private = offsetof(struct bfq_group, stats.merged),
+ .seq_show = bfqg_print_rwstat,
+ },
+ {
+- .name = "bfq.io_queued",
++ .name = BFQ_CGROUP_FNAME(io_queued),
+ .private = offsetof(struct bfq_group, stats.queued),
+ .seq_show = bfqg_print_rwstat,
+ },
+
+ /* the same statictics which cover the bfqg and its descendants */
+ {
+- .name = "bfq.time_recursive",
++ .name = BFQ_CGROUP_FNAME(time_recursive),
+ .private = offsetof(struct bfq_group, stats.time),
+ .seq_show = bfqg_print_stat_recursive,
+ },
+ {
+- .name = "bfq.sectors_recursive",
++ .name = BFQ_CGROUP_FNAME(sectors_recursive),
+ .seq_show = bfqg_print_stat_sectors_recursive,
+ },
+ {
+- .name = "bfq.io_service_bytes_recursive",
++ .name = BFQ_CGROUP_FNAME(io_service_bytes_recursive),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_bytes_recursive,
+ },
+ {
+- .name = "bfq.io_serviced_recursive",
++ .name = BFQ_CGROUP_FNAME(io_serviced_recursive),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_ios_recursive,
+ },
+ {
+- .name = "bfq.io_service_time_recursive",
++ .name = BFQ_CGROUP_FNAME(io_service_time_recursive),
+ .private = offsetof(struct bfq_group, stats.service_time),
+ .seq_show = bfqg_print_rwstat_recursive,
+ },
+ {
+- .name = "bfq.io_wait_time_recursive",
++ .name = BFQ_CGROUP_FNAME(io_wait_time_recursive),
+ .private = offsetof(struct bfq_group, stats.wait_time),
+ .seq_show = bfqg_print_rwstat_recursive,
+ },
+ {
+- .name = "bfq.io_merged_recursive",
++ .name = BFQ_CGROUP_FNAME(io_merged_recursive),
+ .private = offsetof(struct bfq_group, stats.merged),
+ .seq_show = bfqg_print_rwstat_recursive,
+ },
+ {
+- .name = "bfq.io_queued_recursive",
++ .name = BFQ_CGROUP_FNAME(io_queued_recursive),
+ .private = offsetof(struct bfq_group, stats.queued),
+ .seq_show = bfqg_print_rwstat_recursive,
+ },
+ {
+- .name = "bfq.avg_queue_size",
++ .name = BFQ_CGROUP_FNAME(avg_queue_size),
+ .seq_show = bfqg_print_avg_queue_size,
+ },
+ {
+- .name = "bfq.group_wait_time",
++ .name = BFQ_CGROUP_FNAME(group_wait_time),
+ .private = offsetof(struct bfq_group, stats.group_wait_time),
+ .seq_show = bfqg_print_stat,
+ },
+ {
+- .name = "bfq.idle_time",
++ .name = BFQ_CGROUP_FNAME(idle_time),
+ .private = offsetof(struct bfq_group, stats.idle_time),
+ .seq_show = bfqg_print_stat,
+ },
+ {
+- .name = "bfq.empty_time",
++ .name = BFQ_CGROUP_FNAME(empty_time),
+ .private = offsetof(struct bfq_group, stats.empty_time),
+ .seq_show = bfqg_print_stat,
+ },
+ {
+- .name = "bfq.dequeue",
++ .name = BFQ_CGROUP_FNAME(dequeue),
+ .private = offsetof(struct bfq_group, stats.dequeue),
+ .seq_show = bfqg_print_stat,
+ },
+@@ -1113,7 +1119,7 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+
+ static struct cftype bfq_blkg_files[] = {
+ {
+- .name = "bfq.weight",
++ .name = BFQ_CGROUP_FNAME(weight),
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = bfq_io_show_weight,
+ .write = bfq_io_set_weight,
+@@ -1121,6 +1127,8 @@ static struct cftype bfq_blkg_files[] = {
+ {} /* terminate */
+ };
+
++#undef BFQ_CGROUP_FNAME
++
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
+
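The BFQ_CGROUP_FNAME macro introduced in the hunk above builds the cgroup file names at compile time through preprocessor stringification plus string-literal concatenation: "bfq-mq." #param pastes the instance prefix and the stringized parameter into one constant. A minimal, self-contained C sketch of the same trick, with purely illustrative names rather than the kernel's:

#include <stdio.h>

/* Stand-in for the BFQ_MQ conditional: pick a prefix per scheduler flavour. */
#ifdef DEMO_MQ
#define DEMO_CGROUP_FNAME(param) "demo-mq." #param
#else
#define DEMO_CGROUP_FNAME(param) "demo." #param
#endif

int main(void)
{
	/* #param stringizes the token; adjacent string literals are merged. */
	printf("%s\n", DEMO_CGROUP_FNAME(weight));     /* prints "demo.weight" */
	printf("%s\n", DEMO_CGROUP_FNAME(io_queued));  /* prints "demo.io_queued" */
	return 0;
}

This is what lets a single table of cftype entries serve both instances: only the prefix macro changes.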
+From abdf7565dadbb00e78be5f4fb2cc9b157649840e Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 12 May 2017 11:56:13 +0200
+Subject: [PATCH 26/51] Add tentative extra tests on groups, reqs and queues
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 1 +
+ block/bfq-mq-iosched.c | 5 +++++
+ include/linux/blkdev.h | 2 ++
+ 3 files changed, 8 insertions(+)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 9e9b0a09e26f..72107ad12220 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -412,6 +412,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
+ BUG_ON(!blkg);
+ bfqg = blkg_to_bfqg(blkg);
+ bfqd = blkg->q->elevator->elevator_data;
++ BUG_ON(bfqg == bfqd->root_group);
+ entity = &bfqg->entity;
+ d = blkcg_to_bfqgd(blkg->blkcg);
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 3ae9bd424b3f..a9e3406fef06 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4494,6 +4494,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
++ BUG_ON(!bfqq);
+
+ assert_spin_locked(&bfqd->lock);
+
+@@ -4587,6 +4588,9 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ "insert_request %p in disp: at_head %d",
+ rq, at_head);
+ } else {
++ BUG_ON(!(rq->rq_flags & RQF_GOT));
++ rq->rq_flags &= ~RQF_GOT;
++
+ __bfq_insert_request(bfqd, rq);
+
+ if (rq_mergeable(rq)) {
+@@ -4974,6 +4978,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ if (unlikely(bfq_bfqq_just_created(bfqq)))
+ bfq_handle_burst(bfqd, bfqq);
+
++ rq->rq_flags |= RQF_GOT;
+ spin_unlock_irq(&bfqd->lock);
+
+ return 0;
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 0048e59e6d07..9ae814743095 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -123,6 +123,8 @@ typedef __u32 __bitwise req_flags_t;
+ #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
+ /* DEBUG: rq in bfq-mq dispatch list */
+ #define RQF_DISP_LIST ((__force req_flags_t)(1 << 19))
++/* DEBUG: rq had get_rq_private executed on it */
++#define RQF_GOT ((__force req_flags_t)(1 << 20))
+
+ /* flags that prevent us from merging requests: */
+ #define RQF_NOMERGE_FLAGS \
+
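The RQF_GOT flag added by the patch above is purely a debugging aid: it is set when the request's private data is prepared and checked (then cleared) when the request is inserted, so a request that reaches insertion without having gone through preparation trips the BUG_ON. A small sketch of that set/check/clear pattern on a generic flags word; the type and names below are illustrative only:

#include <assert.h>
#include <stdint.h>

#define REQ_FLAG_PREPARED (UINT32_C(1) << 0)   /* plays the role of RQF_GOT */

struct demo_request {
	uint32_t flags;
};

static void demo_prepare(struct demo_request *rq)
{
	rq->flags |= REQ_FLAG_PREPARED;            /* mark: preparation ran */
}

static void demo_insert(struct demo_request *rq)
{
	assert(rq->flags & REQ_FLAG_PREPARED);     /* catch a skipped prepare */
	rq->flags &= ~REQ_FLAG_PREPARED;           /* one-shot: clear for reuse */
}

int main(void)
{
	struct demo_request rq = { .flags = 0 };

	demo_prepare(&rq);
	demo_insert(&rq);
	return 0;
}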
+From 9e1c1514bc947c4e04502331372b1cc58459d8d1 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 15 May 2017 22:25:03 +0200
+Subject: [PATCH 27/51] block, bfq-mq: access and cache blkg data only when
+ safe
+
+In blk-cgroup, operations on blkg objects are protected with the
+request_queue lock. This is no longer the lock that protects
+I/O-scheduler operations in blk-mq. In fact, the latter are now
+protected with a finer-grained per-scheduler-instance lock. As a
+consequence, although blkg lookups are also rcu-protected, blk-mq I/O
+schedulers may see inconsistent data when they access blkg and
+blkg-related objects. BFQ does access these objects, and does incur
+this problem, in the following case.
+
+The blkg_lookup performed in bfq_get_queue, being protected (only)
+through rcu, may happen to return the address of a copy of the
+original blkg. If this is the case, then the blkg_get performed in
+bfq_get_queue, to pin down the blkg, is useless: it does not prevent
+blk-cgroup code from destroying both the original blkg and all objects
+directly or indirectly referred by the copy of the blkg. BFQ accesses
+these objects, which typically causes a crash due to a NULL-pointer
+dereference or a memory-protection violation.
+
+Some additional protection mechanism should be added to blk-cgroup to
+address this issue. In the meantime, this commit provides a quick
+temporary fix for BFQ: cache (when safe) blkg data that might
+disappear right after a blkg_lookup.
+
+In particular, this commit exploits the following facts to achieve its
+goal without introducing further locks. Destroy operations on a blkg
+invoke, as a first step, hooks of the scheduler associated with the
+blkg. And these hooks are executed with bfqd->lock held for BFQ. As a
+consequence, for any blkg associated with the request queue an
+instance of BFQ is attached to, we are guaranteed that such a blkg is
+not destroyed, and that all the pointers it contains are consistent,
+while that instance is holding its bfqd->lock. A blkg_lookup performed
+with bfqd->lock held then returns a fully consistent blkg, which
+remains consistent until this lock is held. In more detail, this holds
+even if the returned blkg is a copy of the original one.
+
+Finally, also the object describing a group inside BFQ needs to be
+protected from destruction on the blkg_free of the original blkg
+(which invokes bfq_pd_free). This commit adds private refcounting for
+this object, to let it disappear only after no bfq_queue refers to it
+any longer.
+
+This commit also removes or updates some stale comments on locking
+issues related to blk-cgroup operations.
+
+Reported-by: Tomas Konir <tomas.konir@gmail.com>
+Reported-by: Lee Tibbert <lee.tibbert@gmail.com>
+Reported-by: Marco Piazza <mpiazza@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Tomas Konir <tomas.konir@gmail.com>
+Tested-by: Lee Tibbert <lee.tibbert@gmail.com>
+Tested-by: Marco Piazza <mpiazza@gmail.com>
+---
+ block/bfq-cgroup-included.c | 149 ++++++++++++++++++++++++++++++++++++++++----
+ block/bfq-mq-iosched.c | 2 +-
+ block/bfq-mq.h | 26 +++-----
+ 3 files changed, 148 insertions(+), 29 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 72107ad12220..d903393ee78a 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -43,7 +43,11 @@ BFQG_FLAG_FNS(idling)
+ BFQG_FLAG_FNS(empty)
+ #undef BFQG_FLAG_FNS
+
++#ifdef BFQ_MQ
++/* This should be called with the scheduler lock held. */
++#else
+ /* This should be called with the queue_lock held. */
++#endif
+ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
+ {
+ unsigned long long now;
+@@ -58,7 +62,11 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
+ bfqg_stats_clear_waiting(stats);
+ }
+
++#ifdef BFQ_MQ
++/* This should be called with the scheduler lock held. */
++#else
+ /* This should be called with the queue_lock held. */
++#endif
+ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
+ struct bfq_group *curr_bfqg)
+ {
+@@ -72,7 +80,11 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
+ bfqg_stats_mark_waiting(stats);
+ }
+
++#ifdef BFQ_MQ
++/* This should be called with the scheduler lock held. */
++#else
+ /* This should be called with the queue_lock held. */
++#endif
+ static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
+ {
+ unsigned long long now;
+@@ -198,14 +210,43 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
+
+ static void bfqg_get(struct bfq_group *bfqg)
+ {
+- return blkg_get(bfqg_to_blkg(bfqg));
++#ifdef BFQ_MQ
++ bfqg->ref++;
++#else
++ blkg_get(bfqg_to_blkg(bfqg));
++#endif
+ }
+
+ static void bfqg_put(struct bfq_group *bfqg)
+ {
+- return blkg_put(bfqg_to_blkg(bfqg));
++#ifdef BFQ_MQ
++ bfqg->ref--;
++
++ BUG_ON(bfqg->ref < 0);
++ if (bfqg->ref == 0)
++ kfree(bfqg);
++#else
++ blkg_put(bfqg_to_blkg(bfqg));
++#endif
++}
++
++#ifdef BFQ_MQ
++static void bfqg_and_blkg_get(struct bfq_group *bfqg)
++{
++ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
++ bfqg_get(bfqg);
++
++ blkg_get(bfqg_to_blkg(bfqg));
+ }
+
++static void bfqg_and_blkg_put(struct bfq_group *bfqg)
++{
++ bfqg_put(bfqg);
++
++ blkg_put(bfqg_to_blkg(bfqg));
++}
++#endif
++
+ static void bfqg_stats_update_io_add(struct bfq_group *bfqg,
+ struct bfq_queue *bfqq,
+ unsigned int op)
+@@ -310,7 +351,15 @@ static void bfq_init_entity(struct bfq_entity *entity,
+ if (bfqq) {
+ bfqq->ioprio = bfqq->new_ioprio;
+ bfqq->ioprio_class = bfqq->new_ioprio_class;
++#ifdef BFQ_MQ
++ /*
++ * Make sure that bfqg and its associated blkg do not
++ * disappear before entity.
++ */
++ bfqg_and_blkg_get(bfqg);
++#else
+ bfqg_get(bfqg);
++#endif
+ }
+ entity->parent = bfqg->my_entity; /* NULL for root group */
+ entity->sched_data = &bfqg->sched_data;
+@@ -397,6 +446,10 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
+ return NULL;
+ }
+
++#ifdef BFQ_MQ
++ /* see comments in bfq_bic_update_cgroup for why refcounting */
++ bfqg_get(bfqg);
++#endif
+ return &bfqg->pd;
+ }
+
+@@ -432,7 +485,11 @@ static void bfq_pd_free(struct blkg_policy_data *pd)
+ struct bfq_group *bfqg = pd_to_bfqg(pd);
+
+ bfqg_stats_exit(&bfqg->stats);
+- return kfree(bfqg);
++#ifdef BFQ_MQ
++ bfqg_put(bfqg);
++#else
++ kfree(bfqg);
++#endif
+ }
+
+ static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
+@@ -516,9 +573,16 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
+ * it on the new one. Avoid putting the entity on the old group idle tree.
+ *
++#ifdef BFQ_MQ
++ * Must be called under the scheduler lock, to make sure that the blkg
++ * owning @bfqg does not disappear (see comments in
++ * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
++ * objects).
++#else
+ * Must be called under the queue lock; the cgroup owning @bfqg must
+ * not disappear (by now this just means that we are called under
+ * rcu_read_lock()).
++#endif
+ */
+ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ struct bfq_group *bfqg)
+@@ -555,16 +619,20 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ entity->tree);
+ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
+ }
++#ifdef BFQ_MQ
++ bfqg_and_blkg_put(bfqq_group(bfqq));
++#else
+ bfqg_put(bfqq_group(bfqq));
++#endif
+
+- /*
+- * Here we use a reference to bfqg. We don't need a refcounter
+- * as the cgroup reference will not be dropped, so that its
+- * destroy() callback will not be invoked.
+- */
+ entity->parent = bfqg->my_entity;
+ entity->sched_data = &bfqg->sched_data;
++#ifdef BFQ_MQ
++ /* pin down bfqg and its associated blkg */
++ bfqg_and_blkg_get(bfqg);
++#else
+ bfqg_get(bfqg);
++#endif
+
+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq));
+ if (bfq_bfqq_busy(bfqq)) {
+@@ -585,8 +653,14 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * @bic: the bic to move.
+ * @blkcg: the blk-cgroup to move to.
+ *
++#ifdef BFQ_MQ
++ * Move bic to blkcg, assuming that bfqd->lock is held; which makes
++ * sure that the reference to cgroup is valid across the call (see
++ * comments in bfq_bic_update_cgroup on this issue)
++#else
+ * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
+ * has to make sure that the reference to cgroup is valid across the call.
++#endif
+ *
+ * NOTE: an alternative approach might have been to store the current
+ * cgroup in bfqq and getting a reference to it, reducing the lookup
+@@ -645,6 +719,59 @@ static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
+ goto out;
+
+ bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
++#ifdef BFQ_MQ
++ /*
++ * Update blkg_path for bfq_log_* functions. We cache this
++ * path, and update it here, for the following
++ * reasons. Operations on blkg objects in blk-cgroup are
++ * protected with the request_queue lock, and not with the
++ * lock that protects the instances of this scheduler
++ * (bfqd->lock). This exposes BFQ to the following sort of
++ * race.
++ *
++ * The blkg_lookup performed in bfq_get_queue, protected
++ * through rcu, may happen to return the address of a copy of
++ * the original blkg. If this is the case, then the
++ * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
++ * the blkg, is useless: it does not prevent blk-cgroup code
++ * from destroying both the original blkg and all objects
++ * directly or indirectly referred by the copy of the
++ * blkg.
++ *
++ * On the bright side, destroy operations on a blkg invoke, as
++ * a first step, hooks of the scheduler associated with the
++ * blkg. And these hooks are executed with bfqd->lock held for
++ * BFQ. As a consequence, for any blkg associated with the
++ * request queue this instance of the scheduler is attached
++ * to, we are guaranteed that such a blkg is not destroyed, and
++ * that all the pointers it contains are consistent, while we
++ * are holding bfqd->lock. A blkg_lookup performed with
++ * bfqd->lock held then returns a fully consistent blkg, which
++ * remains consistent as long as this lock is held.
++ *
++ * Thanks to the last fact, and to the fact that: (1) bfqg has
++ * been obtained through a blkg_lookup in the above
++ * assignment, and (2) bfqd->lock is being held, here we can
++ * safely use the policy data for the involved blkg (i.e., the
++ * field bfqg->pd) to get to the blkg associated with bfqg,
++ * and then we can safely use any field of blkg. After we
++ * release bfqd->lock, even just getting blkg through this
++ * bfqg may cause dangling references to be traversed, as
++ * bfqg->pd may not exist any more.
++ *
++ * In view of the above facts, here we cache, in the bfqg, any
++ * blkg data we may need for this bic, and for its associated
++ * bfq_queue. As of now, we need to cache only the path of the
++ * blkg, which is used in the bfq_log_* functions.
++ *
++ * Finally, note that bfqg itself needs to be protected from
++ * destruction on the blkg_free of the original blkg (which
++ * invokes bfq_pd_free). We use an additional private
++ * refcounter for bfqg, to let it disappear only after no
++ * bfq_queue refers to it any longer.
++ */
++ blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
++#endif
+ bic->blkcg_serial_nr = serial_nr;
+ out:
+ rcu_read_unlock();
+@@ -682,8 +809,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
+ * @bfqd: the device data structure with the root group.
+ * @bfqg: the group to move from.
+ * @st: the service tree with the entities.
+- *
+- * Needs queue_lock to be taken and reference to be valid over the call.
+ */
+ static void bfq_reparent_active_entities(struct bfq_data *bfqd,
+ struct bfq_group *bfqg,
+@@ -736,6 +861,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ #ifdef BFQ_MQ
+ spin_lock_irqsave(&bfqd->lock, flags);
+ #endif
++
+ /*
+ * Empty all service_trees belonging to this group before
+ * deactivating the group itself.
+@@ -746,8 +872,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ /*
+ * The idle tree may still contain bfq_queues belonging
+ * to exited task because they never migrated to a different
+- * cgroup from the one being destroyed now. No one else
+- * can access them so it's safe to act without any lock.
++ * cgroup from the one being destroyed now.
+ */
+ bfq_flush_idle_tree(st);
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index a9e3406fef06..4eb668eeacdc 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4073,7 +4073,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+
+ kmem_cache_free(bfq_pool, bfqq);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+- bfqg_put(bfqg);
++ bfqg_and_blkg_put(bfqg);
+ #endif
+ }
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 36ee24a87dda..77ab0f22ed22 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -695,23 +695,17 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+- char __pbuf[128]; \
+- \
+- blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+ pr_crit("%s bfq%d%c %s " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- __pbuf, ##args); \
++ bfqq_group(bfqq)->blkg_path, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+- char __pbuf[128]; \
+- \
+- blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
+ pr_crit("%s %s " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+- __pbuf, ##args); \
++ bfqg->blkg_path, ##args); \
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+@@ -736,20 +730,14 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+- char __pbuf[128]; \
+- \
+- blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- __pbuf, ##args); \
++ bfqq_group(bfqq)->blkg_path, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+- char __pbuf[128]; \
+- \
+- blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
+- blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
++ blk_add_trace_msg((bfqd)->queue, "%s " fmt, bfqg->blkg_path, ##args);\
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+@@ -860,6 +848,12 @@ struct bfq_group {
+ /* must be the first member */
+ struct blkg_policy_data pd;
+
++ /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
++ char blkg_path[128];
++
++ /* reference counter (see comments in bfq_bic_update_cgroup) */
++ int ref;
++
+ struct bfq_entity entity;
+ struct bfq_sched_data sched_data;
+
+
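The private ref counter that the patch above adds to struct bfq_group follows the plainest get/put discipline: the allocation path holds one reference, every user takes another, and the object is freed when the count drops to zero. The sketch below reproduces that discipline in plain C; it is single-threaded and deliberately ignores locking (in the patch, serialization comes from bfqd->lock), and all names are illustrative:

#include <assert.h>
#include <stdlib.h>

/* Simplified stand-in for struct bfq_group and its private "ref" field. */
struct demo_group {
	int ref;
	/* ... payload ... */
};

static struct demo_group *demo_group_alloc(void)
{
	struct demo_group *g = calloc(1, sizeof(*g));

	if (g)
		g->ref = 1;        /* reference held by the allocation path */
	return g;
}

static void demo_group_get(struct demo_group *g)
{
	g->ref++;                  /* e.g. a queue starts pointing at the group */
}

static void demo_group_put(struct demo_group *g)
{
	assert(g->ref > 0);
	if (--g->ref == 0)
		free(g);           /* last user gone: now it is safe to free */
}

int main(void)
{
	struct demo_group *g = demo_group_alloc();

	if (!g)
		return 1;
	demo_group_get(g);         /* a queue pins the group */
	demo_group_put(g);         /* the queue drops its reference */
	demo_group_put(g);         /* allocation reference: frees the group */
	return 0;
}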
+From c9137b749aceef6c2dde88e99b2fc978d5952e76 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Sat, 17 Jun 2017 11:18:11 +0200
+Subject: [PATCH 28/51] bfq-mq: fix macro name in conditional invocation of
+ policy_unregister
+
+This commit fixes the name of the macro in the conditional group that
+invokes blkcg_policy_unregister in bfq_exit for bfq-mq. Because of
+this error, blkcg_policy_unregister was never invoked.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 4eb668eeacdc..bc1de3f70ea8 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -5669,7 +5669,7 @@ static int __init bfq_init(void)
+ static void __exit bfq_exit(void)
+ {
+ elv_unregister(&iosched_bfq_mq);
+-#ifdef CONFIG_BFQ_GROUP_ENABLED
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+ #endif
+ bfq_slab_kill();
+
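The bug fixed above is easy to reproduce: #ifdef on a name that is never defined is simply false, so the guarded call is silently compiled out and the build still succeeds without any diagnostic. A tiny illustration, with made-up symbol names:

#include <stdio.h>

#define GROUP_SCHED_ENABLED 1          /* the symbol that is actually defined */

int main(void)
{
#ifdef GROUP_SCHED_ENABLE              /* misspelled: block silently vanishes */
	puts("unregistering policy (never printed)");
#endif
#ifdef GROUP_SCHED_ENABLED             /* correct guard: block is kept */
	puts("unregistering policy");
#endif
	return 0;
}

Building once with the option enabled and verifying that the guarded path actually runs is one practical way to catch this kind of slip.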
+From c7ceb37496f63b2dba4d06946ab85ec97b87bfb5 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 11:48:17 +0200
+Subject: [PATCH 29/51] Port of "blk-mq-sched: unify request finished methods"
+
+No need to have two different callouts of bfq vs kyber.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+ block/bfq-mq-iosched.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index bc1de3f70ea8..2598602a0b10 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4753,7 +4753,7 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
+ bfq_put_queue(bfqq);
+ }
+
+-static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
++static void bfq_finish_request(struct request *rq)
+ {
+ struct bfq_queue *bfqq;
+ struct bfq_data *bfqd;
+@@ -4814,7 +4814,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+
+ assert_spin_locked(&bfqd->lock);
+ if (!RB_EMPTY_NODE(&rq->rb_node))
+- bfq_remove_request(q, rq);
++ bfq_remove_request(rq->q, rq);
+ bfq_put_rq_priv_body(bfqq);
+ }
+
+@@ -5558,7 +5558,7 @@ static struct elv_fs_entry bfq_attrs[] = {
+ static struct elevator_type iosched_bfq_mq = {
+ .ops.mq = {
+ .get_rq_priv = bfq_get_rq_private,
+- .put_rq_priv = bfq_put_rq_private,
++ .finish_request = bfq_finish_request,
+ .exit_icq = bfq_exit_icq,
+ .insert_requests = bfq_insert_requests,
+ .dispatch_request = bfq_dispatch_request,
+
+From 12bef026fe114ab5e2e284772ddc52a8be83fdbc Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 11:54:57 +0200
+Subject: [PATCH 30/51] Port of "bfq-iosched: fix NULL ioc check in
+ bfq_get_rq_private"
+
+icq_to_bic is a container_of operation, so we need to check for NULL
+before it. Also move the check outside the spinlock while we're at
+it.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+ block/bfq-mq-iosched.c | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 2598602a0b10..c57774a60911 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4903,16 +4903,17 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+ {
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+- struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++ struct bfq_io_cq *bic;
+ const int is_sync = rq_is_sync(rq);
+ struct bfq_queue *bfqq;
+ bool bfqq_already_existing = false, split = false;
+ bool new_queue = false;
+
+- spin_lock_irq(&bfqd->lock);
++ if (!rq->elv.icq)
++ return 1;
++ bic = icq_to_bic(rq->elv.icq);
+
+- if (!bic)
+- goto queue_fail;
++ spin_lock_irq(&bfqd->lock);
+
+ bfq_check_ioprio_change(bic, bio);
+
+@@ -4980,13 +4981,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ rq->rq_flags |= RQF_GOT;
+ spin_unlock_irq(&bfqd->lock);
+-
+ return 0;
+-
+-queue_fail:
+- spin_unlock_irq(&bfqd->lock);
+-
+- return 1;
+ }
+
+ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+
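The reordering above matters because icq_to_bic is a container_of conversion: applied to a NULL pointer it does not return NULL (unless the member happens to sit at offset zero), it returns a small bogus address, so the old "convert first, check later" order tested the wrong thing. A self-contained sketch of the fixed ordering; the structures and the local container_of clone are illustrative:

#include <stddef.h>
#include <stdio.h>

struct icq { int ioc; };
struct bic { long other; struct icq icq; };    /* icq is not the first member */

#define container_of_demo(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Test the raw icq pointer before converting it to its container. */
static struct bic *icq_to_bic_checked(struct icq *icq)
{
	if (!icq)
		return NULL;
	return container_of_demo(icq, struct bic, icq);
}

int main(void)
{
	struct bic b = { .other = 1, .icq = { .ioc = 42 } };

	printf("%d\n", icq_to_bic_checked(&b.icq)->ioc);    /* prints 42 */
	printf("%p\n", (void *)icq_to_bic_checked(NULL));   /* prints a null pointer */
	return 0;
}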
+From 633e5711347df1bf4ca935fd0aa9118a0054f75d Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 12:02:16 +0200
+Subject: [PATCH 31/51] Port of "blk-mq-sched: unify request prepare methods"
+
+This patch makes sure we always allocate requests in the core blk-mq
+code and use a common prepare_request method to initialize them for
+both mq I/O schedulers. For Kyber, an additional limit_depth method
+is added that is called before allocating the request.
+
+Also, because none of the initializations can really fail, the new method
+does not return an error; instead, the bfq finish method is hardened
+to deal with the no-IOC case.
+
+Last but not least, this removes the abuse of RQF_QUEUED by the blk-mq
+scheduling code, as RQF_ELVPRIV is all that is needed now.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+ block/bfq-mq-iosched.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index c57774a60911..49ffca1ad6e7 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4760,6 +4760,10 @@ static void bfq_finish_request(struct request *rq)
+ struct bfq_io_cq *bic;
+
+ BUG_ON(!rq);
++
++ if (!rq->elv.icq)
++ return;
++
+ bfqq = RQ_BFQQ(rq);
+ BUG_ON(!bfqq);
+
+@@ -4899,9 +4903,9 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
+ /*
+ * Allocate bfq data structures associated with this request.
+ */
+-static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+- struct bio *bio)
++static void bfq_prepare_request(struct request *rq, struct bio *bio)
+ {
++ struct request_queue *q = rq->q;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_io_cq *bic;
+ const int is_sync = rq_is_sync(rq);
+@@ -4910,7 +4914,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+ bool new_queue = false;
+
+ if (!rq->elv.icq)
+- return 1;
++ return;
+ bic = icq_to_bic(rq->elv.icq);
+
+ spin_lock_irq(&bfqd->lock);
+@@ -4981,7 +4985,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
+
+ rq->rq_flags |= RQF_GOT;
+ spin_unlock_irq(&bfqd->lock);
+- return 0;
+ }
+
+ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
+@@ -5552,7 +5555,7 @@ static struct elv_fs_entry bfq_attrs[] = {
+
+ static struct elevator_type iosched_bfq_mq = {
+ .ops.mq = {
+- .get_rq_priv = bfq_get_rq_private,
++ .prepare_request = bfq_prepare_request,
+ .finish_request = bfq_finish_request,
+ .exit_icq = bfq_exit_icq,
+ .insert_requests = bfq_insert_requests,
+
+From 5a321acfce282c3e58ac63582faf6f928ad17f27 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 12:43:22 +0200
+Subject: [PATCH 32/51] Add list of bfq instances to documentation
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ Documentation/block/bfq-iosched.txt | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index 3d6951d63489..8ce6b9a9bacd 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -11,6 +11,15 @@ controllers), BFQ's main features are:
+ groups (switching back to time distribution when needed to keep
+ throughput high).
+
++If bfq-mq patches have been applied, then the following three
++instances of BFQ are available (otherwise only the first instance):
++- bfq: mainline version of BFQ, for blk-mq
++- bfq-mq: development version of BFQ for blk-mq; this version contains
++ also all latest features not yet landed in mainline, plus many
++ safety checks
++- bfq: BFQ for legacy blk; also this version contains both latest
++ features and safety checks
++
+ In its default configuration, BFQ privileges latency over
+ throughput. So, when needed for achieving a lower latency, BFQ builds
+ schedules that may lead to a lower throughput. If your main or only
+@@ -27,7 +36,7 @@ sequential I/O (e.g., 8-12 GB/s if I/O requests are 256 KB large), and
+ to 120-200 MB/s with 4KB random I/O. BFQ is currently being tested on
+ multi-queue devices too.
+
+-The table of contents follow. Impatients can just jump to Section 3.
++The table of contents follows. Impatients can just jump to Section 3.
+
+ CONTENTS
+
+
+From 9f2e5b27227fd9254cc258572dc2d4531838c30b Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 16:28:00 +0200
+Subject: [PATCH 33/51] bfq-sq: fix prefix of names of cgroups parameters
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ Documentation/block/bfq-iosched.txt | 12 +++++++-----
+ block/bfq-cgroup-included.c | 2 +-
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index 8ce6b9a9bacd..965d82f94db9 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -503,10 +503,12 @@ To get proportional sharing of bandwidth with BFQ for a given device,
+ BFQ must of course be the active scheduler for that device.
+
+ Within each group directory, the names of the files associated with
+-BFQ-specific cgroup parameters and stats begin with the "bfq."
+-prefix. So, with cgroups-v1 or cgroups-v2, the full prefix for
+-BFQ-specific files is "blkio.bfq." or "io.bfq." For example, the group
+-parameter to set the weight of a group with BFQ is blkio.bfq.weight
++BFQ-specific cgroup parameters and stats begin with the "bfq.",
++"bfq-sq." or "bfq-mq." prefix, depending on which instance of bfq you
++want to use. So, with cgroups-v1 or cgroups-v2, the full prefix for
++BFQ-specific files is "blkio.bfqX." or "io.bfqX.", where X can be ""
++(i.e., null string), "-sq" or "-mq". For example, the group parameter
++to set the weight of a group with the mainline BFQ is blkio.bfq.weight
+ or io.bfq.weight.
+
+ Parameters to set
+@@ -514,7 +516,7 @@ Parameters to set
+
+ For each group, there is only the following parameter to set.
+
+-weight (namely blkio.bfq.weight or io.bfq-weight): the weight of the
++weight (namely blkio.bfqX.weight or io.bfqX.weight): the weight of the
+ group inside its parent. Available values: 1..10000 (default 100). The
+ linear mapping between ioprio and weights, described at the beginning
+ of the tunable section, is still valid, but all weights higher than
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index d903393ee78a..631e53d9150d 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -1124,7 +1124,7 @@ bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
+ #ifdef BFQ_MQ
+ #define BFQ_CGROUP_FNAME(param) "bfq-mq."#param
+ #else
+-#define BFQ_CGROUP_FNAME(param) "bfq."#param
++#define BFQ_CGROUP_FNAME(param) "bfq-sq."#param
+ #endif
+
+ static struct cftype bfq_blkcg_legacy_files[] = {
+
+From 92b42df8166939ccf26aa450125b5b575cf6d505 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 5 Jul 2017 21:08:32 +0200
+Subject: [PATCH 34/51] Add to documentation that bfq-mq and bfq-sq contain
+ last fixes too
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ Documentation/block/bfq-iosched.txt | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index 965d82f94db9..0e59f1c9d30e 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -15,10 +15,10 @@ If bfq-mq patches have been applied, then the following three
+ instances of BFQ are available (otherwise only the first instance):
+ - bfq: mainline version of BFQ, for blk-mq
+ - bfq-mq: development version of BFQ for blk-mq; this version contains
+- also all latest features not yet landed in mainline, plus many
++ also all latest features and fixes not yet landed in mainline, plus many
+ safety checks
+-- bfq: BFQ for legacy blk; also this version contains both latest
+- features and safety checks
++- bfq: BFQ for legacy blk; also this version contains latest features
++ and fixes, as well as safety checks
+
+ In its default configuration, BFQ privileges latency over
+ throughput. So, when needed for achieving a lower latency, BFQ builds
+
+From 7f9bdd433b848d4f53c167258bf4d0b3f1ae1923 Mon Sep 17 00:00:00 2001
+From: Lee Tibbert <lee.tibbert@gmail.com>
+Date: Wed, 19 Jul 2017 10:28:32 -0400
+Subject: [PATCH 35/51] Improve most frequently used no-logging path
+
+This patch originated as a fix for compiler unused-variable warnings
+issued when compiling bfq-mq with logging disabled (both
+CONFIG_BLK_DEV_IO_TRACE and CONFIG_BFQ_REDIRECT_TO_CONSOLE
+undefined).
+
+It turns out to also have benefits for the bfq-sq path as well.
+
+In most performance sensitive production builds blktrace_api logging
+will probably be turned off, so it is worth making the no-logging path
+compile without warnings. Any performance benefit is a bonus.
+
+Thank you to T. B. on the bfq-iosched@googlegroups.com list
+for ((void) (bfqq)) simplification/suggestion/improvement. All bugs
+and unclear descriptions are my own doing.
+
+The discussion below is based on the gcc compiler with optimization
+level of at least 02. Lower optimization levels are unlikely to
+remove no-op instruction equivalents.
+
+Provide three improvements in this likely case.
+
+ 1) Fix multiple occurrences of an unused-variable warning
+ issued when compiling bfq-mq with no logging. The warning
+ occurred each time the bfq_log_bfqg macro was expanded inside
+ a code block such as the following snippet from
+ block/bfq-sched.c, line 139 and few following, lightly edited for
+ indentation in order to pass checkpatch.pl maximum line lengths.
+
+else {
+ struct bfq_group *bfqg =
+ container_of(next_in_service,
+ struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+ "update_next_in_service: chosen this entity");
+ }
+
+ Previously bfq-mq.h expanded bfq_log_bfqg to blk_add_trace_msg.
+ When both bfq console logging and blktrace_api logging are
+ disabled, the blk_add_trace_msg macro in include/linux/blktrace_api.h
+ expands to do { } while (0), leaving the code block's local variable unused.
+
+ bfq_log_bfqq() had similar behavior but is never called with
+ a potentially unused variable. This patch fixes that macro for
+ consistency.
+
+ bfq-sq.h (single queue) with blktrace_api enabled, and the bfq
+ console logging macros have code paths which do not trigger this
+ warning.
+
+ kernel.org (4.12 & 4.13) bfq (bfq-iosched.h) could trigger
+ the warning but no code does so now. This patch fixes
+ bfq-iosched.h for consistency.
+
+ The style above enables a software engineering approach where
+ complex expressions are moved to a local variable before the
+ bfq_log* call. This makes it easier to read the expression and
+ use breakpoints to verify it. bfq-mq uses this approach in
+ several places.
+
+ New bfq_log* macros are provided for the no-logging case.
+ I touch only the second argument, because current code never
+ uses the local variable approach with the first or other
+ arguments. I tried to balance consistency with simplicity.
+
+ 2) For bfq-sq, reduce to zero the number of instructions executed
+ when no logging is configured. No sense marshaling arguments
+ which are never going to be used.
+
+ On a trial V8R11 build, this reduced the size of bfq-iosched.o
+ by 14.3 KiB. The size went from 70304 to 55664 bytes.
+
+ bfq-mq and kernel.org bfq code size does not change because
+ existing macros already optimize to zero bytes when not logging.
+ The current changes maintain consistency with the bfq-sq path
+ and make the bfq-mq & bfq no-logging paths resistant to future
+ logging path macro changes which might otherwise generate code.
+
+ 3) Slightly reduce compile time of all bfq variants by including
+ blktrace_api.h only when it will be used.
+
+Signed-off-by: Lee Tibbert <lee.tibbert@gmail.com>
+---
+ block/bfq-mq.h | 18 +++++++++++++++++-
+ block/bfq.h | 18 +++++++++++++++++-
+ 2 files changed, 34 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 77ab0f22ed22..7ed2cc29be57 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -15,7 +15,6 @@
+ #ifndef _BFQ_H
+ #define _BFQ_H
+
+-#include <linux/blktrace_api.h>
+ #include <linux/hrtimer.h>
+ #include <linux/blk-cgroup.h>
+
+@@ -725,6 +724,21 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ ##args)
+
+ #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++
++#if !defined(CONFIG_BLK_DEV_IO_TRACE)
++
++/* Avoid possible "unused-variable" warning. See commit message. */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) ((void) (bfqq))
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) ((void) (bfqg))
++
++#define bfq_log(bfqd, fmt, args...) do {} while (0)
++
++#else /* CONFIG_BLK_DEV_IO_TRACE */
++
++#include <linux/blktrace_api.h>
++
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+@@ -752,6 +766,8 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log(bfqd, fmt, args...) \
+ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++
++#endif /* CONFIG_BLK_DEV_IO_TRACE */
+ #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+
+ /* Expiration reasons. */
+diff --git a/block/bfq.h b/block/bfq.h
+index 53954d1b87f8..15d326f466b7 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -15,7 +15,6 @@
+ #ifndef _BFQ_H
+ #define _BFQ_H
+
+-#include <linux/blktrace_api.h>
+ #include <linux/hrtimer.h>
+ #include <linux/blk-cgroup.h>
+
+@@ -725,6 +724,21 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ ##args)
+
+ #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
++
++#if !defined(CONFIG_BLK_DEV_IO_TRACE)
++
++/* Avoid possible "unused-variable" warning. See commit message. */
++
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) ((void) (bfqq))
++
++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) ((void) (bfqg))
++
++#define bfq_log(bfqd, fmt, args...) do {} while (0)
++
++#else /* CONFIG_BLK_DEV_IO_TRACE */
++
++#include <linux/blktrace_api.h>
++
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+@@ -759,6 +773,8 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log(bfqd, fmt, args...) \
+ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++
++#endif /* CONFIG_BLK_DEV_IO_TRACE */
+ #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+
+ /* Expiration reasons. */
+
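The ((void) (bfqq)) and ((void) (bfqg)) expansions above rely on a standard idiom: an expression cast to void still counts as a use of the variable, so -Wunused-variable stays quiet, while the compiler emits no code for it at the optimization levels discussed in the commit message. A minimal sketch of the same idiom outside the kernel, with an illustrative logging macro:

#define DEMO_LOG_ENABLED 0

#if DEMO_LOG_ENABLED
#include <stdio.h>
#define demo_log(obj, fmt, ...) \
	printf("%p " fmt "\n", (void *)(obj), ##__VA_ARGS__)
#else
/* Logging compiled out: "use" the object, generate nothing. */
#define demo_log(obj, fmt, ...) ((void)(obj))
#endif

int main(void)
{
	int only_logged = 42;      /* would warn as unused without the idiom */

	demo_log(&only_logged, "value %d", only_logged);
	return 0;
}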
+From f11a0e751e741bf94c6a48234824d50b3c0100ad Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 9 Aug 2017 16:40:39 +0200
+Subject: [PATCH 36/51] bfq-sq: fix commit "Remove all get and put of I/O
+ contexts" in branch bfq-mq
+
+The commit "Remove all get and put of I/O contexts" erroneously removed
+the reset of the field in_service_bic for bfq-sq. This commit re-adds
+that missing reset.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-sched.c | 7 +++++++
+ block/bfq-sq-iosched.c | 1 +
+ 2 files changed, 8 insertions(+)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 9c4e6797d8c9..7425824c26b8 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1904,6 +1904,13 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+ struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
+ struct bfq_entity *entity = in_serv_entity;
+
++#ifndef BFQ_MQ
++ if (bfqd->in_service_bic) {
++ put_io_context(bfqd->in_service_bic->icq.ioc);
++ bfqd->in_service_bic = NULL;
++ }
++#endif
++
+ bfq_clear_bfqq_wait_request(in_serv_bfqq);
+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
+ bfqd->in_service_queue = NULL;
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 25da0d1c0622..e1960bf149d8 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -3765,6 +3765,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd,
+ if (!bfqd->in_service_bic) {
+ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
+ bfqd->in_service_bic = RQ_BIC(rq);
++ BUG_ON(!bfqd->in_service_bic);
+ }
+
+ if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
+
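The reset re-added above is the release half of a cache-with-reference pair: when bfq-sq caches the in-service bic it pins the underlying io_context by bumping its refcount, so the reset path must both drop that reference and clear the cached pointer. A toy model of the pairing, with no locking and illustrative names (the real code uses atomic_long_inc and put_io_context):

#include <assert.h>
#include <stddef.h>

struct demo_ioc { long refcount; };
struct demo_sched { struct demo_ioc *in_service_ioc; };

static void demo_set_in_service(struct demo_sched *sd, struct demo_ioc *ioc)
{
	ioc->refcount++;                   /* pin while the pointer is cached */
	sd->in_service_ioc = ioc;
}

/* The step the commit restores: drop the pin and clear the cache. */
static void demo_reset_in_service(struct demo_sched *sd)
{
	if (sd->in_service_ioc) {
		assert(sd->in_service_ioc->refcount > 0);
		sd->in_service_ioc->refcount--;
		sd->in_service_ioc = NULL;
	}
}

int main(void)
{
	struct demo_ioc ioc = { .refcount = 1 };
	struct demo_sched sd = { .in_service_ioc = NULL };

	demo_set_in_service(&sd, &ioc);    /* refcount goes to 2 */
	demo_reset_in_service(&sd);        /* back to 1, pointer cleared */
	return ioc.refcount == 1 ? 0 : 1;
}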
+From eceae5457530df8598557767d7be258ca9384de4 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 9 Aug 2017 22:29:01 +0200
+Subject: [PATCH 37/51] bfq-sq-mq: make lookup_next_entity push up vtime on
+ expirations
+
+To provide a very smooth service, bfq starts to serve a bfq_queue
+only if the queue is 'eligible', i.e., if the same queue would
+have started to be served in the ideal, perfectly fair system that
+bfq simulates internally. This is obtained by associating each
+queue with a virtual start time, and by computing a special system
+virtual time quantity: a queue is eligible only if the system
+virtual time has reached the virtual start time of the
+queue. Finally, bfq guarantees that, when a new queue must be set
+in service, there is always at least one eligible entity for each
+active parent entity in the scheduler. To provide this guarantee,
+the function __bfq_lookup_next_entity pushes up, for each parent
+entity on which it is invoked, the system virtual time to the
+minimum among the virtual start times of the entities in the
+active tree for the parent entity (more precisely, the push up
+occurs if the system virtual time happens to be lower than all
+such virtual start times).
+
+There is however a circumstance in which __bfq_lookup_next_entity
+cannot push up the system virtual time for a parent entity, even
+if the system virtual time is lower than the virtual start times
+of all the child entities in the active tree. It happens if one of
+the child entities is in service. In fact, in such a case, there
+is already an eligible entity, the in-service one, even if it may
+not be present in the active tree (because in-service entities
+may be removed from the active tree).
+
+Unfortunately, in the last re-design of the
+hierarchical-scheduling engine, the reset of the pointer to the
+in-service entity for a given parent entity--reset to be done as a
+consequence of the expiration of the in-service entity--always
+happens after the function __bfq_lookup_next_entity has been
+invoked. This causes the function to think that there is still an
+entity in service for the parent entity, and then that the system
+virtual time cannot be pushed up, even if actually such a
+no-more-in-service entity has already been properly reinserted
+into the active tree (or in some other tree if no more
+active). Yet, the system virtual time *had* to be pushed up, to be
+ready to correctly choose the next queue to serve. Because of the
+lack of this push up, bfq may wrongly set in service a queue that
+had been speculatively pre-computed as the possible
+next-in-service queue, but that would no more be the one to serve
+after the expiration and the reinsertion into the active trees of
+the previously in-service entities.
+
+This commit addresses this issue by making
+__bfq_lookup_next_entity properly push up the system virtual time
+if an expiration is occurring.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 4 +--
+ block/bfq-sched.c | 77 ++++++++++++++++++++++++++++++++------------------
+ block/bfq-sq-iosched.c | 4 +--
+ 3 files changed, 53 insertions(+), 32 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 49ffca1ad6e7..b5c848650375 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -682,7 +682,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
+ entity->budget = new_budget;
+ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
+ new_budget);
+- bfq_requeue_bfqq(bfqd, bfqq);
++ bfq_requeue_bfqq(bfqd, bfqq, false);
+ }
+ }
+
+@@ -2822,7 +2822,7 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+
+ bfq_del_bfqq_busy(bfqd, bfqq, true);
+ } else {
+- bfq_requeue_bfqq(bfqd, bfqq);
++ bfq_requeue_bfqq(bfqd, bfqq, true);
+ /*
+ * Resort priority tree of potential close cooperators.
+ */
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 7425824c26b8..f3001af37256 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -33,7 +33,8 @@ static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
+ return rb_entry(node, struct bfq_entity, rb_node);
+ }
+
+-static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd);
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ bool expiration);
+
+ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
+
+@@ -43,6 +44,8 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
+ * @new_entity: if not NULL, pointer to the entity whose activation,
+ * requeueing or repositionig triggered the invocation of
+ * this function.
++ * @expiration: if true, this function is being invoked after the
++ * expiration of the in-service entity
+ *
+ * This function is called to update sd->next_in_service, which, in
+ * its turn, may change as a consequence of the insertion or
+@@ -61,7 +64,8 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
+ * entity.
+ */
+ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+- struct bfq_entity *new_entity)
++ struct bfq_entity *new_entity,
++ bool expiration)
+ {
+ struct bfq_entity *next_in_service = sd->next_in_service;
+ struct bfq_queue *bfqq;
+@@ -120,7 +124,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ if (replace_next)
+ next_in_service = new_entity;
+ } else /* invoked because of a deactivation: lookup needed */
+- next_in_service = bfq_lookup_next_entity(sd);
++ next_in_service = bfq_lookup_next_entity(sd, expiration);
+
+ if (next_in_service) {
+ parent_sched_may_change = !sd->next_in_service ||
+@@ -1291,10 +1295,12 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
+ * @requeue: true if this is a requeue, which implies that bfqq is
+ * being expired; thus ALL its ancestors stop being served and must
+ * therefore be requeued
++ * @expiration: true if this function is being invoked in the expiration path
++ * of the in-service queue
+ */
+ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
+ bool non_blocking_wait_rq,
+- bool requeue)
++ bool requeue, bool expiration)
+ {
+ struct bfq_sched_data *sd;
+
+@@ -1307,7 +1313,8 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
+ RB_EMPTY_ROOT(&(sd->service_tree+1)->active) &&
+ RB_EMPTY_ROOT(&(sd->service_tree+2)->active));
+
+- if (!bfq_update_next_in_service(sd, entity) && !requeue) {
++ if (!bfq_update_next_in_service(sd, entity, expiration) &&
++ !requeue) {
+ BUG_ON(!sd->next_in_service);
+ break;
+ }
+@@ -1373,6 +1380,8 @@ static bool __bfq_deactivate_entity(struct bfq_entity *entity,
+ * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
+ * @entity: the entity to deactivate.
+ * @ins_into_idle_tree: true if the entity can be put into the idle tree
++ * @expiration: true if this function is being invoked in the expiration path
++ * of the in-service queue
+ */
+ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ bool ins_into_idle_tree,
+@@ -1417,7 +1426,7 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ * then, since entity has just been
+ * deactivated, a new one must be found.
+ */
+- bfq_update_next_in_service(sd, NULL);
++ bfq_update_next_in_service(sd, NULL, expiration);
+
+ if (sd->next_in_service || sd->in_service_entity) {
+ /*
+@@ -1495,7 +1504,7 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
+ "invoking udpdate_next for this entity");
+ }
+ #endif
+- if (!bfq_update_next_in_service(sd, entity) &&
++ if (!bfq_update_next_in_service(sd, entity, expiration) &&
+ !expiration)
+ /*
+ * next_in_service unchanged or not causing
+@@ -1524,7 +1533,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "calc_vtime_jump: new value %llu",
+- root_entity->min_start);
++ ((root_entity->min_start>>10)*1000)>>12);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+@@ -1533,7 +1542,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+ "calc_vtime_jump: new value %llu",
+- root_entity->min_start);
++ ((root_entity->min_start>>10)*1000)>>12);
+ }
+ #endif
+ return root_entity->min_start;
+@@ -1615,17 +1624,9 @@ static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
+ * 3) is idle.
+ */
+ static struct bfq_entity *
+-__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service
+-#if 0
+- , bool force
+-#endif
+- )
++__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
+ {
+- struct bfq_entity *entity
+-#if 0
+- , *new_next_in_service = NULL
+-#endif
+- ;
++ struct bfq_entity *entity;
+ u64 new_vtime;
+ struct bfq_queue *bfqq;
+
+@@ -1667,8 +1668,9 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "__lookup_next: start %llu vtime %llu st %p",
++ "__lookup_next: start %llu vtime %llu (%llu) st %p",
+ ((entity->start>>10)*1000)>>12,
++ ((st->vtime>>10)*1000)>>12,
+ ((new_vtime>>10)*1000)>>12, st);
+ }
+ #endif
+@@ -1681,12 +1683,14 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service
+ /**
+ * bfq_lookup_next_entity - return the first eligible entity in @sd.
+ * @sd: the sched_data.
++ * @expiration: true if we are on the expiration path of the in-service queue
+ *
+ * This function is invoked when there has been a change in the trees
+- * for sd, and we need know what is the new next entity after this
+- * change.
++ * for sd, and we need to know what is the new next entity to serve
++ * after this change.
+ */
+-static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ bool expiration)
+ {
+ struct bfq_service_tree *st = sd->service_tree;
+ struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
+@@ -1716,8 +1720,24 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd)
+ * class, unless the idle class needs to be served.
+ */
+ for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
++ /*
++ * If expiration is true, then bfq_lookup_next_entity
++ * is being invoked as a part of the expiration path
++ * of the in-service queue. In this case, even if
++ * sd->in_service_entity is not NULL,
++ * sd->in_service_entity at this point is actually not
++ * in service any more, and, if needed, has already
++ * been properly queued or requeued into the right
++ * tree. The reason why sd->in_service_entity is still
++ * not NULL here, even if expiration is true, is that
++ * sd->in_service_entity is reset as a last step in the
++ * expiration path. So, if expiration is true, tell
++ * __bfq_lookup_next_entity that there is no
++ * sd->in_service_entity.
++ */
+ entity = __bfq_lookup_next_entity(st + class_idx,
+- sd->in_service_entity);
++ sd->in_service_entity &&
++ !expiration);
+
+ if (entity)
+ break;
+@@ -1891,7 +1911,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ for_each_entity(entity) {
+ struct bfq_sched_data *sd = entity->sched_data;
+
+- if(!bfq_update_next_in_service(sd, NULL))
++ if (!bfq_update_next_in_service(sd, NULL, false))
+ break;
+ }
+
+@@ -1951,16 +1971,17 @@ static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ entity->on_st);
+
+ bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
+- false);
++ false, false);
+ bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
+ }
+
+-static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ bool expiration)
+ {
+ struct bfq_entity *entity = &bfqq->entity;
+
+ bfq_activate_requeue_entity(entity, false,
+- bfqq == bfqd->in_service_queue);
++ bfqq == bfqd->in_service_queue, expiration);
+ }
+
+ static void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index e1960bf149d8..42393ab889a9 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -644,7 +644,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
+ entity->budget = new_budget;
+ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
+ new_budget);
+- bfq_requeue_bfqq(bfqd, bfqq);
++ bfq_requeue_bfqq(bfqd, bfqq, false);
+ }
+ }
+
+@@ -2715,7 +2715,7 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+
+ bfq_del_bfqq_busy(bfqd, bfqq, true);
+ } else {
+- bfq_requeue_bfqq(bfqd, bfqq);
++ bfq_requeue_bfqq(bfqd, bfqq, true);
+ /*
+ * Resort priority tree of potential close cooperators.
+ */
+
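The eligibility rule the commit message describes can be summarized in a few lines: an entity may be put in service only once the virtual time of its service tree has reached the entity's virtual start time, and on expiration the virtual time may be pushed up to the minimum start time among the active entities, so that at least one of them becomes eligible. The toy model below captures just that rule; it uses plain u64 timestamps and ignores the wraparound-safe bfq_gt() comparison and the fixed-point encoding the real scheduler uses:

#include <stdbool.h>
#include <stdint.h>

struct demo_entity { uint64_t start; };        /* virtual start time */

static bool demo_eligible(uint64_t vtime, const struct demo_entity *e)
{
	return vtime >= e->start;              /* real code: !bfq_gt(start, vtime) */
}

/* On expiration, push the tree's virtual time up to the smallest start
 * time of the active entities, so at least one of them becomes eligible.
 */
static uint64_t demo_push_up_vtime(uint64_t vtime,
				   const struct demo_entity *active, int n)
{
	uint64_t min_start = UINT64_MAX;

	for (int i = 0; i < n; i++)
		if (active[i].start < min_start)
			min_start = active[i].start;
	return (n > 0 && min_start > vtime) ? min_start : vtime;
}

int main(void)
{
	struct demo_entity active[] = { { .start = 150 }, { .start = 120 } };
	uint64_t vtime = 100;

	if (!demo_eligible(vtime, &active[0]) && !demo_eligible(vtime, &active[1]))
		vtime = demo_push_up_vtime(vtime, active, 2);   /* vtime becomes 120 */

	return demo_eligible(vtime, &active[1]) ? 0 : 1;        /* exits 0 */
}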
+From ee9f95b24e1d88ffba4845981c2a4684aefd0245 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 9 Aug 2017 22:53:00 +0200
+Subject: [PATCH 38/51] bfq-sq-mq: remove direct switch to an entity in higher
+ class
+
+If the function bfq_update_next_in_service is invoked as a consequence
+of the activation or requeueing of an entity, say E, and finds out
+that E belongs to a higher-priority class than that of the current
+next-in-service entity, then it sets next_in_service directly to
+E. But this may lead to anomalous schedules, because E may happen not
+be eligible for service, because its virtual start time is higher than
+the system virtual time for its service tree.
+
+This commit addresses this issue by simply removing this direct
+switch.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-sched.c | 19 +++++--------------
+ 1 file changed, 5 insertions(+), 14 deletions(-)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index f3001af37256..b1a59088db88 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -76,9 +76,8 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ * or repositiong of an entity that does not coincide with
+ * sd->next_in_service, then a full lookup in the active tree
+ * can be avoided. In fact, it is enough to check whether the
+- * just-modified entity has a higher priority than
+- * sd->next_in_service, or, even if it has the same priority
+- * as sd->next_in_service, is eligible and has a lower virtual
++ * just-modified entity has the same priority as
++ * sd->next_in_service, is eligible and has a lower virtual
+ * finish time than sd->next_in_service. If this compound
+ * condition holds, then the new entity becomes the new
+ * next_in_service. Otherwise no change is needed.
+@@ -94,9 +93,8 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+
+ /*
+ * If there is already a next_in_service candidate
+- * entity, then compare class priorities or timestamps
+- * to decide whether to replace sd->service_tree with
+- * new_entity.
++ * entity, then compare timestamps to decide whether
++ * to replace sd->service_tree with new_entity.
+ */
+ if (next_in_service) {
+ unsigned int new_entity_class_idx =
+@@ -104,10 +102,6 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ struct bfq_service_tree *st =
+ sd->service_tree + new_entity_class_idx;
+
+- /*
+- * For efficiency, evaluate the most likely
+- * sub-condition first.
+- */
+ replace_next =
+ (new_entity_class_idx ==
+ bfq_class_idx(next_in_service)
+@@ -115,10 +109,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ !bfq_gt(new_entity->start, st->vtime)
+ &&
+ bfq_gt(next_in_service->finish,
+- new_entity->finish))
+- ||
+- new_entity_class_idx <
+- bfq_class_idx(next_in_service);
++ new_entity->finish));
+ }
+
+ if (replace_next)
+
+From a3fdc5af40537355b68c1f0d3997c5a5fb54b9ce Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 10 Aug 2017 08:15:50 +0200
+Subject: [PATCH 39/51] bfq-sq-mq: guarantee update_next_in_service always
+ returns an eligible entity
+
+If the function bfq_update_next_in_service is invoked as a consequence
+of the activation or requeueing of an entity, say E, then it doesn't
+invoke bfq_lookup_next_entity to get the next-in-service entity. In
+contrast, it follows a shorter path: if E happens to be eligible (see
+commit "bfq-sq-mq: make lookup_next_entity push up vtime on
+expirations" for details on eligibility) and to have a lower virtual
+finish time than the current candidate as next-in-service entity, then
+E directly becomes the next-in-service entity. Unfortunately, there is
+a corner case for which this shorter path makes
+bfq_update_next_in_service choose a non eligible entity: it occurs if
+both E and the current next-in-service entity happen to be non
+eligible when bfq_update_next_in_service is invoked. In this case, E
+is not set as next-in-service, and, since bfq_lookup_next_entity is
+not invoked, the state of the parent entity is not updated so as to
+end up with an eligible entity as the proper next-in-service entity.
+
+In this respect, next-in-service is actually allowed to be non
+eligible while some queue is in service: since no system-virtual-time
+push-up can be performed in that case (see again commit "bfq-sq-mq:
+make lookup_next_entity push up vtime on expirations" for details),
+next-in-service is chosen, speculatively, as a function of the
+possible value that the system virtual time may get after a push
+up. But the correctness of the schedule breaks if next-in-service is
+still a non eligible entity when it is time to set in service the next
+entity. Unfortunately, this may happen in the above corner case.
+
+This commit fixes this problem by making bfq_update_next_in_service
+invoke bfq_lookup_next_entity not only if the above shorter path
+cannot be taken, but also if the shorter path is taken but fails to
+yield an eligible next-in-service entity.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-sched.c | 38 ++++++++++++++++++++++++++++----------
+ 1 file changed, 28 insertions(+), 10 deletions(-)
+
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index b1a59088db88..e4a2553a2d2c 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -70,6 +70,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ struct bfq_entity *next_in_service = sd->next_in_service;
+ struct bfq_queue *bfqq;
+ bool parent_sched_may_change = false;
++ bool change_without_lookup = false;
+
+ /*
+ * If this update is triggered by the activation, requeueing
+@@ -89,7 +90,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ * set to true, and left as true if
+ * sd->next_in_service is NULL.
+ */
+- bool replace_next = true;
++ change_without_lookup = true;
+
+ /*
+ * If there is already a next_in_service candidate
+@@ -102,7 +103,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ struct bfq_service_tree *st =
+ sd->service_tree + new_entity_class_idx;
+
+- replace_next =
++ change_without_lookup =
+ (new_entity_class_idx ==
+ bfq_class_idx(next_in_service)
+ &&
+@@ -112,15 +113,32 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ new_entity->finish));
+ }
+
+- if (replace_next)
++ if (change_without_lookup) {
+ next_in_service = new_entity;
+- } else /* invoked because of a deactivation: lookup needed */
++ bfqq = bfq_entity_to_bfqq(next_in_service);
++
++ if (bfqq)
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "update_next_in_service: chose without lookup");
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ else {
++ struct bfq_group *bfqg =
++ container_of(next_in_service,
++ struct bfq_group, entity);
++
++ bfq_log_bfqg((struct bfq_data*)bfqg->bfqd, bfqg,
++ "update_next_in_service: chose without lookup");
++ }
++#endif
++ }
++ }
++
++ if (!change_without_lookup) /* lookup needed */
+ next_in_service = bfq_lookup_next_entity(sd, expiration);
+
+- if (next_in_service) {
++ if (next_in_service)
+ parent_sched_may_change = !sd->next_in_service ||
+ bfq_update_parent_budget(next_in_service);
+- }
+
+ sd->next_in_service = next_in_service;
+
+@@ -1053,7 +1071,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "__activate_entity: new queue finish %llu",
++ "update_fin_time_enqueue: new queue finish %llu",
+ ((entity->finish>>10)*1000)>>12);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -1061,7 +1079,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "__activate_entity: new group finish %llu",
++ "update_fin_time_enqueue: new group finish %llu",
+ ((entity->finish>>10)*1000)>>12);
+ #endif
+ }
+@@ -1071,7 +1089,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "__activate_entity: queue %seligible in st %p",
++ "update_fin_time_enqueue: queue %seligible in st %p",
+ entity->start <= st->vtime ? "" : "non ", st);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -1079,7 +1097,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "__activate_entity: group %seligible in st %p",
++ "update_fin_time_enqueue: group %seligible in st %p",
+ entity->start <= st->vtime ? "" : "non ", st);
+ #endif
+ }
+
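To see the control flow that this part of the series converges on, it can help to strip bfq_update_next_in_service() down to its decision: take the shorter path only when the just-activated entity is in the same class as the current candidate, is eligible, and has a smaller virtual finish time; in every other case fall back to the full lookup, which always yields an eligible entity. The userspace sketch below is only an illustration of that shape, with made-up types (struct entity, full_lookup()) and a vtime passed in by hand; it is not the kernel code in block/bfq-sched.c.

/*
 * Minimal sketch of the fast-path-or-lookup decision described in the
 * patch above.  All types and the lookup stub are simplified
 * assumptions, not the kernel structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct entity {
	unsigned long long start;   /* virtual start time  */
	unsigned long long finish;  /* virtual finish time */
	int class_idx;              /* service class index */
};

/* stand-in for the full bfq_lookup_next_entity() tree walk */
static struct entity *full_lookup(struct entity *fallback)
{
	return fallback;
}

static struct entity *update_next_in_service(struct entity *next,
					     struct entity *new_entity,
					     unsigned long long vtime)
{
	bool change_without_lookup = true;

	if (next)
		change_without_lookup =
			new_entity->class_idx == next->class_idx &&
			new_entity->start <= vtime &&      /* eligible      */
			new_entity->finish < next->finish; /* lower finish  */

	if (change_without_lookup)
		return new_entity;
	/* shorter path not taken: do the full, always-eligible lookup */
	return full_lookup(next);
}

int main(void)
{
	struct entity cur = { .start = 5, .finish = 20, .class_idx = 1 };
	struct entity new = { .start = 2, .finish = 10, .class_idx = 1 };

	struct entity *n = update_next_in_service(&cur, &new, 8);
	printf("chosen finish time: %llu\n", n->finish);
	return 0;
}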
+From 6565e4d1aac029b6f0a5d86a4c6ef38608838eac Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 31 Aug 2017 19:24:26 +0200
+Subject: [PATCH 40/51] doc, block, bfq: fix some typos and stale sentences
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Reviewed-by: Jeremy Hickman <jeremywh7@gmail.com>
+Reviewed-by: Laurentiu Nicola <lnicola@dend.ro>
+---
+ Documentation/block/bfq-iosched.txt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index 0e59f1c9d30e..dcfe15523da3 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -17,7 +17,7 @@ instances of BFQ are available (otherwise only the first instance):
+ - bfq-mq: development version of BFQ for blk-mq; this version contains
+ also all latest features and fixes not yet landed in mainline, plus many
+ safety checks
+-- bfq: BFQ for legacy blk; also this version contains latest features
++- bfq-sq: BFQ for legacy blk; also this version contains latest features
+ and fixes, as well as safety checks
+
+ In its default configuration, BFQ privileges latency over
+
+From 261ee8cc9f43e03d790a07184f0bcaa504ee6737 Mon Sep 17 00:00:00 2001
+From: Luca Miccio <lucmiccio@gmail.com>
+Date: Wed, 13 Sep 2017 12:03:56 +0200
+Subject: [PATCH 41/51] bfq-mq, bfq-sq: Disable writeback throttling
+
+Similarly to CFQ, BFQ has its write-throttling heuristics, and it
+is better not to combine them with further write-throttling
+heuristics of a different nature.
+So this commit disables write-back throttling for a device if BFQ
+is used as the I/O scheduler for that device.
+
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
+---
+ block/bfq-mq-iosched.c | 2 ++
+ block/bfq-sq-iosched.c | 7 +++++++
+ 2 files changed, 9 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index b5c848650375..7d27d5b3befb 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -89,6 +89,7 @@
+ #include "blk-mq-tag.h"
+ #include "blk-mq-sched.h"
+ #include "bfq-mq.h"
++#include "blk-wbt.h"
+
+ /* Expiration time of sync (0) and async (1) requests, in ns. */
+ static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
+@@ -5260,6 +5261,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ bfq_init_root_group(bfqd->root_group, bfqd);
+ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
+
++ wbt_disable_default(q);
+ return 0;
+
+ out_free:
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 42393ab889a9..6fdc3b1d5bb8 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -83,6 +83,7 @@
+ #include <linux/ioprio.h>
+ #include "blk.h"
+ #include "bfq.h"
++#include "blk-wbt.h"
+
+ /* Expiration time of sync (0) and async (1) requests, in ns. */
+ static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
+@@ -4976,6 +4977,11 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ return -ENOMEM;
+ }
+
++static void bfq_registered_queue(struct request_queue *q)
++{
++ wbt_disable_default(q);
++}
++
+ static void bfq_slab_kill(void)
+ {
+ kmem_cache_destroy(bfq_pool);
+@@ -5285,6 +5291,7 @@ static struct elevator_type iosched_bfq = {
+ .elevator_may_queue_fn = bfq_may_queue,
+ .elevator_init_fn = bfq_init_queue,
+ .elevator_exit_fn = bfq_exit_queue,
++ .elevator_registered_fn = bfq_registered_queue,
+ },
+ .icq_size = sizeof(struct bfq_io_cq),
+ .icq_align = __alignof__(struct bfq_io_cq),
+
+From 40ea0aed088791da27fcfa51f3b64d1f96b0d06e Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 12 Sep 2017 16:45:53 +0200
+Subject: [PATCH 42/51] bfq-mq, bfq-sq: fix wrong init of saved start time for
+ weight raising
+
+This commit fixes a bug that causes bfq to fail to guarantee a high
+responsiveness on some drives, if there is heavy random read+write I/O
+in the background. More precisely, such a failure allowed this bug to
+be found [1], but the bug may well cause other yet unreported
+anomalies.
+
+BFQ raises the weight of the bfq_queues associated with soft real-time
+applications, to privilege the I/O, and thus reduce latency, for these
+applications. This mechanism is named soft-real-time weight raising in
+BFQ. A soft real-time period may happen to be nested into an
+interactive weight raising period, i.e., it may happen that, when a
+bfq_queue switches to a soft real-time weight-raised state, the
+bfq_queue is already being weight-raised because deemed interactive
+too. In this case, BFQ saves, in a special variable
+wr_start_at_switch_to_srt, the time instant when the interactive
+weight-raising period started for the bfq_queue, i.e., the time
+instant when BFQ started to deem the bfq_queue interactive. This value
+is then used to check whether the interactive weight-raising period
+would still be in progress when the soft real-time weight-raising
+period ends. If so, interactive weight raising is restored for the
+bfq_queue. This restore is useful, in particular, because it prevents
+bfq_queues from losing their interactive weight raising prematurely,
+as a consequence of spurious, short-lived soft real-time
+weight-raising periods caused by wrong detections as soft real-time.
+
+If, instead, a bfq_queue switches to soft-real-time weight raising
+while it *is not* already in an interactive weight-raising period,
+then the variable wr_start_at_switch_to_srt has no meaning during the
+following soft real-time weight-raising period. Unfortunately the
+handling of this case is wrong in BFQ: not only is the variable not
+flagged in any way as meaningless, but it is also set to the time when
+the switch to soft real-time weight-raising occurs. This may cause an
+interactive weight-raising period to be considered mistakenly as still
+in progress, and thus a spurious interactive weight-raising period to
+start for the bfq_queue, at the end of the soft-real-time
+weight-raising period. In particular the spurious interactive
+weight-raising period will be considered as still in progress, if the
+soft-real-time weight-raising period does not last very long. The
+bfq_queue will then be wrongly privileged and, if I/O bound, will
+unjustly steal bandwidth from truly interactive or soft real-time
+bfq_queues, harming responsiveness and low latency.
+
+This commit fixes this issue by just setting wr_start_at_switch_to_srt
+to minus infinity (farthest past time instant according to jiffies
+macros): when the soft-real-time weight-raising period ends, certainly
+no interactive weight-raising period will be considered as still in
+progress.
+
+[1] Background I/O Type: Random - Background I/O mix: Reads and writes
+- Application to start: LibreOffice Writer in
+http://www.phoronix.com/scan.php?page=news_item&px=Linux-4.13-IO-Laptop
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
+Tested-by: Lee Tibbert <lee.tibbert@gmail.com>
+Tested-by: Mirko Montanari <mirkomontanari91@gmail.com>
+---
+ block/bfq-mq-iosched.c | 50 +++++++++++++++++++++++++++++++-------------------
+ block/bfq-sq-iosched.c | 50 +++++++++++++++++++++++++++++++-------------------
+ 2 files changed, 62 insertions(+), 38 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 7d27d5b3befb..f378519b6d33 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1204,6 +1204,24 @@ static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
+ return wr_or_deserves_wr;
+ }
+
++/*
++ * Return the farthest future time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_greatest_from_now(void)
++{
++ return jiffies + MAX_JIFFY_OFFSET;
++}
++
++/*
++ * Return the farthest past time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_smallest_from_now(void)
++{
++ return jiffies - MAX_JIFFY_OFFSET;
++}
++
+ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq,
+ unsigned int old_wr_coeff,
+@@ -1218,7 +1236,19 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+ } else {
+- bfqq->wr_start_at_switch_to_srt = jiffies;
++ /*
++ * No interactive weight raising in progress
++ * here: assign minus infinity to
++ * wr_start_at_switch_to_srt, to make sure
++ * that, at the end of the soft-real-time
++			 * weight-raising period that is starting
++ * now, no interactive weight-raising period
++ * may be wrongly considered as still in
++ * progress (and thus actually started by
++ * mistake).
++ */
++ bfqq->wr_start_at_switch_to_srt =
++ bfq_smallest_from_now();
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
+ BFQ_SOFTRT_WEIGHT_FACTOR;
+ bfqq->wr_cur_max_time =
+@@ -3174,24 +3204,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
+ }
+
+-/*
+- * Return the farthest future time instant according to jiffies
+- * macros.
+- */
+-static unsigned long bfq_greatest_from_now(void)
+-{
+- return jiffies + MAX_JIFFY_OFFSET;
+-}
+-
+-/*
+- * Return the farthest past time instant according to jiffies
+- * macros.
+- */
+-static unsigned long bfq_smallest_from_now(void)
+-{
+- return jiffies - MAX_JIFFY_OFFSET;
+-}
+-
+ /**
+ * bfq_bfqq_expire - expire a queue.
+ * @bfqd: device owning the queue.
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 6fdc3b1d5bb8..f4654436cd55 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -1165,6 +1165,24 @@ static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
+ return wr_or_deserves_wr;
+ }
+
++/*
++ * Return the farthest future time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_greatest_from_now(void)
++{
++ return jiffies + MAX_JIFFY_OFFSET;
++}
++
++/*
++ * Return the farthest past time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_smallest_from_now(void)
++{
++ return jiffies - MAX_JIFFY_OFFSET;
++}
++
+ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq,
+ unsigned int old_wr_coeff,
+@@ -1179,7 +1197,19 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+ } else {
+- bfqq->wr_start_at_switch_to_srt = jiffies;
++ /*
++ * No interactive weight raising in progress
++ * here: assign minus infinity to
++ * wr_start_at_switch_to_srt, to make sure
++ * that, at the end of the soft-real-time
++			 * weight-raising period that is starting
++ * now, no interactive weight-raising period
++ * may be wrongly considered as still in
++ * progress (and thus actually started by
++ * mistake).
++ */
++ bfqq->wr_start_at_switch_to_srt =
++ bfq_smallest_from_now();
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
+ BFQ_SOFTRT_WEIGHT_FACTOR;
+ bfqq->wr_cur_max_time =
+@@ -3067,24 +3097,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
+ }
+
+-/*
+- * Return the farthest future time instant according to jiffies
+- * macros.
+- */
+-static unsigned long bfq_greatest_from_now(void)
+-{
+- return jiffies + MAX_JIFFY_OFFSET;
+-}
+-
+-/*
+- * Return the farthest past time instant according to jiffies
+- * macros.
+- */
+-static unsigned long bfq_smallest_from_now(void)
+-{
+- return jiffies - MAX_JIFFY_OFFSET;
+-}
+-
+ /**
+ * bfq_bfqq_expire - expire a queue.
+ * @bfqd: device owning the queue.
+
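The "minus infinity" assignment above leans on how the kernel compares timestamps: with wrap-around-safe jiffies arithmetic, jiffies - MAX_JIFFY_OFFSET is the farthest representable past instant, so any later test of the form "interactive period start + duration is still in the future" is guaranteed to fail. The userspace sketch below mirrors, in simplified form, the relevant definitions from include/linux/jiffies.h (without the typecheck() wrappers); the fake jiffies value and the duration are invented for the example.

/*
 * Why jiffies - MAX_JIFFY_OFFSET behaves as minus infinity for the
 * check that ends a weight-raising period.
 */
#include <limits.h>
#include <stdio.h>

#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1) - 1)
/* true if time a is after or equal to time b, wrap-around safe */
#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long jiffies = 4294937296UL;  /* arbitrary example value */
	unsigned long wr_duration = 3000;      /* ~30 s at HZ=100         */

	/* what the buggy code did: pretend interactive wr started now */
	unsigned long buggy_start = jiffies;
	/* what the fix does: push the start to "minus infinity" */
	unsigned long fixed_start = jiffies - MAX_JIFFY_OFFSET;

	/* later, BFQ asks: would interactive wr still be in progress? */
	unsigned long later = jiffies + 500;

	printf("buggy: still in progress? %d\n",
	       time_after_eq(buggy_start + wr_duration, later));  /* 1 */
	printf("fixed: still in progress? %d\n",
	       time_after_eq(fixed_start + wr_duration, later));  /* 0 */
	return 0;
}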
+From 9dbea44b6f721baeff35b9fdf628ec55fe00e09d Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 14 Sep 2017 05:12:58 -0400
+Subject: [PATCH 43/51] Fix commit "Unnest request-queue and ioc locks from
+ scheduler locks"
+
+The commit "Unnest request-queue and ioc locks from scheduler locks"
+mistakenly removed the setting of the split flag in function
+bfq_prepare_request. This commit puts this missing instruction back in
+its place.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index f378519b6d33..288078e68a2a 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -744,6 +744,12 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
++ __func__,
++ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
++ bfqq->wr_cur_max_time);
++
+ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
+ time_is_before_jiffies(bfqq->last_wr_start_finish +
+ bfqq->wr_cur_max_time))) {
+@@ -2208,6 +2214,11 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
+ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
++ __func__,
++ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
++ bfqq->wr_cur_max_time);
+ }
+
+ static void
+@@ -4950,6 +4961,7 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
+ bic->saved_in_large_burst = true;
+
+ bfqq = bfq_split_bfqq(bic, bfqq);
++ split = true;
+
+ if (!bfqq)
+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
+
+From d4ebb2a66a23dc183792088c521f2be2193b56db Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 15 Sep 2017 01:53:51 -0400
+Subject: [PATCH 44/51] bfq-sq, bfq-mq: check and switch back to interactive wr
+ also on queue split
+
+As already explained in the message of commit "bfq-mq, bfq-sq: fix
+wrong init of saved start time for weight raising", if a soft
+real-time weight-raising period happens to be nested in a larger
+interactive weight-raising period, then BFQ restores the interactive
+weight raising at the end of the soft real-time weight raising. In
+particular, BFQ checks whether the latter has ended only on request
+dispatches.
+
+Unfortunately, the above scheme fails to restore interactive weight
+raising in the following corner case: if a bfq_queue, say Q,
+1) Is merged with another bfq_queue while it is in a nested soft
+real-time weight-raising period. The weight-raising state of Q is
+then saved, and not considered any longer until a split occurs.
+2) Is split from the other bfq_queue(s) at a time instant when its
+soft real-time weight raising is already finished.
+On the split, while resuming the previous, soft real-time
+weight-raised state of the bfq_queue Q, BFQ checks whether the
+current soft real-time weight-raising period is actually over. If so,
+BFQ switches weight raising off for Q, *without* checking whether the
+soft real-time period was actually nested in a not-yet-finished
+interactive weight-raising period.
+
+This commit addresses this issue by adding the above missing check in
+bfq_queue splits, and restoring interactive weight raising if needed.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Tested-by: Mirko Montanari <mirkomontanari91@gmail.com>
+---
+ block/bfq-mq-iosched.c | 29 +++++++++++++++++++++--------
+ block/bfq-sq-iosched.c | 35 +++++++++++++++++++++++++++--------
+ 2 files changed, 48 insertions(+), 16 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 288078e68a2a..6130a95c6497 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -716,6 +716,15 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
+ return dur;
+ }
+
++/* switch back from soft real-time to interactive weight raising */
++static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
++ struct bfq_data *bfqd)
++{
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
++}
++
+ static void
+ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ struct bfq_io_cq *bic, bool bfq_already_existing)
+@@ -753,12 +762,20 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
+ time_is_before_jiffies(bfqq->last_wr_start_finish +
+ bfqq->wr_cur_max_time))) {
+- bfq_log_bfqq(bfqq->bfqd, bfqq,
++ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ !bfq_bfqq_in_large_burst(bfqq) &&
++ time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
++ bfq_wr_duration(bfqd))) {
++ switch_back_to_interactive_wr(bfqq, bfqd);
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "resume state: switching back to interactive");
++ } else {
++ bfqq->wr_coeff = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "resume state: switching off wr (%lu + %lu < %lu)",
+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
+ jiffies);
+-
+- bfqq->wr_coeff = 1;
++ }
+ }
+
+ /* make sure weight will be updated, however we got here */
+@@ -3820,11 +3837,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_wr_duration(bfqd)))
+ bfq_bfqq_end_wr(bfqq);
+ else {
+- /* switch back to interactive wr */
+- bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+- bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+- bfqq->last_wr_start_finish =
+- bfqq->wr_start_at_switch_to_srt;
++ switch_back_to_interactive_wr(bfqq, bfqd);
+ BUG_ON(time_is_after_jiffies(
+ bfqq->last_wr_start_finish));
+ bfqq->entity.prio_changed = 1;
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index f4654436cd55..e07d5d1c0d40 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -678,6 +678,15 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
+ return dur;
+ }
+
++/* switch back from soft real-time to interactive weight raising */
++static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
++ struct bfq_data *bfqd)
++{
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
++}
++
+ static void
+ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ struct bfq_io_cq *bic, bool bfq_already_existing)
+@@ -705,15 +714,29 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
++ __func__,
++ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
++ bfqq->wr_cur_max_time);
++
+ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
+ time_is_before_jiffies(bfqq->last_wr_start_finish +
+ bfqq->wr_cur_max_time))) {
+- bfq_log_bfqq(bfqq->bfqd, bfqq,
++ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
++ !bfq_bfqq_in_large_burst(bfqq) &&
++ time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
++ bfq_wr_duration(bfqd))) {
++ switch_back_to_interactive_wr(bfqq, bfqd);
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "resume state: switching back to interactive");
++ } else {
++ bfqq->wr_coeff = 1;
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "resume state: switching off wr (%lu + %lu < %lu)",
+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
+ jiffies);
+-
+- bfqq->wr_coeff = 1;
++ }
+ }
+
+ /* make sure weight will be updated, however we got here */
+@@ -3703,11 +3726,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_wr_duration(bfqd)))
+ bfq_bfqq_end_wr(bfqq);
+ else {
+- /* switch back to interactive wr */
+- bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+- bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+- bfqq->last_wr_start_finish =
+- bfqq->wr_start_at_switch_to_srt;
++ switch_back_to_interactive_wr(bfqq, bfqd);
+ BUG_ON(time_is_after_jiffies(
+ bfqq->last_wr_start_finish));
+ bfqq->entity.prio_changed = 1;
+
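The effect of the check added to bfq_bfqq_resume_state() can be summarised as: when the restored queue is weight-raised but its period has expired, fall back to the enclosing interactive period if that period is still running, otherwise switch weight raising off. The sketch below flattens this into plain C with invented field names and naive (non-wrapping) time arithmetic, and it omits the large-burst test; it is only an illustration, not the kernel function.

/*
 * Condensed model of the resume-state decision on a queue split.
 * coeff == 1 means "no weight raising".
 */
#include <stdio.h>

struct wr_state {
	unsigned int coeff;                   /* 1 == no weight raising   */
	unsigned long cur_max_time;           /* length of current period */
	unsigned long last_start_finish;      /* start of current period  */
	unsigned long start_at_switch_to_srt; /* start of interactive wr  */
};

static void resume_state(struct wr_state *s, unsigned long now,
			 unsigned long rt_max_time,     /* soft-rt window    */
			 unsigned long interactive_dur, /* bfq_wr_duration() */
			 unsigned int interactive_coeff)
{
	if (s->coeff <= 1 ||
	    now <= s->last_start_finish + s->cur_max_time)
		return;         /* not weight-raised, or period still valid */

	if (s->cur_max_time == rt_max_time &&
	    s->start_at_switch_to_srt + interactive_dur >= now) {
		/* the nested soft-rt period is over, but the interactive
		 * one is not: switch back to interactive weight raising */
		s->coeff = interactive_coeff;
		s->cur_max_time = interactive_dur;
		s->last_start_finish = s->start_at_switch_to_srt;
	} else {
		s->coeff = 1;   /* no enclosing period: stop weight raising */
	}
}

int main(void)
{
	/* soft-rt wr started at t=900 for 300 ticks, nested in an
	 * interactive period that began at t=100 and lasts 3000 ticks */
	struct wr_state s = { 30, 300, 900, 100 };

	resume_state(&s, 1500, 300, 3000, 10);
	printf("coeff=%u cur_max_time=%lu\n", s.coeff, s.cur_max_time);
	return 0;
}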
+From 9eaec0c3a2d675763b09da81c9117a9c43bce942 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 15 Sep 2017 04:58:33 -0400
+Subject: [PATCH 45/51] bfq-sq, bfq-mq: let early-merged queues be
+ weight-raised on split too
+
+A just-created bfq_queue, say Q, may happen to be merged with another
+bfq_queue on the very first invocation of the function
+__bfq_insert_request. In such a case, even if Q would clearly deserve
+interactive weight raising (as it has just been created), the function
+bfq_add_request does not get invoked for Q, and thus does not
+activate weight raising for Q. As a consequence, when the state of Q
+is saved for a possible future restore, after a split of Q from the
+other bfq_queue(s), such a state happens to be (unjustly)
+non-weight-raised. Then the bfq_queue will not enjoy any weight
+raising on the split, even if it should still be in an interactive
+weight-raising period when the split occurs.
+
+This commit solves this problem as follows, for a just-created
+bfq_queue that is being early-merged: it stores directly, in the saved
+state of the bfq_queue, the weight-raising state that would have been
+assigned to the bfq_queue if not early-merged.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Tested-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Tested-by: Mirko Montanari <mirkomontanari91@gmail.com>
+---
+ block/bfq-mq-iosched.c | 28 +++++++++++++++++++++++-----
+ block/bfq-sq-iosched.c | 28 +++++++++++++++++++++++-----
+ 2 files changed, 46 insertions(+), 10 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 6130a95c6497..af84e506e897 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -2226,10 +2226,27 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
+- bic->saved_wr_coeff = bfqq->wr_coeff;
+- bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
+- bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
+- bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ if (unlikely(bfq_bfqq_just_created(bfqq) &&
++ !bfq_bfqq_in_large_burst(bfqq))) {
++ /*
++		 * bfqq being merged right after being created: bfqq
++ * would have deserved interactive weight raising, but
++ * did not make it to be set in a weight-raised state,
++ * because of this early merge. Store directly the
++ * weight-raising state that would have been assigned
++		 * to bfqq, so that bfqq does not unjustly fail
++ * to enjoy weight raising if split soon.
++ */
++ bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
++ bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
++ bic->saved_last_wr_start_finish = jiffies;
++ } else {
++ bic->saved_wr_coeff = bfqq->wr_coeff;
++ bic->saved_wr_start_at_switch_to_srt =
++ bfqq->wr_start_at_switch_to_srt;
++ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
++ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ }
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
+@@ -4560,7 +4577,6 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ bfqq->allocated);
+
+ new_bfqq->ref++;
+- bfq_clear_bfqq_just_created(bfqq);
+ /*
+ * If the bic associated with the process
+ * issuing this request still points to bfqq
+@@ -4572,6 +4588,8 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
+ bfqq, new_bfqq);
++
++ bfq_clear_bfqq_just_created(bfqq);
+ /*
+ * rq is about to be enqueued into new_bfqq,
+ * release rq reference on bfqq
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index e07d5d1c0d40..0c48f527fe3f 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -2105,10 +2105,27 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
+- bic->saved_wr_coeff = bfqq->wr_coeff;
+- bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
+- bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
+- bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ if (unlikely(bfq_bfqq_just_created(bfqq) &&
++ !bfq_bfqq_in_large_burst(bfqq))) {
++ /*
++		 * bfqq being merged right after being created: bfqq
++ * would have deserved interactive weight raising, but
++ * did not make it to be set in a weight-raised state,
++ * because of this early merge. Store directly the
++ * weight-raising state that would have been assigned
++		 * to bfqq, so that bfqq does not unjustly fail
++ * to enjoy weight raising if split soon.
++ */
++ bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
++ bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
++ bic->saved_last_wr_start_finish = jiffies;
++ } else {
++ bic->saved_wr_coeff = bfqq->wr_coeff;
++ bic->saved_wr_start_at_switch_to_srt =
++ bfqq->wr_start_at_switch_to_srt;
++ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
++ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
++ }
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+ }
+
+@@ -4383,10 +4400,11 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq)
+ new_bfqq->allocated[rq_data_dir(rq)]++;
+ bfqq->allocated[rq_data_dir(rq)]--;
+ new_bfqq->ref++;
+- bfq_clear_bfqq_just_created(bfqq);
+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
+ bfqq, new_bfqq);
++
++ bfq_clear_bfqq_just_created(bfqq);
+ /*
+ * rq is about to be enqueued into new_bfqq,
+ * release rq reference on bfqq
+
+From cb05150675095cb97ab22e4955eb82e4fe2e9dbe Mon Sep 17 00:00:00 2001
+From: omcira <omcira@gmail.com>
+Date: Mon, 18 Sep 2017 10:49:48 +0200
+Subject: [PATCH 46/51] bfq-sq, bfq-mq: decrease burst size when queues in
+ burst exit
+
+If many queues belonging to the same group happen to be created
+shortly after each other, then the concurrent processes associated
+with these queues have typically a common goal, and they get it done
+as soon as possible if not hampered by device idling. Examples are
+processes spawned by git grep, or by systemd during boot. As for
+device idling, this mechanism is currently necessary for weight
+raising to succeed in its goal: privileging I/O. In view of these
+facts, BFQ does not provide the above queues with either weight
+raising or device idling.
+
+On the other hand, a burst of queue creations may be caused also by
+the start-up of a complex application. In this case, these queues need
+usually to be served one after the other, and as quickly as possible,
+to maximise responsiveness. Therefore, in this case the best strategy
+is to weight-raise all the queues created during the burst, i.e., the
+exact opposite of the strategy for the above case.
+
+To distinguish between the two cases, BFQ uses an empirical burst-size
+threshold, found through extensive tests and monitoring of daily
+usage. Only large bursts, i.e., bursts with a size above this
+threshold, are considered as generated by a high number of parallel
+processes. In this respect, upstart-based boot proved to be rather
+hard to detect as generating a large burst of queue creations, because
+with upstart most of the queues created in a burst exit *before* the
+next queues in the same burst are created. To address this issue, I
+changed the burst-detection mechanism so as to not decrease the size
+of the current burst even if one of the queues in the burst is
+eliminated.
+
+Unfortunately, this missing decrease causes false positives on very
+fast systems: on the start-up of a complex application, such as
+libreoffice writer, so many queues are created, served and exited
+shortly after each other that a large burst of queue creations is
+wrongly detected as occurring. These false positives just disappear if
+the size of a burst is decreased when one of the queues in the burst
+exits. This commit restores the missing burst-size decrease, relying
+on the fact that upstart is apparently unlikely to be used on systems
+running this and future versions of the kernel.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Mauro Andreolini <mauro.andreolini@unimore.it>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Tested-by: Mirko Montanari <mirkomontanari91@gmail.com>
+---
+ block/bfq-mq-iosched.c | 12 +++---------
+ block/bfq-sq-iosched.c | 12 +++---------
+ 2 files changed, 6 insertions(+), 18 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index af84e506e897..6e413d7236ce 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4111,16 +4111,10 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+- if (bfq_bfqq_sync(bfqq))
+- /*
+- * The fact that this queue is being destroyed does not
+- * invalidate the fact that this queue may have been
+- * activated during the current burst. As a consequence,
+- * although the queue does not exist anymore, and hence
+- * needs to be removed from the burst list if there,
+- * the burst size has not to be decremented.
+- */
++ if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) {
+ hlist_del_init(&bfqq->burst_list_node);
++ bfqq->bfqd->burst_size--;
++ }
+
+ if (bfqq->bfqd)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 0c48f527fe3f..93034dd7b801 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -3945,16 +3945,10 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+- if (bfq_bfqq_sync(bfqq))
+- /*
+- * The fact that this queue is being destroyed does not
+- * invalidate the fact that this queue may have been
+- * activated during the current burst. As a consequence,
+- * although the queue does not exist anymore, and hence
+- * needs to be removed from the burst list if there,
+- * the burst size has not to be decremented.
+- */
++ if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) {
+ hlist_del_init(&bfqq->burst_list_node);
++ bfqq->bfqd->burst_size--;
++ }
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+
+
+From 60de7307d5e3ed7f272f12c900f631bdfe114db2 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 6 Oct 2017 19:35:38 +0200
+Subject: [PATCH 47/51] bfq-sq, bfq-mq: fix unbalanced decrements of burst size
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The commit "bfq-sq, bfq-mq: decrease burst size when queues in burst
+exit" introduced the decrement of burst_size on the removal of a
+bfq_queue from the burst list. Unfortunately, this decrement can
+happen to be performed even when burst size is already equal to 0,
+because of unbalanced decrements. A description follows of the cause
+of these unbalanced decrements, namely a wrong assumption, and of the
+way this wrong assumption leads to unbalanced decrements.
+
+The wrong assumption is that a bfq_queue can exit only if the process
+associated with the bfq_queue has exited. This is false, because a
+bfq_queue, say Q, may exit also as a consequence of a merge with
+another bfq_queue. In this case, Q exits because the I/O of its
+associated process has been redirected to another bfq_queue.
+
+The decrement unbalance occurs because Q may then be re-created after
+a split, and added back to the current burst list, *without*
+incrementing burst_size. burst_size is not incremented because Q is
+not a new bfq_queue added to the burst list, but a bfq_queue only
+temporarily removed from the list, and, before the commit "bfq-sq,
+bfq-mq: decrease burst size when queues in burst exit", burst_size was
+not decremented when Q was removed.
+
+This commit addresses this issue by just checking whether the exiting
+bfq_queue is a merged bfq_queue, and, in that case, not decrementing
+burst_size. Unfortunately, this still leaves room for unbalanced
+decrements, in the following rarer case: on a split, the bfq_queue
+happens to be inserted into a different burst list than that it was
+removed from when merged. If this happens, the number of elements in
+the new burst list becomes higher than burst_size (by one). When the
+bfq_queue then exits, it is of course not in a merged state any
+longer, thus burst_size is decremented, which results in an unbalanced
+decrement. To handle this sporadic, unlucky case in a simple way,
+this commit also checks that burst_size is larger than 0 before
+decrementing it.
+
+Finally, this commit removes a useless, extra check: the check that
+the bfq_queue is sync, performed before checking whether the bfq_queue
+is in the burst list. This extra check is redundant, because only sync
+bfq_queues can be inserted into the burst list.
+
+Reported-by: Philip Müller <philm@manjaro.org>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Tested-by: Philip Müller <philm@manjaro.org>
+Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
+Tested-by: Lee Tibbert <lee.tibbert@gmail.com>
+---
+ block/bfq-mq-iosched.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++--
+ block/bfq-sq-iosched.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 114 insertions(+), 4 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 6e413d7236ce..816bac6cdd3d 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4111,9 +4111,36 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+- if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) {
++ if (!hlist_unhashed(&bfqq->burst_list_node)) {
+ hlist_del_init(&bfqq->burst_list_node);
+- bfqq->bfqd->burst_size--;
++ /*
++ * Decrement also burst size after the removal, if the
++ * process associated with bfqq is exiting, and thus
++ * does not contribute to the burst any longer. This
++ * decrement helps filter out false positives of large
++ * bursts, when some short-lived process (often due to
++ * the execution of commands by some service) happens
++ * to start and exit while a complex application is
++ * starting, and thus spawning several processes that
++ * do I/O (and that *must not* be treated as a large
++ * burst, see comments on bfq_handle_burst).
++ *
++ * In particular, the decrement is performed only if:
++ * 1) bfqq is not a merged queue, because, if it is,
++ * then this free of bfqq is not triggered by the exit
++ * of the process bfqq is associated with, but exactly
++ * by the fact that bfqq has just been merged.
++ * 2) burst_size is greater than 0, to handle
++ * unbalanced decrements. Unbalanced decrements may
++		 * happen in the following case: bfqq is inserted into
++ * the current burst list--without incrementing
++		 * burst_size--because of a split, but the current
++ * burst list is not the burst list bfqq belonged to
++ * (see comments on the case of a split in
++ * bfq_set_request).
++ */
++ if (bfqq->bic && bfqq->bfqd->burst_size > 0)
++ bfqq->bfqd->burst_size--;
+ }
+
+ if (bfqq->bfqd)
+@@ -4940,6 +4967,34 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
+ "large burst");
+ bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list)
++ /*
++ * If bfqq was in the current
++ * burst list before being
++ * merged, then we have to add
++ * it back. And we do not need
++ * to increase burst_size, as
++ * we did not decrement
++ * burst_size when we removed
++ * bfqq from the burst list as
++ * a consequence of a merge
++ * (see comments in
++ * bfq_put_queue). In this
++ * respect, it would be rather
++ * costly to know whether the
++ * current burst list is still
++ * the same burst list from
++ * which bfqq was removed on
++ * the merge. To avoid this
++ * cost, if bfqq was in a
++ * burst list, then we add
++ * bfqq to the current burst
++ * list without any further
++ * check. This can cause
++ * inappropriate insertions,
++ * but rarely enough to not
++ * harm the detection of large
++ * bursts significantly.
++ */
+ hlist_add_head(&bfqq->burst_list_node,
+ &bfqd->burst_list);
+ }
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 93034dd7b801..4bbd7f4c0154 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -3945,9 +3945,36 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ BUG_ON(bfqq->entity.tree);
+ BUG_ON(bfq_bfqq_busy(bfqq));
+
+- if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) {
++ if (!hlist_unhashed(&bfqq->burst_list_node)) {
+ hlist_del_init(&bfqq->burst_list_node);
+- bfqq->bfqd->burst_size--;
++ /*
++ * Decrement also burst size after the removal, if the
++ * process associated with bfqq is exiting, and thus
++ * does not contribute to the burst any longer. This
++ * decrement helps filter out false positives of large
++ * bursts, when some short-lived process (often due to
++ * the execution of commands by some service) happens
++ * to start and exit while a complex application is
++ * starting, and thus spawning several processes that
++ * do I/O (and that *must not* be treated as a large
++ * burst, see comments on bfq_handle_burst).
++ *
++ * In particular, the decrement is performed only if:
++ * 1) bfqq is not a merged queue, because, if it is,
++ * then this free of bfqq is not triggered by the exit
++ * of the process bfqq is associated with, but exactly
++ * by the fact that bfqq has just been merged.
++ * 2) burst_size is greater than 0, to handle
++ * unbalanced decrements. Unbalanced decrements may
++		 * happen in the following case: bfqq is inserted into
++ * the current burst list--without incrementing
++		 * burst_size--because of a split, but the current
++ * burst list is not the burst list bfqq belonged to
++ * (see comments on the case of a split in
++ * bfq_set_request).
++ */
++ if (bfqq->bic && bfqq->bfqd->burst_size > 0)
++ bfqq->bfqd->burst_size--;
+ }
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+@@ -4691,6 +4718,34 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ "large burst");
+ bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list)
++ /*
++ * If bfqq was in the current
++ * burst list before being
++ * merged, then we have to add
++ * it back. And we do not need
++ * to increase burst_size, as
++ * we did not decrement
++ * burst_size when we removed
++ * bfqq from the burst list as
++ * a consequence of a merge
++ * (see comments in
++ * bfq_put_queue). In this
++ * respect, it would be rather
++ * costly to know whether the
++ * current burst list is still
++ * the same burst list from
++ * which bfqq was removed on
++ * the merge. To avoid this
++ * cost, if bfqq was in a
++ * burst list, then we add
++ * bfqq to the current burst
++ * list without any further
++ * check. This can cause
++ * inappropriate insertions,
++ * but rarely enough to not
++ * harm the detection of large
++ * bursts significantly.
++ */
+ hlist_add_head(&bfqq->burst_list_node,
+ &bfqd->burst_list);
+ }
+
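Taken together, patches 46 and 47 leave the burst accounting in bfq_put_queue() with two guards: only a queue that is actually on a burst list is removed from it, and burst_size is decremented only for a genuine process exit (bfqq->bic still set) and only while the counter is positive. The toy model below, with a boolean in place of the hlist and invented names, just exercises those two guards; it is not the kernel code.

/*
 * Toy model of the burst-size bookkeeping described above.
 * "on_burst_list" stands in for hlist_unhashed()/hlist_del_init(),
 * "has_bic" for the bfqq->bic test that distinguishes a queue freed on
 * process exit from one freed because it was merged.
 */
#include <stdbool.h>
#include <stdio.h>

struct sched_data {
	unsigned int burst_size;    /* queues in the current burst */
};

struct queue {
	bool on_burst_list;
	bool has_bic;               /* false once the queue is merged */
};

static void put_queue(struct sched_data *sd, struct queue *q)
{
	if (!q->on_burst_list)
		return;
	q->on_burst_list = false;   /* hlist_del_init() in the kernel */

	/*
	 * Decrement only for a real process exit (1) and only while the
	 * counter is positive (2), so a queue re-added after a split
	 * into a different burst list cannot drive the count negative.
	 */
	if (q->has_bic && sd->burst_size > 0)
		sd->burst_size--;
}

int main(void)
{
	struct sched_data sd = { .burst_size = 1 };
	struct queue merged  = { .on_burst_list = true, .has_bic = false };
	struct queue exiting = { .on_burst_list = true, .has_bic = true };

	put_queue(&sd, &merged);    /* merge: size stays at 1    */
	put_queue(&sd, &exiting);   /* real exit: size drops to 0 */
	printf("burst_size = %u\n", sd.burst_size);
	return 0;
}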
+From 09adbd0f46f4ba395964b35bf611b7cc3dd84b4d Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 30 Oct 2017 16:50:50 +0100
+Subject: [PATCH 48/51] doc, block, bfq-mq: update max IOPS sustainable with
+ BFQ
+
+We have investigated more deeply the performance of BFQ, in terms of
+number of IOPS that can be processed by the CPU when BFQ is used as
+I/O scheduler. In more detail, using the script [1], we have measured
+the number of IOPS reached on top of a null block device configured
+with zero latency, as a function of the workload (sequential read,
+sequential write, random read, random write) and of the system (we
+considered desktops, laptops and embedded systems).
+
+Based on the resulting figures, with this commit we update the
+current, conservative IOPS range reported in BFQ documentation. In
+particular, the documentation now reports, for each of three different
+systems, the lowest number of IOPS obtained for that system with the
+above test (namely, the value obtained with the workload leading to
+the lowest IOPS).
+
+[1] https://github.com/Algodev-github/IOSpeed
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+---
+ Documentation/block/bfq-iosched.txt | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index dcfe15523da3..595ff7a5ff34 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -29,12 +29,19 @@ for that device, by setting low_latency to 0. See Section 3 for
+ details on how to configure BFQ for the desired tradeoff between
+ latency and throughput, or on how to maximize throughput.
+
+-On average CPUs, the current version of BFQ can handle devices
+-performing at most ~30K IOPS; at most ~50 KIOPS on faster CPUs. As a
+-reference, 30-50 KIOPS correspond to very high bandwidths with
+-sequential I/O (e.g., 8-12 GB/s if I/O requests are 256 KB large), and
+-to 120-200 MB/s with 4KB random I/O. BFQ is currently being tested on
+-multi-queue devices too.
++BFQ has a non-null overhead, which limits the maximum IOPS that the
++CPU can process for a device scheduled with BFQ. To give an idea of
++the limits on slow or average CPUs, here are BFQ limits for three
++different CPUs, on, respectively, an average laptop, an old desktop,
++and a cheap embedded system, in case full hierarchical support is
++enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set for bfq-sq, or
++CONFIG_MQ_BFQ_GROUP_IOSCHED is set for bfq-mq, or, finally,
++CONFIG_BFQ_GROUP_IOSCHED is set for bfq):
++- Intel i7-4850HQ: 250 KIOPS
++- AMD A8-3850: 170 KIOPS
++- ARM CortexTM-A53 Octa-core: 45 KIOPS
++
++BFQ works for multi-queue devices too (bfq and bfq-mq instances).
+
+ The table of contents follows. Impatients can just jump to Section 3.
+
+
+From be94f97b577dc587593185224a7718aa59ac43f7 Mon Sep 17 00:00:00 2001
+From: Luca Miccio <lucmiccio@gmail.com>
+Date: Tue, 31 Oct 2017 09:50:11 +0100
+Subject: [PATCH 49/51] block, bfq-mq: add missing invocations of
+ bfqg_stats_update_io_add/remove
+
+bfqg_stats_update_io_add and bfqg_stats_update_io_remove are to be
+invoked, respectively, when an I/O request enters and when an I/O
+request exits the scheduler. Unfortunately, bfq-mq does not fully comply
+with this scheme, because it does not invoke these functions for
+requests that are inserted into or extracted from its priority
+dispatch list. This commit fixes this mistake.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+---
+ block/bfq-mq-iosched.c | 24 +++++++++++++++++++-----
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 816bac6cdd3d..fbf28804c220 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1394,7 +1394,6 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
+ BUG_ON(bfqq->entity.budget < bfqq->entity.service);
+
+ BUG_ON(bfqq == bfqd->in_service_queue);
+- bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
+
+ /*
+ * bfqq deserves to be weight-raised if:
+@@ -1734,7 +1733,6 @@ static void bfq_remove_request(struct request_queue *q,
+ BUG_ON(bfqq->meta_pending == 0);
+ bfqq->meta_pending--;
+ }
+- bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
+ }
+
+ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+@@ -1879,6 +1877,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+ bfqq->next_rq = rq;
+
+ bfq_remove_request(q, next);
++ bfqg_stats_update_io_remove(bfqq_group(bfqq), next->cmd_flags);
+
+ spin_unlock_irq(&bfqq->bfqd->lock);
+ end:
+@@ -4077,6 +4076,10 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ spin_lock_irq(&bfqd->lock);
+
+ rq = __bfq_dispatch_request(hctx);
++ if (rq && RQ_BFQQ(rq))
++ bfqg_stats_update_io_remove(bfqq_group(RQ_BFQQ(rq)),
++ rq->cmd_flags);
++
+ spin_unlock_irq(&bfqd->lock);
+
+ return rq;
+@@ -4634,6 +4637,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ {
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+
+ spin_lock_irq(&bfqd->lock);
+ if (blk_mq_sched_try_insert_merge(q, rq)) {
+@@ -4647,8 +4651,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ spin_lock_irq(&bfqd->lock);
+ if (at_head || blk_rq_is_passthrough(rq)) {
+- struct bfq_queue *bfqq = RQ_BFQQ(rq);
+-
+ if (at_head)
+ list_add(&rq->queuelist, &bfqd->dispatch);
+ else
+@@ -4668,6 +4670,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ rq->rq_flags &= ~RQF_GOT;
+
+ __bfq_insert_request(bfqd, rq);
++ /*
++ * Update bfqq, because, if a queue merge has occurred
++ * in __bfq_insert_request, then rq has been
++ * redirected into a new queue.
++ */
++ bfqq = RQ_BFQQ(rq);
+
+ if (rq_mergeable(rq)) {
+ elv_rqhash_add(q, rq);
+@@ -4676,6 +4684,9 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ }
+ }
+
++ if (bfqq)
++ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, rq->cmd_flags);
++
+ spin_unlock_irq(&bfqd->lock);
+ }
+
+@@ -4893,8 +4904,11 @@ static void bfq_finish_request(struct request *rq)
+ BUG_ON(in_interrupt());
+
+ assert_spin_locked(&bfqd->lock);
+- if (!RB_EMPTY_NODE(&rq->rb_node))
++ if (!RB_EMPTY_NODE(&rq->rb_node)) {
+ bfq_remove_request(rq->q, rq);
++ bfqg_stats_update_io_remove(bfqq_group(bfqq),
++ rq->cmd_flags);
++ }
+ bfq_put_rq_priv_body(bfqq);
+ }
+
+
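The rule behind the fix above is a pairing invariant: every request that enters the scheduler is counted once by bfqg_stats_update_io_add(), and every path that takes it out again (a dispatch, including from the priority dispatch list, a request merge, or the completion of a never-dispatched request) is counted once by bfqg_stats_update_io_remove(). The deliberately tiny sketch below models that invariant with a single counter; the real code updates per-cgroup blkg statistics, and all function names here are invented for the illustration.

/*
 * Counter model of the io_add/io_remove pairing: after every request
 * has left the scheduler by some path, the counter must be back at 0.
 */
#include <assert.h>
#include <stdio.h>

static int in_flight;               /* requests accounted in the stats */

static void io_add(void)    { in_flight++; }
static void io_remove(void) { assert(in_flight > 0); in_flight--; }

static void insert_request(void)              { io_add(); }
static void dispatch_request(void)            { io_remove(); }
static void merge_requests(void)              { io_remove(); } /* 'next' rq */
static void finish_undispatched_request(void) { io_remove(); }

int main(void)
{
	insert_request();
	insert_request();
	insert_request();

	dispatch_request();             /* normal dispatch path       */
	merge_requests();               /* rq merged into another one */
	finish_undispatched_request();  /* rq freed while still queued */

	printf("in_flight = %d\n", in_flight);   /* back to 0 */
	return 0;
}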
+From 8659a1549d2bf241129a0f7c90429bddd9c2bc53 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 8 Nov 2017 19:07:40 +0100
+Subject: [PATCH 50/51] block, bfq-mq: update blkio stats outside the scheduler
+ lock
+
+bfq-mq invokes various blkg_*stats_* functions to update the statistics
+contained in the special files blkio.bfq-mq.* in the blkio controller
+groups, i.e., the I/O accounting related to the proportional-share
+policy provided by bfq-mq. The execution of these functions takes a
+considerable percentage, about 40%, of the total per-request execution
+time of bfq-mq (i.e., of the sum of the execution time of all the bfq-mq
+functions that have to be executed to process an I/O request from its
+creation to its destruction). This reduces the request-processing
+rate sustainable by bfq-mq noticeably, even on a multicore CPU. In fact,
+the bfq-mq functions that invoke blkg_*stats_* functions cannot be
+executed in parallel with the rest of the code of bfq-mq, because
+both are executed under the same per-device scheduler lock.
+
+To reduce this slowdown, this commit moves, wherever possible, the
+invocation of these functions (more precisely, of the bfq-mq functions
+that invoke blkg_*stats_* functions) outside the critical sections
+protected by the scheduler lock.
+
+With this change, and with all blkio.bfq-mq.* statistics enabled, the
+throughput grows, e.g., from 250 to 310 KIOPS (+25%) on an Intel
+i7-4850HQ, in case of 8 threads doing random I/O in parallel on
+null_blk, with the latter configured with 0 latency. We obtained the
+same or higher throughput boosts, up to +30%, with other processors
+(some figures are reported in the documentation). For our tests, we
+used the script [1], with which our results can be easily reproduced.
+
+NOTE. This commit still protects the invocation of blkg_*stats_*
+functions with the request_queue lock, because the group these
+functions are invoked on may otherwise disappear before or while these
+functions are executed. Fortunately, tests without even this lock
+show, by difference, that the serialization caused by this lock has
+little impact (at most a ~5% reduction in throughput).
+
+[1] https://github.com/Algodev-github/IOSpeed
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+---
+ Documentation/block/bfq-iosched.txt | 18 ++++--
+ block/bfq-mq-iosched.c | 112 +++++++++++++++++++++++++++++++-----
+ block/bfq-sched.c | 2 +
+ 3 files changed, 112 insertions(+), 20 deletions(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index 595ff7a5ff34..c816c595082d 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -31,16 +31,22 @@ latency and throughput, or on how to maximize throughput.
+
+ BFQ has a non-null overhead, which limits the maximum IOPS that the
+ CPU can process for a device scheduled with BFQ. To give an idea of
+-the limits on slow or average CPUs, here are BFQ limits for three
+-different CPUs, on, respectively, an average laptop, an old desktop,
+-and a cheap embedded system, in case full hierarchical support is
+-enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set for bfq-sq, or
+-CONFIG_MQ_BFQ_GROUP_IOSCHED is set for bfq-mq, or, finally,
+-CONFIG_BFQ_GROUP_IOSCHED is set for bfq):
++the limits on slow or average CPUs, here are, first, the limits of
++bfq-sq for three different CPUs, on, respectively, an average laptop,
++an old desktop, and a cheap embedded system, in case full hierarchical
++support is enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set):
+ - Intel i7-4850HQ: 250 KIOPS
+ - AMD A8-3850: 170 KIOPS
+ - ARM CortexTM-A53 Octa-core: 45 KIOPS
+
++bfq-mq and bfq instances reach, instead, a higher sustainable
++throughput. Their limits, on the same systems as above, are, with full
++hierarchical support enabled (i.e., CONFIG_MQ_BFQ_GROUP_IOSCHED set
++for bfq-mq, or CONFIG_BFQ_GROUP_IOSCHED set for bfq):
++- Intel i7-4850HQ: 310 KIOPS
++- AMD A8-3850: 200 KIOPS
++- ARM CortexTM-A53 Octa-core: 56 KIOPS
++
+ BFQ works for multi-queue devices too (bfq and bfq-mq instances).
+
+ The table of contents follows. Impatients can just jump to Section 3.
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index fbf28804c220..ab3b83d612c2 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1822,7 +1822,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ bfqq->next_rq = next_rq;
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "requests_merged: req %p prev %p next_rq %p bfqq %p",
++ "request_merged: req %p prev %p next_rq %p bfqq %p",
+ req, prev, next_rq, bfqq);
+
+ /*
+@@ -2415,7 +2415,6 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+ {
+ if (bfqq) {
+- bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
+ bfq_clear_bfqq_fifo_expire(bfqq);
+
+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
+@@ -3784,7 +3783,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ */
+ bfq_clear_bfqq_wait_request(bfqq);
+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
+- bfqg_stats_update_idle_time(bfqq_group(bfqq));
+ }
+ goto keep_queue;
+ }
+@@ -4072,16 +4070,67 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+ struct request *rq;
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ struct bfq_queue *in_serv_queue, *bfqq;
++ bool waiting_rq, idle_timer_disabled;
++#endif
+
+ spin_lock_irq(&bfqd->lock);
+
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ in_serv_queue = bfqd->in_service_queue;
++ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
++
+ rq = __bfq_dispatch_request(hctx);
+- if (rq && RQ_BFQQ(rq))
+- bfqg_stats_update_io_remove(bfqq_group(RQ_BFQQ(rq)),
+- rq->cmd_flags);
+
++ idle_timer_disabled =
++ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
++
++#else
++ rq = __bfq_dispatch_request(hctx);
++#endif
+ spin_unlock_irq(&bfqd->lock);
+
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ bfqq = rq ? RQ_BFQQ(rq) : NULL;
++ if (!idle_timer_disabled && !bfqq)
++ return rq;
++
++ /*
++ * rq and bfqq are guaranteed to exist until this function
++ * ends, for the following reasons. First, rq can be
++ * dispatched to the device, and then can be completed and
++ * freed, only after this function ends. Second, rq cannot be
++ * merged (and thus freed because of a merge) any longer,
++ * because it has already started. Thus rq cannot be freed
++ * before this function ends, and, since rq has a reference to
++ * bfqq, the same guarantee holds for bfqq too.
++ *
++ * In addition, the following queue lock guarantees that
++ * bfqq_group(bfqq) exists as well.
++ */
++ spin_lock_irq(hctx->queue->queue_lock);
++ if (idle_timer_disabled)
++ /*
++ * Since the idle timer has been disabled,
++ * in_serv_queue contained some request when
++ * __bfq_dispatch_request was invoked above, which
++ * implies that rq was picked exactly from
++ * in_serv_queue. Thus in_serv_queue == bfqq, and is
++ * therefore guaranteed to exist because of the above
++ * arguments.
++ */
++ bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
++ if (bfqq) {
++ struct bfq_group *bfqg = bfqq_group(bfqq);
++
++ bfqg_stats_update_avg_queue_size(bfqg);
++ bfqg_stats_set_start_empty_time(bfqg);
++ bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
++ }
++ spin_unlock_irq(hctx->queue->queue_lock);
++#endif
++
+ return rq;
+ }
+
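The hunk above reworks bfq_dispatch_request() so that no blkg statistics are
updated while the scheduler lock (bfqd->lock) is held: whether the idle timer
was disarmed is detected by sampling bfq_bfqq_wait_request() before and after
__bfq_dispatch_request(), and the bfqg_stats_*() calls are issued afterwards
under the request-queue lock. A self-contained userspace sketch of that
"sample under lock A, account under lock B" pattern, with made-up names,
might look like this:

    /* Illustration of the locking pattern only; not BFQ code.
     * Build with: cc demo.c -pthread */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER; /* bfqd->lock */
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER; /* queue_lock */
    static bool in_service_waiting = true; /* stands in for bfq_bfqq_wait_request() */
    static long idle_time_events;          /* stands in for the bfqg idle-time stat */

    static int dispatch(void)
    {
        in_service_waiting = false;    /* dispatching disarms the idle timer */
        return 42;                     /* pretend this is the dispatched rq  */
    }

    int main(void)
    {
        bool waiting, idle_timer_disabled;
        int rq;

        /* Hot path: only sample state while holding the scheduler lock. */
        pthread_mutex_lock(&sched_lock);
        waiting = in_service_waiting;
        rq = dispatch();
        idle_timer_disabled = waiting && !in_service_waiting;
        pthread_mutex_unlock(&sched_lock);

        /* Accounting happens afterwards, under the other lock. */
        pthread_mutex_lock(&queue_lock);
        if (idle_timer_disabled)
            idle_time_events++;
        pthread_mutex_unlock(&queue_lock);

        printf("rq=%d idle_timer_disabled=%d events=%ld\n",
               rq, idle_timer_disabled, idle_time_events);
        return 0;
    }
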
+@@ -4200,7 +4249,6 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
+-
+ bfq_exit_bfqq(bfqd, bfqq);
+ bic_set_bfqq(bic, NULL, is_sync);
+ spin_unlock_irqrestore(&bfqd->lock, flags);
+@@ -4554,7 +4602,6 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ */
+ bfq_clear_bfqq_wait_request(bfqq);
+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
+- bfqg_stats_update_idle_time(bfqq_group(bfqq));
+
+ /*
+ * The queue is not empty, because a new request just
+@@ -4569,9 +4616,11 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ }
+ }
+
+-static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
++/* returns true if it causes the idle timer to be disabled */
++static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ {
+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
++ bool waiting, idle_timer_disabled = false;
+ BUG_ON(!bfqq);
+
+ assert_spin_locked(&bfqd->lock);
+@@ -4624,12 +4673,16 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ }
+ }
+
++ waiting = bfqq && bfq_bfqq_wait_request(bfqq);
+ bfq_add_request(rq);
++ idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
+
+ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
+ list_add_tail(&rq->queuelist, &bfqq->fifo);
+
+ bfq_rq_enqueued(bfqd, bfqq, rq);
++
++ return idle_timer_disabled;
+ }
+
+ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+@@ -4638,6 +4691,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ bool idle_timer_disabled = false;
++ unsigned int cmd_flags;
++#endif
+
+ spin_lock_irq(&bfqd->lock);
+ if (blk_mq_sched_try_insert_merge(q, rq)) {
+@@ -4669,13 +4726,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ BUG_ON(!(rq->rq_flags & RQF_GOT));
+ rq->rq_flags &= ~RQF_GOT;
+
+- __bfq_insert_request(bfqd, rq);
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ idle_timer_disabled = __bfq_insert_request(bfqd, rq);
+ /*
+ * Update bfqq, because, if a queue merge has occurred
+ * in __bfq_insert_request, then rq has been
+ * redirected into a new queue.
+ */
+ bfqq = RQ_BFQQ(rq);
++#else
++ __bfq_insert_request(bfqd, rq);
++#endif
+
+ if (rq_mergeable(rq)) {
+ elv_rqhash_add(q, rq);
+@@ -4683,11 +4744,34 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ q->last_merge = rq;
+ }
+ }
+-
+- if (bfqq)
+- bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, rq->cmd_flags);
+-
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ /*
++ * Cache cmd_flags before releasing scheduler lock, because rq
++ * may disappear afterwards (for example, because of a request
++ * merge).
++ */
++ cmd_flags = rq->cmd_flags;
++#endif
+ spin_unlock_irq(&bfqd->lock);
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
++ if (!bfqq)
++ return;
++ /*
++ * bfqq still exists, because it can disappear only after
++ * either it is merged with another queue, or the process it
++ * is associated with exits. But both actions must be taken by
++ * the same process currently executing this flow of
++ * instruction.
++ *
++ * In addition, the following queue lock guarantees that
++ * bfqq_group(bfqq) exists as well.
++ */
++ spin_lock_irq(q->queue_lock);
++ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
++ if (idle_timer_disabled)
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++ spin_unlock_irq(q->queue_lock);
++#endif
+ }
+
+ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index e4a2553a2d2c..616c0692335a 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -949,9 +949,11 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
+ st->vtime += bfq_delta(served, st->wsum);
+ bfq_forget_idle(st);
+ }
++#ifndef BFQ_MQ
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
+ #endif
++#endif
+ st = bfq_entity_service_tree(&bfqq->entity);
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs, vtime %llu on %p",
+ served, ((st->vtime>>10)*1000)>>12, st);
+
+From abdfb33a3325df55ec0261fd824ca61ddac13575 Mon Sep 17 00:00:00 2001
+From: Luca Miccio <lucmiccio@gmail.com>
+Date: Wed, 8 Nov 2017 19:07:41 +0100
+Subject: [PATCH 51/51] block, bfq-sq, bfq-mq: move debug blkio stats behind
+ CONFIG_DEBUG_BLK_CGROUP
+
+BFQ (both bfq-mq and bfq-sq) currently creates, and updates, its own
+instance of the whole set of blkio statistics that cfq creates. Yet,
+from the comments of Tejun Heo in [1], it turned out that most of
+these statistics are meant/useful only for debugging. This commit
+makes BFQ create the latter, debugging statistics only if the option
+CONFIG_DEBUG_BLK_CGROUP is set.
+
+By doing so, this commit also enables BFQ to enjoy a high performance
+boost. The reason is that, if CONFIG_DEBUG_BLK_CGROUP is not set, then
+BFQ has to update far fewer statistics, and, in particular, not the
+heaviest to update. To give an idea of the benefits, if
+CONFIG_DEBUG_BLK_CGROUP is not set, then, on an Intel i7-4850HQ, and
+with 8 threads doing random I/O in parallel on null_blk (configured
+with 0 latency), the throughput of bfq-mq grows from 310 to 400 KIOPS
+(+30%). We have measured similar or even much higher boosts with other
+CPUs: e.g., +45% with an ARM CortexTM-A53 Octa-core. Our results have
+been obtained and can be reproduced very easily with the script in [1].
+
+[1] https://www.spinics.net/lists/linux-block/msg18943.html
+
+Reported-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Luca Miccio <lucmiccio@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ Documentation/block/bfq-iosched.txt | 59 ++++++++++---
+ block/bfq-cgroup-included.c | 163 ++++++++++++++++++++----------------
+ block/bfq-mq-iosched.c | 14 ++--
+ block/bfq-mq.h | 4 +-
+ block/bfq.h | 4 +-
+ 5 files changed, 147 insertions(+), 97 deletions(-)
+
+diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
+index c816c595082d..30ef2dba85ad 100644
+--- a/Documentation/block/bfq-iosched.txt
++++ b/Documentation/block/bfq-iosched.txt
+@@ -29,24 +29,41 @@ for that device, by setting low_latency to 0. See Section 3 for
+ details on how to configure BFQ for the desired tradeoff between
+ latency and throughput, or on how to maximize throughput.
+
+-BFQ has a non-null overhead, which limits the maximum IOPS that the
+-CPU can process for a device scheduled with BFQ. To give an idea of
+-the limits on slow or average CPUs, here are, first, the limits of
+-bfq-sq for three different CPUs, on, respectively, an average laptop,
++BFQ has a non-null overhead, which limits the maximum IOPS that a CPU
++can process for a device scheduled with BFQ. To give an idea of the
++limits on slow or average CPUs, here are, first, the limits of bfq-mq
++and bfq for three different CPUs, on, respectively, an average laptop,
+ an old desktop, and a cheap embedded system, in case full hierarchical
+-support is enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set):
+-- Intel i7-4850HQ: 250 KIOPS
+-- AMD A8-3850: 170 KIOPS
+-- ARM CortexTM-A53 Octa-core: 45 KIOPS
+-
+-bfq-mq and bfq instances reach, instead, a higher sustainable
+-throughput. Their limits, on the same systems as above, are, with full
+-hierarchical support enabled (i.e., CONFIG_MQ_BFQ_GROUP_IOSCHED set
+-for bfq-mq, or CONFIG_BFQ_GROUP_IOSCHED set for bfq):
++support is enabled (i.e., CONFIG_MQ_BFQ_GROUP_IOSCHED is set for
++bfq-mq, or CONFIG_BFQ_GROUP_IOSCHED is set for bfq), but
++CONFIG_DEBUG_BLK_CGROUP is not set (Section 4-2):
++- Intel i7-4850HQ: 400 KIOPS
++- AMD A8-3850: 250 KIOPS
++- ARM CortexTM-A53 Octa-core: 80 KIOPS
++
++As for bfq-sq, it cannot reach the above IOPS, because of the
++inherent, lower parallelism of legacy blk and of the components within
++it (including bfq-sq itself). In particular, results with
++CONFIG_DEBUG_BLK_CGROUP unset fluctuate considerably. The limits
++reported below for the case where CONFIG_DEBUG_BLK_CGROUP is set do,
++however, provide a lower bound on the limits of bfq-sq.
++
++Turning back to bfq-mq and bfq, if CONFIG_DEBUG_BLK_CGROUP is set (and
++of course full hierarchical support is enabled), then the sustainable
++throughput with bfq-mq and bfq decreases, because all blkio.bfq*
++statistics are created and updated (Section 4-2). For bfq-mq and bfq,
++this leads to the following maximum sustainable throughputs, on the
++same systems as above:
+ - Intel i7-4850HQ: 310 KIOPS
+ - AMD A8-3850: 200 KIOPS
+ - ARM CortexTM-A53 Octa-core: 56 KIOPS
+
++Finally, if CONFIG_DEBUG_BLK_CGROUP is set (and full hierarchical
++support is enabled), then bfq-sq exhibits the following limits:
++- Intel i7-4850HQ: 250 KIOPS
++- AMD A8-3850: 170 KIOPS
++- ARM CortexTM-A53 Octa-core: 45 KIOPS
++
+ BFQ works for multi-queue devices too (bfq and bfq-mq instances).
+
+ The table of contents follows. Impatients can just jump to Section 3.
+@@ -524,6 +541,22 @@ BFQ-specific files is "blkio.bfqX." or "io.bfqX.", where X can be ""
+ to set the weight of a group with the mainline BFQ is blkio.bfq.weight
+ or io.bfq.weight.
+
++As for cgroups-v1 (blkio controller), the exact set of stat files
++created, and kept up-to-date by bfq*, depends on whether
++CONFIG_DEBUG_BLK_CGROUP is set. If it is set, then bfq* creates all
++the stat files documented in
++Documentation/cgroup-v1/blkio-controller.txt. If, instead,
++CONFIG_DEBUG_BLK_CGROUP is not set, then bfq* creates only the files
++blkio.bfq*.io_service_bytes
++blkio.bfq*.io_service_bytes_recursive
++blkio.bfq*.io_serviced
++blkio.bfq*.io_serviced_recursive
++
++The value of CONFIG_DEBUG_BLK_CGROUP greatly influences the maximum
++throughput sustainable with bfq*, because updating the blkio.bfq*
++stats is rather costly, especially for some of the stats enabled by
++CONFIG_DEBUG_BLK_CGROUP.
++
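The patch below achieves this by compiling the costly bfqg_stats_*() helpers
only when both group scheduling and CONFIG_DEBUG_BLK_CGROUP are enabled, and
turning them into empty inline stubs otherwise, so the call sites stay
unchanged. A minimal, compilable sketch of that stub pattern
(CONFIG_DEBUG_BLK_CGROUP is the real kernel option; the other names here are
made up):

    #include <stdio.h>

    struct group_stats { unsigned long queued; };

    #ifdef CONFIG_DEBUG_BLK_CGROUP
    /* Full accounting, compiled in only when the debug option is set. */
    static void stats_update_io_add(struct group_stats *s) { s->queued++; }
    #else
    /* Empty inline stub: call sites stay as they are but cost nothing. */
    static inline void stats_update_io_add(struct group_stats *s) { (void)s; }
    #endif

    int main(void)
    {
        struct group_stats s = { 0 };

        stats_update_io_add(&s);
        /* Prints 1 when built with -DCONFIG_DEBUG_BLK_CGROUP, 0 otherwise. */
        printf("queued=%lu\n", s.queued);
        return 0;
    }
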
+ Parameters to set
+ -----------------
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 631e53d9150d..562b0ce581a7 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -15,7 +15,7 @@
+ * file.
+ */
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+
+ /* bfqg stats flags */
+ enum bfqg_stats_flags {
+@@ -155,6 +155,63 @@ static void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
+ bfqg_stats_update_group_wait_time(stats);
+ }
+
++static void bfqg_stats_update_io_add(struct bfq_group *bfqg,
++ struct bfq_queue *bfqq, unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.queued, op, 1);
++ bfqg_stats_end_empty_time(&bfqg->stats);
++ if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
++ bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
++}
++
++static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.queued, op, -1);
++}
++
++static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
++{
++ blkg_rwstat_add(&bfqg->stats.merged, op, 1);
++}
++
++static void bfqg_stats_update_completion(struct bfq_group *bfqg,
++ uint64_t start_time, uint64_t io_start_time, unsigned int op)
++{
++ struct bfqg_stats *stats = &bfqg->stats;
++ unsigned long long now = sched_clock();
++
++ if (time_after64(now, io_start_time))
++ blkg_rwstat_add(&stats->service_time, op,
++ now - io_start_time);
++ if (time_after64(io_start_time, start_time))
++ blkg_rwstat_add(&stats->wait_time, op,
++ io_start_time - start_time);
++}
++
++#else /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */
++
++static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
++ struct bfq_queue *bfqq, unsigned int op) { }
++static inline void
++bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
++static inline void
++bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
++static inline void bfqg_stats_update_completion(struct bfq_group *bfqg,
++ uint64_t start_time, uint64_t io_start_time,
++ unsigned int op) { }
++static inline void
++bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
++ struct bfq_group *curr_bfqg) { }
++static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { }
++static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
++static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
++
++#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */
++
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ static struct blkcg_policy blkcg_policy_bfq;
+
+ /*
+@@ -247,44 +304,10 @@ static void bfqg_and_blkg_put(struct bfq_group *bfqg)
+ }
+ #endif
+
+-static void bfqg_stats_update_io_add(struct bfq_group *bfqg,
+- struct bfq_queue *bfqq,
+- unsigned int op)
+-{
+- blkg_rwstat_add(&bfqg->stats.queued, op, 1);
+- bfqg_stats_end_empty_time(&bfqg->stats);
+- if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
+- bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
+-}
+-
+-static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
+-{
+- blkg_rwstat_add(&bfqg->stats.queued, op, -1);
+-}
+-
+-static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
+-{
+- blkg_rwstat_add(&bfqg->stats.merged, op, 1);
+-}
+-
+-static void bfqg_stats_update_completion(struct bfq_group *bfqg,
+- uint64_t start_time, uint64_t io_start_time,
+- unsigned int op)
+-{
+- struct bfqg_stats *stats = &bfqg->stats;
+- unsigned long long now = sched_clock();
+-
+- if (time_after64(now, io_start_time))
+- blkg_rwstat_add(&stats->service_time, op,
+- now - io_start_time);
+- if (time_after64(io_start_time, start_time))
+- blkg_rwstat_add(&stats->wait_time, op,
+- io_start_time - start_time);
+-}
+-
+ /* @stats = 0 */
+ static void bfqg_stats_reset(struct bfqg_stats *stats)
+ {
++#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* queued stats shouldn't be cleared */
+ blkg_rwstat_reset(&stats->merged);
+ blkg_rwstat_reset(&stats->service_time);
+@@ -296,6 +319,7 @@ static void bfqg_stats_reset(struct bfqg_stats *stats)
+ blkg_stat_reset(&stats->group_wait_time);
+ blkg_stat_reset(&stats->idle_time);
+ blkg_stat_reset(&stats->empty_time);
++#endif
+ }
+
+ /* @to += @from */
+@@ -304,6 +328,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
+ if (!to || !from)
+ return;
+
++#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* queued stats shouldn't be cleared */
+ blkg_rwstat_add_aux(&to->merged, &from->merged);
+ blkg_rwstat_add_aux(&to->service_time, &from->service_time);
+@@ -316,6 +341,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
+ blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
+ blkg_stat_add_aux(&to->idle_time, &from->idle_time);
+ blkg_stat_add_aux(&to->empty_time, &from->empty_time);
++#endif
+ }
+
+ /*
+@@ -367,6 +393,7 @@ static void bfq_init_entity(struct bfq_entity *entity,
+
+ static void bfqg_stats_exit(struct bfqg_stats *stats)
+ {
++#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_rwstat_exit(&stats->merged);
+ blkg_rwstat_exit(&stats->service_time);
+ blkg_rwstat_exit(&stats->wait_time);
+@@ -378,10 +405,12 @@ static void bfqg_stats_exit(struct bfqg_stats *stats)
+ blkg_stat_exit(&stats->group_wait_time);
+ blkg_stat_exit(&stats->idle_time);
+ blkg_stat_exit(&stats->empty_time);
++#endif
+ }
+
+ static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
+ {
++#ifdef CONFIG_DEBUG_BLK_CGROUP
+ if (blkg_rwstat_init(&stats->merged, gfp) ||
+ blkg_rwstat_init(&stats->service_time, gfp) ||
+ blkg_rwstat_init(&stats->wait_time, gfp) ||
+@@ -396,6 +425,7 @@ static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
+ bfqg_stats_exit(stats);
+ return -ENOMEM;
+ }
++#endif
+
+ return 0;
+ }
+@@ -1003,6 +1033,7 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
+ return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
+ }
+
++#ifdef CONFIG_DEBUG_BLK_CGROUP
+ static int bfqg_print_stat(struct seq_file *sf, void *v)
+ {
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
+@@ -1108,6 +1139,7 @@ static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
+ 0, false);
+ return 0;
+ }
++#endif /* CONFIG_DEBUG_BLK_CGROUP */
+
+ static struct bfq_group *
+ bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
+@@ -1137,15 +1169,6 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+
+ /* statistics, covers only the tasks in the bfqg */
+ {
+- .name = BFQ_CGROUP_FNAME(time),
+- .private = offsetof(struct bfq_group, stats.time),
+- .seq_show = bfqg_print_stat,
+- },
+- {
+- .name = BFQ_CGROUP_FNAME(sectors),
+- .seq_show = bfqg_print_stat_sectors,
+- },
+- {
+ .name = BFQ_CGROUP_FNAME(io_service_bytes),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_bytes,
+@@ -1155,6 +1178,16 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_ios,
+ },
++#ifdef CONFIG_DEBUG_BLK_CGROUP
++ {
++ .name = BFQ_CGROUP_FNAME(time),
++ .private = offsetof(struct bfq_group, stats.time),
++ .seq_show = bfqg_print_stat,
++ },
++ {
++ .name = BFQ_CGROUP_FNAME(sectors),
++ .seq_show = bfqg_print_stat_sectors,
++ },
+ {
+ .name = BFQ_CGROUP_FNAME(io_service_time),
+ .private = offsetof(struct bfq_group, stats.service_time),
+@@ -1175,18 +1208,10 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+ .private = offsetof(struct bfq_group, stats.queued),
+ .seq_show = bfqg_print_rwstat,
+ },
++#endif /* CONFIG_DEBUG_BLK_CGROUP */
+
+ /* the same statictics which cover the bfqg and its descendants */
+ {
+- .name = BFQ_CGROUP_FNAME(time_recursive),
+- .private = offsetof(struct bfq_group, stats.time),
+- .seq_show = bfqg_print_stat_recursive,
+- },
+- {
+- .name = BFQ_CGROUP_FNAME(sectors_recursive),
+- .seq_show = bfqg_print_stat_sectors_recursive,
+- },
+- {
+ .name = BFQ_CGROUP_FNAME(io_service_bytes_recursive),
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_bytes_recursive,
+@@ -1196,6 +1221,16 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+ .private = (unsigned long)&blkcg_policy_bfq,
+ .seq_show = blkg_print_stat_ios_recursive,
+ },
++#ifdef CONFIG_DEBUG_BLK_CGROUP
++ {
++ .name = BFQ_CGROUP_FNAME(time_recursive),
++ .private = offsetof(struct bfq_group, stats.time),
++ .seq_show = bfqg_print_stat_recursive,
++ },
++ {
++ .name = BFQ_CGROUP_FNAME(sectors_recursive),
++ .seq_show = bfqg_print_stat_sectors_recursive,
++ },
+ {
+ .name = BFQ_CGROUP_FNAME(io_service_time_recursive),
+ .private = offsetof(struct bfq_group, stats.service_time),
+@@ -1240,6 +1275,7 @@ static struct cftype bfq_blkcg_legacy_files[] = {
+ .private = offsetof(struct bfq_group, stats.dequeue),
+ .seq_show = bfqg_print_stat,
+ },
++#endif /* CONFIG_DEBUG_BLK_CGROUP */
+ { } /* terminate */
+ };
+
+@@ -1257,25 +1293,6 @@ static struct cftype bfq_blkg_files[] = {
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+-static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg,
+- struct bfq_queue *bfqq, unsigned int op) { }
+-static inline void
+-bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
+-static inline void
+-bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
+-static inline void bfqg_stats_update_completion(struct bfq_group *bfqg,
+- uint64_t start_time, uint64_t io_start_time,
+- unsigned int op) { }
+-static inline void
+-bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
+- struct bfq_group *curr_bfqg) { }
+-static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { }
+-static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
+-static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
+-static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
+-static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
+-static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
+-
+ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ struct bfq_group *bfqg) {}
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index ab3b83d612c2..0c09609a6099 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4070,14 +4070,14 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+ struct request *rq;
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ struct bfq_queue *in_serv_queue, *bfqq;
+ bool waiting_rq, idle_timer_disabled;
+ #endif
+
+ spin_lock_irq(&bfqd->lock);
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ in_serv_queue = bfqd->in_service_queue;
+ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
+
+@@ -4091,7 +4091,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ #endif
+ spin_unlock_irq(&bfqd->lock);
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ bfqq = rq ? RQ_BFQQ(rq) : NULL;
+ if (!idle_timer_disabled && !bfqq)
+ return rq;
+@@ -4691,7 +4691,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ bool idle_timer_disabled = false;
+ unsigned int cmd_flags;
+ #endif
+@@ -4726,7 +4726,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ BUG_ON(!(rq->rq_flags & RQF_GOT));
+ rq->rq_flags &= ~RQF_GOT;
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ idle_timer_disabled = __bfq_insert_request(bfqd, rq);
+ /*
+ * Update bfqq, because, if a queue merge has occurred
+@@ -4744,7 +4744,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ q->last_merge = rq;
+ }
+ }
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ /*
+ * Cache cmd_flags before releasing scheduler lock, because rq
+ * may disappear afterwards (for example, because of a request
+@@ -4753,7 +4753,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ cmd_flags = rq->cmd_flags;
+ #endif
+ spin_unlock_irq(&bfqd->lock);
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ if (!bfqq)
+ return;
+ /*
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 7ed2cc29be57..1cb05bb853d2 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -784,7 +784,7 @@ enum bfqq_expiration {
+
+
+ struct bfqg_stats {
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ /* number of ios merged */
+ struct blkg_rwstat merged;
+ /* total time spent on device in ns, may not be accurate w/ queueing */
+@@ -812,7 +812,7 @@ struct bfqg_stats {
+ uint64_t start_idle_time;
+ uint64_t start_empty_time;
+ uint16_t flags;
+-#endif
++#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */
+ };
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+diff --git a/block/bfq.h b/block/bfq.h
+index 15d326f466b7..47cd4d5a8c32 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -791,7 +791,7 @@ enum bfqq_expiration {
+
+
+ struct bfqg_stats {
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ /* number of ios merged */
+ struct blkg_rwstat merged;
+ /* total time spent on device in ns, may not be accurate w/ queueing */
+@@ -819,7 +819,7 @@ struct bfqg_stats {
+ uint64_t start_idle_time;
+ uint64_t start_empty_time;
+ uint16_t flags;
+-#endif
++#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */
+ };
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch b/sys-kernel/linux-sources-redcore-lts/files/0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
new file mode 100644
index 00000000..8f2c8783
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
@@ -0,0 +1,9571 @@
+diff -Nur a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
+--- a/arch/powerpc/platforms/cell/spufs/sched.c 2018-10-10 07:54:28.000000000 +0100
++++ b/arch/powerpc/platforms/cell/spufs/sched.c 2018-11-03 16:06:32.704528679 +0000
+@@ -65,11 +65,6 @@
+ static struct timer_list spuloadavg_timer;
+
+ /*
+- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
+- */
+-#define NORMAL_PRIO 120
+-
+-/*
+ * Frequency of the spu scheduler tick. By default we do one SPU scheduler
+ * tick for every 10 CPU scheduler ticks.
+ */
+diff -Nur a/arch/x86/Kconfig b/arch/x86/Kconfig
+--- a/arch/x86/Kconfig 2018-11-03 16:00:51.897619785 +0000
++++ b/arch/x86/Kconfig 2018-11-03 16:06:32.705528711 +0000
+@@ -963,10 +963,26 @@
+ depends on SMP
+ ---help---
+ SMT scheduler support improves the CPU scheduler's decision making
+- when dealing with Intel Pentium 4 chips with HyperThreading at a
++ when dealing with Intel P4/Core 2 chips with HyperThreading at a
+ cost of slightly increased overhead in some places. If unsure say
+ N here.
+
++config SMT_NICE
++ bool "SMT (Hyperthreading) aware nice priority and policy support"
++ depends on SCHED_MUQSS && SCHED_SMT
++ default y
++ ---help---
++ Enabling Hyperthreading on Intel CPUs decreases the effectiveness
++ of the use of 'nice' levels and different scheduling policies
++ (e.g. realtime) due to sharing of CPU power between hyperthreads.
++ SMT nice support makes each logical CPU aware of what is running on
++ its hyperthread siblings, maintaining appropriate distribution of
++ CPU according to nice levels and scheduling policies at the expense
++ of slightly increased overhead.
++
++ If unsure say Y here.
++
++
+ config SCHED_MC
+ def_bool y
+ prompt "Multi-core scheduler support"
+diff -Nur a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt
+--- a/Documentation/scheduler/sched-BFS.txt 1970-01-01 01:00:00.000000000 +0100
++++ b/Documentation/scheduler/sched-BFS.txt 2018-11-03 16:06:32.702528615 +0000
+@@ -0,0 +1,351 @@
++BFS - The Brain Fuck Scheduler by Con Kolivas.
++
++Goals.
++
++The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to
++completely do away with the complex designs of the past for the cpu process
++scheduler and instead implement one that is very simple in basic design.
++The main focus of BFS is to achieve excellent desktop interactivity and
++responsiveness without heuristics and tuning knobs that are difficult to
++understand, impossible to model and predict the effect of, and when tuned to
++one workload cause massive detriment to another.
++
++
++Design summary.
++
++BFS is best described as a single runqueue, O(n) lookup, earliest effective
++virtual deadline first design, loosely based on EEVDF (earliest eligible virtual
++deadline first) and my previous Staircase Deadline scheduler. Each component
++shall be described in order to understand the significance of, and reasoning for
++it. The codebase when the first stable version was released was approximately
++9000 lines less code than the existing mainline linux kernel scheduler (in
++2.6.31). This does not even take into account the removal of documentation and
++the cgroups code that is not used.
++
++Design reasoning.
++
++The single runqueue refers to the queued but not running processes for the
++entire system, regardless of the number of CPUs. The reason for going back to
++a single runqueue design is that once multiple runqueues are introduced,
++per-CPU or otherwise, there will be complex interactions as each runqueue will
++be responsible for the scheduling latency and fairness of the tasks only on its
++own runqueue, and to achieve fairness and low latency across multiple CPUs, any
++advantage in throughput of having CPU local tasks causes other disadvantages.
++This is due to requiring a very complex balancing system to at best achieve some
++semblance of fairness across CPUs and can only maintain relatively low latency
++for tasks bound to the same CPUs, not across them. To increase said fairness
++and latency across CPUs, the advantage of local runqueue locking, which makes
++for better scalability, is lost due to having to grab multiple locks.
++
++A significant feature of BFS is that all accounting is done purely based on CPU
++used and nowhere is sleep time used in any way to determine entitlement or
++interactivity. Interactivity "estimators" that use some kind of sleep/run
++algorithm are doomed to fail to detect all interactive tasks, and to falsely tag
++tasks that aren't interactive as being so. The reason for this is that it is
++close to impossible to determine that when a task is sleeping, whether it is
++doing it voluntarily, as in a userspace application waiting for input in the
++form of a mouse click or otherwise, or involuntarily, because it is waiting for
++another thread, process, I/O, kernel activity or whatever. Thus, such an
++estimator will introduce corner cases, and more heuristics will be required to
++cope with those corner cases, introducing more corner cases and failed
++interactivity detection and so on. Interactivity in BFS is built into the design
++by virtue of the fact that tasks that are waking up have not used up their quota
++of CPU time, and have earlier effective deadlines, thereby making it very likely
++they will preempt any CPU bound task of equivalent nice level. See below for
++more information on the virtual deadline mechanism. Even if they do not preempt
++a running task, because the rr interval is guaranteed to have a bounded upper
++limit on how long a task will wait for, it will be scheduled within a timeframe
++that will not cause visible interface jitter.
++
++
++Design details.
++
++Task insertion.
++
++BFS inserts tasks into each relevant queue as an O(1) insertion into a double
++linked list. On insertion, *every* running queue is checked to see if the newly
++queued task can run on any idle queue, or preempt the lowest running task on the
++system. This is how the cross-CPU scheduling of BFS achieves significantly lower
++latency per extra CPU the system has. In this case the lookup is, in the worst
++case scenario, O(n) where n is the number of CPUs on the system.
++
++Data protection.
++
++BFS has one single lock protecting the process local data of every task in the
++global queue. Thus every insertion, removal and modification of task data in the
++global runqueue needs to grab the global lock. However, once a task is taken by
++a CPU, the CPU has its own local data copy of the running process' accounting
++information which only that CPU accesses and modifies (such as during a
++timer tick) thus allowing the accounting data to be updated lockless. Once a
++CPU has taken a task to run, it removes it from the global queue. Thus the
++global queue only ever has, at most,
++
++ (number of tasks requesting cpu time) - (number of logical CPUs) + 1
++
++tasks in the global queue. This value is relevant for the time taken to look up
++tasks during scheduling. This will increase if many tasks with CPU affinity set
++in their policy to limit which CPUs they're allowed to run on outnumber
++the number of CPUs. The +1 is because when rescheduling a task, the CPU's
++currently running task is put back on the queue. Lookup will be described after
++the virtual deadline mechanism is explained.
++
++Virtual deadline.
++
++The key to achieving low latency, scheduling fairness, and "nice level"
++distribution in BFS is entirely in the virtual deadline mechanism. The one
++tunable in BFS is the rr_interval, or "round robin interval". This is the
++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
++tasks of the same nice level will be running for, or looking at it the other
++way around, the longest duration two tasks of the same nice level will be
++delayed for. When a task requests cpu time, it is given a quota (time_slice)
++equal to the rr_interval and a virtual deadline. The virtual deadline is
++offset from the current time in jiffies by this equation:
++
++ jiffies + (prio_ratio * rr_interval)
++
++The prio_ratio is determined as a ratio compared to the baseline of nice -20
++and increases by 10% per nice level. The deadline is a virtual one only in that
++no guarantee is placed that a task will actually be scheduled by this time, but
++it is used to compare which task should go next. There are three components to
++how a task is next chosen. First is time_slice expiration. If a task runs out
++of its time_slice, it is descheduled, the time_slice is refilled, and the
++deadline reset to that formula above. Second is sleep, where a task no longer
++is requesting CPU for whatever reason. The time_slice and deadline are _not_
++adjusted in this case and are just carried over for when the task is next
++scheduled. Third is preemption, and that is when a newly waking task is deemed
++higher priority than a currently running task on any cpu by virtue of the fact
++that it has an earlier virtual deadline than the currently running task. The
++earlier deadline is the key to which task is next chosen for the first and
++second cases. Once a task is descheduled, it is put back on the queue, and an
++O(n) lookup of all queued-but-not-running tasks is done to determine which has
++the earliest deadline and that task is chosen to receive CPU next.
++
++The CPU proportion of different nice tasks works out to be approximately the
++
++ (prio_ratio difference)^2
++
++The reason it is squared is that a task's deadline does not change while it is
++running unless it runs out of time_slice. Thus, even if the time actually
++passes the deadline of another task that is queued, it will not get CPU time
++unless the current running task deschedules, and the time "base" (jiffies) is
++constantly moving.
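
For concreteness, the arithmetic described above (prio_ratio growing by
roughly 10% per nice level above the nice -20 baseline, and the deadline
offset being prio_ratio * rr_interval) can be written out in a few lines of
C. This is only an illustration of the formula, not the scheduler's actual
fixed-point code:

    #include <stdio.h>

    #define RR_INTERVAL_MS 6        /* default rr_interval */

    /* nice -20 is the baseline; each nice level adds roughly 10%. */
    static double prio_ratio(int nice)
    {
        double ratio = 1.0;
        int i;

        for (i = -20; i < nice; i++)
            ratio *= 1.1;
        return ratio;
    }

    static double virtual_deadline(double now, int nice)
    {
        return now + prio_ratio(nice) * RR_INTERVAL_MS;
    }

    int main(void)
    {
        double now = 1000.0;        /* stands in for jiffies, in ms */
        int nice;

        for (nice = -20; nice <= 19; nice += 13)
            printf("nice %3d -> deadline offset %6.1f ms\n",
                   nice, virtual_deadline(now, nice) - now);
        return 0;
    }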
++
++Task lookup.
++
++BFS has 103 priority queues. 100 of these are dedicated to the static priority
++of realtime tasks, and the remaining 3 are, in order of best to worst priority,
++SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority
++scheduling). When a task of these priorities is queued, a bitmap of running
++priorities is set showing which of these priorities has tasks waiting for CPU
++time. When a CPU is made to reschedule, the lookup for the next task to get
++CPU time is performed in the following way:
++
++First the bitmap is checked to see what static priority tasks are queued. If
++any realtime priorities are found, the corresponding queue is checked and the
++first task listed there is taken (provided CPU affinity is suitable) and lookup
++is complete. If the priority corresponds to a SCHED_ISO task, they are also
++taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds
++to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this
++stage, every task in the runlist that corresponds to that priority is checked
++to see which has the earliest set deadline, and (provided it has suitable CPU
++affinity) it is taken off the runqueue and given the CPU. If a task has an
++expired deadline, it is taken and the rest of the lookup aborted (as they are
++chosen in FIFO order).
++
++Thus, the lookup is O(n) in the worst case only, where n is as described
++earlier, as tasks may be chosen before the whole task list is looked over.
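
A compilable sketch of that lookup order, ignoring CPU affinity and the
expired-deadline shortcut, might look like this (all names invented):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_PRIOS    103         /* 100 realtime + ISO + NORMAL + IDLEPRIO */
    #define PRIO_NORMAL 101

    struct task {
        uint64_t deadline;
        struct task *next;          /* FIFO order within one priority level */
    };

    struct runqueue {
        uint64_t bitmap[(NR_PRIOS + 63) / 64];
        struct task *queue[NR_PRIOS];
    };

    static struct task *pick_next(struct runqueue *rq)
    {
        int prio;

        for (prio = 0; prio < NR_PRIOS; prio++) {
            struct task *t, *best;

            if (!(rq->bitmap[prio / 64] & (1ULL << (prio % 64))))
                continue;
            best = rq->queue[prio];
            if (prio < PRIO_NORMAL)                 /* realtime and ISO: FIFO */
                return best;
            for (t = best->next; t; t = t->next)    /* O(n) deadline scan */
                if (t->deadline < best->deadline)
                    best = t;
            return best;
        }
        return NULL;
    }

    int main(void)
    {
        struct runqueue rq = { { 0 }, { 0 } };
        struct task a = { 10, 0 }, b = { 5, 0 };

        a.next = &b;
        rq.queue[PRIO_NORMAL] = &a;
        rq.bitmap[PRIO_NORMAL / 64] |= 1ULL << (PRIO_NORMAL % 64);
        printf("picked deadline %llu\n",            /* prints 5 */
               (unsigned long long)pick_next(&rq)->deadline);
        return 0;
    }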
++
++
++Scalability.
++
++The major limitations of BFS will be that of scalability, as the separate
++runqueue designs will have less lock contention as the number of CPUs rises.
++However they do not scale linearly even with separate runqueues as multiple
++runqueues will need to be locked concurrently on such designs to be able to
++achieve fair CPU balancing, to try and achieve some sort of nice-level fairness
++across CPUs, and to achieve low enough latency for tasks on a busy CPU when
++other CPUs would be more suited. BFS has the advantage that it requires no
++balancing algorithm whatsoever, as balancing occurs by proxy simply because
++all CPUs draw off the global runqueue, in priority and deadline order. Despite
++the fact that scalability is _not_ the prime concern of BFS, it both shows very
++good scalability to smaller numbers of CPUs and is likely a more scalable design
++at these numbers of CPUs.
++
++It also has some very low overhead scalability features built into the design
++when it has been deemed their overhead is so marginal that they're worth adding.
++The first is the local copy of the running process' data to the CPU it's running
++on to allow that data to be updated lockless where possible. Then there is
++deference paid to the last CPU a task was running on, by trying that CPU first
++when looking for an idle CPU to use the next time it's scheduled. Finally there
++is the notion of cache locality beyond the last running CPU. The sched_domains
++information is used to determine the relative virtual "cache distance" that
++other CPUs have from the last CPU a task was running on. CPUs with shared
++caches, such as SMT siblings, or multicore CPUs with shared caches, are treated
++as cache local. CPUs without shared caches are treated as not cache local, and
++CPUs on different NUMA nodes are treated as very distant. This "relative cache
++distance" is used by modifying the virtual deadline value when doing lookups.
++Effectively, the deadline is unaltered between "cache local" CPUs, doubled for
++"cache distant" CPUs, and quadrupled for "very distant" CPUs. The reasoning
++behind the doubling of deadlines is as follows. The real cost of migrating a
++task from one CPU to another is entirely dependant on the cache footprint of
++the task, how cache intensive the task is, how long it's been running on that
++CPU to take up the bulk of its cache, how big the CPU cache is, how fast and
++how layered the CPU cache is, how fast a context switch is... and so on. In
++other words, it's close to random in the real world where we do more than just
++one sole workload. The only thing we can be sure of is that it's not free. So
++BFS uses the principle that an idle CPU is a wasted CPU and utilising idle CPUs
++is more important than cache locality, and cache locality only plays a part
++after that. Doubling the effective deadline is based on the premise that the
++"cache local" CPUs will tend to work on the same tasks up to double the number
++of cache local CPUs, and once the workload is beyond that amount, it is likely
++that none of the tasks are cache warm anywhere anyway. The quadrupling for NUMA
++is a value I pulled out of my arse.
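
Reduced to code, the weighting described above is nothing more than scaling
the deadline used in the cross-CPU comparison by the relative cache distance;
an illustrative sketch with invented names:

    enum cache_distance { CACHE_LOCAL, CACHE_DISTANT, NUMA_DISTANT };

    /* Deadline value used when weighing a task against candidates on
     * another CPU: unaltered, doubled or quadrupled by "cache distance". */
    static inline unsigned long long locality_deadline(unsigned long long deadline,
                                                       enum cache_distance d)
    {
        switch (d) {
        case CACHE_LOCAL:   return deadline;        /* shared cache (SMT, MC)     */
        case CACHE_DISTANT: return deadline * 2;    /* same node, no shared cache */
        default:            return deadline * 4;    /* other NUMA node            */
        }
    }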
++
++When choosing an idle CPU for a waking task, the cache locality is determined
++according to where the task last ran and then idle CPUs are ranked from best
++to worst to choose the most suitable idle CPU based on cache locality, NUMA
++node locality and hyperthread sibling business. They are chosen in the
++following preference (if idle):
++
++* Same core, idle or busy cache, idle threads
++* Other core, same cache, idle or busy cache, idle threads.
++* Same node, other CPU, idle cache, idle threads.
++* Same node, other CPU, busy cache, idle threads.
++* Same core, busy threads.
++* Other core, same cache, busy threads.
++* Same node, other CPU, busy threads.
++* Other node, other CPU, idle cache, idle threads.
++* Other node, other CPU, busy cache, idle threads.
++* Other node, other CPU, busy threads.
++
++This shows the SMT or "hyperthread" awareness in the design as well which will
++choose a real idle core first before a logical SMT sibling which already has
++tasks on the physical CPU.
++
++Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
++However this benchmarking was performed on an earlier design that was far less
++scalable than the current one so it's hard to know how scalable it is in terms
++of both CPUs (due to the global runqueue) and heavily loaded machines (due to
++O(n) lookup) at this stage. Note that in terms of scalability, the number of
++_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x)
++quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark
++results are very promising indeed, without needing to tweak any knobs, features
++or options. Benchmark contributions are most welcome.
++
++
++Features
++
++As the initial prime target audience for BFS was the average desktop user, it
++was designed to not need tweaking, tuning or have features set to obtain benefit
++from it. Thus the number of knobs and features has been kept to an absolute
++minimum and should not require extra user input for the vast majority of cases.
++There are precisely 2 tunables, and 2 extra scheduling policies. The rr_interval
++and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition
++to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is
++support for CGROUPS. The average user should neither need to know what these
++are, nor should they need to be using them to have good desktop behaviour.
++
++rr_interval
++
++There is only one "scheduler" tunable, the round robin interval. This can be
++accessed in
++
++ /proc/sys/kernel/rr_interval
++
++The value is in milliseconds, and the default value is set to 6 on a
++uniprocessor machine, and automatically set to a progressively higher value on
++multiprocessor machines. The reasoning behind increasing the value on more CPUs
++is that the effective latency is decreased by virtue of there being more CPUs on
++BFS (for reasons explained above), and increasing the value allows for less
++cache contention and more throughput. Valid values are from 1 to 1000.
++Decreasing the value will decrease latencies at the cost of decreasing
++throughput, while increasing it will improve throughput, but at the cost of
++worsening latencies. The accuracy of the rr interval is limited by HZ resolution
++of the kernel configuration. Thus, the worst case latencies are usually slightly
++higher than this actual value. The default value of 6 is not an arbitrary one.
++It is based on the fact that humans can detect jitter at approximately 7ms, so
++aiming for much lower latencies is pointless under most circumstances. It is
++worth noting this fact when comparing the latency performance of BFS to other
++schedulers. Worst case latencies being higher than 7ms are far worse than
++average latencies not being in the microsecond range.
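
The tunable is an ordinary procfs file, so it can also be read (or, as root,
written) from a program; a small example that simply reports the current
value, which of course only works on a kernel carrying this scheduler:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/rr_interval", "r");
        int rr;

        if (!f) {
            perror("/proc/sys/kernel/rr_interval");
            return 1;               /* not a BFS/MuQSS kernel */
        }
        if (fscanf(f, "%d", &rr) == 1)
            printf("rr_interval = %d ms\n", rr);
        fclose(f);
        return 0;
    }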
++
++Isochronous scheduling.
++
++Isochronous scheduling is a unique scheduling policy designed to provide
++near-real-time performance to unprivileged (ie non-root) users without the
++ability to starve the machine indefinitely. Isochronous tasks (which means
++"same time") are set using, for example, the schedtool application like so:
++
++ schedtool -I -e amarok
++
++This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
++is that it has a priority level between true realtime tasks and SCHED_NORMAL
++which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
++if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
++rate). However if ISO tasks run for more than a tunable finite amount of time,
++they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
++time is the percentage of _total CPU_ available across the machine, configurable
++as a percentage in the following "resource handling" tunable (as opposed to a
++scheduler tunable):
++
++ /proc/sys/kernel/iso_cpu
++
++and is set to 70% by default. It is calculated over a rolling 5 second average.
++Because it is the total CPU available, it means that on a multi CPU machine, it
++is possible to have an ISO task running as realtime scheduling indefinitely on
++just one CPU, as the other CPUs will be available. Setting this to 100 is the
++equivalent of giving all users SCHED_RR access and setting it to 0 removes the
++ability to run any pseudo-realtime tasks.
++
++A feature of BFS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
++Because some applications constantly set their policy as well as their nice
++level, there is potential for them to undo the override specified by the user
++on the command line of setting the policy to SCHED_ISO. To counter this, once
++a task has been set to SCHED_ISO policy, it needs superuser privileges to set
++it back to SCHED_NORMAL. This will ensure the task remains ISO and all child
++processes and threads will also inherit the ISO policy.
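
Programs can also request the policy directly with sched_setscheduler().
Note that SCHED_ISO is not defined by glibc; the value 4 below is the one the
BFS/MuQSS patches use for it, so treat that constant as an assumption tied to
these kernels:

    #include <sched.h>
    #include <stdio.h>

    #ifndef SCHED_ISO
    #define SCHED_ISO 4     /* assumed value, as defined by the BFS/MuQSS patches */
    #endif

    int main(void)
    {
        struct sched_param sp = { .sched_priority = 0 };

        if (sched_setscheduler(0, SCHED_ISO, &sp) == -1) {
            perror("sched_setscheduler(SCHED_ISO)");
            return 1;
        }
        printf("now running with the SCHED_ISO policy\n");
        return 0;
    }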
++
++Idleprio scheduling.
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a task
++_only_ when the CPU would be otherwise idle. The idea behind this is to allow
++ultra low priority tasks to be run in the background that have virtually no
++effect on the foreground tasks. This is ideally suited to distributed computing
++clients (like setiathome, folding, mprime etc) but can also be used to start
++a video encode or so on without any slowdown of other tasks. To prevent this
++policy from grabbing shared resources and holding them indefinitely, if it
++detects a state where the task is waiting on I/O, the machine is about to
++suspend to ram and so on, it will transiently schedule them as SCHED_NORMAL. As
++per the Isochronous task management, once a task has been scheduled as IDLEPRIO,
++it cannot be put back to SCHED_NORMAL without superuser privileges. Tasks can
++be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++ schedtool -D -e ./mprime
++
++Subtick accounting.
++
++It is surprisingly difficult to get accurate CPU accounting, and in many cases,
++the accounting is done by simply determining what is happening at the precise
++moment a timer tick fires off. This becomes increasingly inaccurate as the
++timer tick frequency (HZ) is lowered. It is possible to create an application
++which uses almost 100% CPU, yet by being descheduled at the right time, records
++zero CPU usage. While the main problem with this is that there are possible
++security implications, it is also difficult to determine how much CPU a task
++really does use. BFS tries to use the sub-tick accounting from the TSC clock,
++where possible, to determine real CPU usage. This is not entirely reliable, but
++is far more likely to produce accurate CPU usage data than the existing designs
++and will not show tasks as consuming no CPU usage when they actually are. Thus,
++the amount of CPU reported as being used by BFS will more accurately represent
++how much CPU the task itself is using (as is shown for example by the 'time'
++application), so the reported values may be quite different to other schedulers.
++Values reported as the 'load' are more prone to problems with this design, but
++per process values are closer to real usage. When comparing throughput of BFS
++to other designs, it is important to compare the actual completed work in terms
++of total wall clock time taken and total work done, rather than the reported
++"cpu usage".
++
++
++Con Kolivas <kernel@kolivas.org> Fri Aug 27 2010
+diff -Nur a/Documentation/scheduler/sched-MuQSS.txt b/Documentation/scheduler/sched-MuQSS.txt
+--- a/Documentation/scheduler/sched-MuQSS.txt 1970-01-01 01:00:00.000000000 +0100
++++ b/Documentation/scheduler/sched-MuQSS.txt 2018-11-03 16:06:32.702528615 +0000
+@@ -0,0 +1,347 @@
++MuQSS - The Multiple Queue Skiplist Scheduler by Con Kolivas.
++
++MuQSS is a per-cpu runqueue variant of the original BFS scheduler with
++one 8 level skiplist per runqueue, and fine grained locking for much more
++scalability.
++
++
++Goals.
++
++The goal of the Multiple Queue Skiplist Scheduler, referred to as MuQSS from
++here on (pronounced mux) is to completely do away with the complex designs of
++the past for the cpu process scheduler and instead implement one that is very
++simple in basic design. The main focus of MuQSS is to achieve excellent desktop
++interactivity and responsiveness without heuristics and tuning knobs that are
++difficult to understand, impossible to model and predict the effect of, and when
++tuned to one workload cause massive detriment to another, while still being
++scalable to many CPUs and processes.
++
++
++Design summary.
++
++MuQSS is best described as per-cpu multiple runqueue, O(log n) insertion, O(1)
++lookup, earliest effective virtual deadline first tickless design, loosely based
++on EEVDF (earliest eligible virtual deadline first) and my previous Staircase
++Deadline scheduler, and evolved from the single runqueue O(n) BFS scheduler.
++Each component shall be described in order to understand the significance of,
++and reasoning for it.
++
++
++Design reasoning.
++
++In BFS, the use of a single runqueue across all CPUs meant that each CPU would
++need to scan the entire runqueue looking for the process with the earliest
++deadline and schedule that next, regardless of which CPU it originally came
++from. This made BFS deterministic with respect to latency and provided
++guaranteed latencies dependent on number of processes and CPUs. The single
++runqueue, however, meant that all CPUs would compete for the single lock
++protecting it, which would lead to increasing lock contention as the number of
++CPUs rose and appeared to limit scalability of common workloads beyond 16
++logical CPUs. Additionally, the O(n) lookup of the runqueue list obviously
++increased overhead proportionate to the number of queued processes and led to
++cache thrashing while iterating over the linked list.
++
++MuQSS is an evolution of BFS, designed to maintain the same scheduling
++decision mechanism and be virtually deterministic without relying on the
++constrained design of the single runqueue by splitting out the single runqueue
++to be per-CPU and use skiplists instead of linked lists.
++
++The original reason for going back to a single runqueue design for BFS was that
++once multiple runqueues are introduced, per-CPU or otherwise, there will be
++complex interactions as each runqueue will be responsible for the scheduling
++latency and fairness of the tasks only on its own runqueue, and to achieve
++fairness and low latency across multiple CPUs, any advantage in throughput of
++having CPU local tasks causes other disadvantages. This is due to requiring a
++very complex balancing system to at best achieve some semblance of fairness
++across CPUs and can only maintain relatively low latency for tasks bound to the
++same CPUs, not across them. To increase said fairness and latency across CPUs,
++the advantage of local runqueue locking, which makes for better scalability, is
++lost due to having to grab multiple locks.
++
++MuQSS works around the problems inherent in multiple runqueue designs by
++making its skip lists priority ordered and through novel use of lockless
++examination of each other runqueue it can decide if it should take the earliest
++deadline task from another runqueue for latency reasons, or for CPU balancing
++reasons. It still does not have a balancing system, choosing to allow the
++next task scheduling decision and task wakeup CPU choice to allow balancing to
++happen by virtue of its choices.
++
++
++Design details.
++
++Custom skip list implementation:
++
++To avoid the overhead of building up and tearing down skip list structures,
++the variant used by MuQSS has a number of optimisations making it specific for
++its use case in the scheduler. It uses static arrays of 8 'levels' instead of
++building up and tearing down structures dynamically. This makes each runqueue
++only scale O(log N) up to 64k tasks. However as there is one runqueue per CPU
++it means that it scales O(log N) up to 64k x number of logical CPUs which is
++far beyond the realistic task limits each CPU could handle. By being 8 levels
++it also makes the array exactly one cacheline in size. Additionally, each
++skip list node is bidirectional making insertion and removal amortised O(1),
++being O(k) where k is 1-8. Uniquely, we are only ever interested in the very
++first entry in each list at all times with MuQSS, so there is never a need to
++do a search and thus look up is always O(1). In interactive mode, the queues
++will be searched beyond their first entry if the first task is not suitable
++for affinity or SMT nice reasons.
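
A rough sketch of the node layout this describes, with illustrative field
names rather than the ones used in the patch:

    #include <stdint.h>

    #define SKIPLIST_LEVELS 8   /* static depth: 8 pointers = one cacheline */

    struct skiplist_node {
        int level;                                      /* highest level in use */
        uint64_t sortkey;                /* static priority, then virtual deadline */
        struct skiplist_node *next[SKIPLIST_LEVELS];    /* forward links */
        struct skiplist_node *prev[SKIPLIST_LEVELS];    /* backlinks: O(k) removal */
    };

    /*
     * The scheduler only ever needs the best queued task, which is the head's
     * level-0 successor, so "lookup" is a single pointer dereference.
     */
    static inline struct skiplist_node *skiplist_first(struct skiplist_node *head)
    {
        return head->next[0];
    }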
++
++Task insertion:
++
++MuQSS inserts tasks into a per CPU runqueue as an O(log N) insertion into
++a custom skip list as described above (based on the original design by William
++Pugh). Insertion is ordered in such a way that there is never a need to do a
++search by ordering tasks according to static priority primarily, and then
++virtual deadline at the time of insertion.
++
++Niffies:
++
++Niffies are a monotonic forward moving timer not unlike the "jiffies" but are
++of nanosecond resolution. Niffies are calculated per-runqueue from the high
++resolution TSC timers, and in order to maintain fairness are synchronised
++between CPUs whenever both runqueues are locked concurrently.
++
++Virtual deadline:
++
++The key to achieving low latency, scheduling fairness, and "nice level"
++distribution in MuQSS is entirely in the virtual deadline mechanism. The one
++tunable in MuQSS is the rr_interval, or "round robin interval". This is the
++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
++tasks of the same nice level will be running for, or looking at it the other
++way around, the longest duration two tasks of the same nice level will be
++delayed for. When a task requests cpu time, it is given a quota (time_slice)
++equal to the rr_interval and a virtual deadline. The virtual deadline is
++offset from the current time in niffies by this equation:
++
++ niffies + (prio_ratio * rr_interval)
++
++The prio_ratio is determined as a ratio compared to the baseline of nice -20
++and increases by 10% per nice level. The deadline is a virtual one only in that
++no guarantee is placed that a task will actually be scheduled by this time, but
++it is used to compare which task should go next. There are three components to
++how a task is next chosen. First is time_slice expiration. If a task runs out
++of its time_slice, it is descheduled, the time_slice is refilled, and the
++deadline reset to that formula above. Second is sleep, where a task no longer
++is requesting CPU for whatever reason. The time_slice and deadline are _not_
++adjusted in this case and are just carried over for when the task is next
++scheduled. Third is preemption, and that is when a newly waking task is deemed
++higher priority than a currently running task on any cpu by virtue of the fact
++that it has an earlier virtual deadline than the currently running task. The
++earlier deadline is the key to which task is next chosen for the first and
++second cases.
++
++The CPU proportion of different nice tasks works out to be approximately the
++
++ (prio_ratio difference)^2
++
++The reason it is squared is that a task's deadline does not change while it is
++running unless it runs out of time_slice. Thus, even if the time actually
++passes the deadline of another task that is queued, it will not get CPU time
++unless the current running task deschedules, and the time "base" (niffies) is
++constantly moving.
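++
++As a rough worked example of the above: a nice 19 task has a prio_ratio about
++1.1^19, or roughly 6.1 times, that of a nice 0 task. Squaring that difference
++gives about 37, so the nice 19 task receives in the order of 1/37th, or about
++3%, of the CPU time of a competing nice 0 task (the same ~3% figure quoted in
++the scheduler source).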
++
++Task lookup:
++
++As tasks are already pre-ordered according to anticipated scheduling order in
++the skip lists, lookup for the next suitable task per-runqueue is always a
++matter of simply selecting the first task in the 0th level skip list entry.
++In order to maintain optimal latency and fairness across CPUs, MuQSS does a
++novel examination of every other runqueue in cache locality order, choosing the
++best task across all runqueues. This provides near-determinism of how long any
++task across the entire system may wait before receiving CPU time. The other
++runqueues are first examined locklessly and then trylocked to minimise the
++potential lock contention if they are likely to have a suitable better task.
++Each other runqueue lock is only held for as long as it takes to examine the
++entry for suitability. In "interactive" mode, the default setting, MuQSS will
++look for the best deadline task across all CPUs, while in !interactive mode,
++it will only select a better deadline task from another CPU if it is more
++heavily laden than the current one.
++
++Lookup is therefore O(k) where k is number of CPUs.
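++
++Schematically, in the default interactive mode (a simplified description, not
++the literal kernel code):
++
++	best = first entry in this runqueue's skip list
++	for each other runqueue, in cache locality order:
++		peek at its best key without taking its lock
++		skip it unless that key looks better than best
++		skip it if a trylock of that runqueue fails
++		re-check its first entry under the lock; if still better,
++		adopt it as best
++		unlock that runqueue
++	run best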
++
++
++Latency.
++
++Through the use of virtual deadlines to govern the scheduling order of normal
++tasks, queue-to-activation latency per runqueue is guaranteed to be bound by
++the rr_interval tunable which is set to 6ms by default. This means that the
++longest a CPU bound task will wait for more CPU is proportional to the number
++of running tasks and in the common case of 0-2 running tasks per CPU, will be
++under the 7ms threshold for human perception of jitter. Additionally, as newly
++woken tasks will have an early deadline from their previous runtime, the very
++tasks that are usually latency sensitive will have the shortest interval for
++activation, usually preempting any existing CPU bound tasks.
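++
++As a rough example, with the default rr_interval of 6ms and two CPU bound
++tasks of the same nice level sharing a CPU, each waits in the order of one
++rr_interval (6ms) between slices, keeping the worst case under the 7ms jitter
++threshold noted above.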
++
++Tickless expiry:
++
++A feature of MuQSS is that it is not tied to the resolution of the chosen tick
++rate in Hz, instead depending entirely on the high resolution timers where
++possible for sub-millisecond accuracy on timeouts regardless of the underlying
++tick rate. This allows MuQSS to be run with the low overhead of low Hz rates
++such as 100 by default, benefiting from the improved throughput and lower
++power usage it provides. Another advantage of this approach is that in
++combination with the Full No HZ option, which disables ticks on running task
++CPUs instead of just idle CPUs, the tick can be disabled at all times
++regardless of how many tasks are running instead of being limited to just one
++running task. Note that this option is NOT recommended for regular desktop
++users.
++
++
++Scalability and balancing.
++
++Unlike traditional approaches where balancing is a combination of CPU selection
++at task wakeup and intermittent balancing based on a vast array of rules set
++according to architecture, busyness calculations and special case management,
++MuQSS indirectly balances on the fly at task wakeup and next task selection.
++During initialisation, MuQSS creates a cache coherency ordered list of CPUs for
++each logical CPU and uses this to aid task/CPU selection when CPUs are busy.
++Additionally it selects any idle CPUs, if they are available, at any time over
++busy CPUs according to the following preference:
++
++ * Same thread, idle or busy cache, idle or busy threads.
++ * Other core, same cache, idle or busy cache, idle threads.
++ * Same node, other CPU, idle cache, idle threads.
++ * Same node, other CPU, busy cache, idle threads.
++ * Other core, same cache, busy threads.
++ * Same node, other CPU, busy threads.
++ * Other node, other CPU, idle cache, idle threads.
++ * Other node, other CPU, busy cache, idle threads.
++ * Other node, other CPU, busy threads.
++
++MuQSS is therefore SMT, MC and NUMA aware without the need for extra
++intermittent balancing to keep CPUs busy and make the most of cache
++coherency.
++
++
++Features
++
++As the initial prime target audience for MuQSS was the average desktop user, it
++was designed not to need tweaking, tuning or feature setting to obtain benefit
++from it. Thus the number of knobs and features has been kept to an absolute
++minimum and should not require extra user input for the vast majority of cases.
++There are 3 optional tunables, and 2 extra scheduling policies. The rr_interval,
++interactive, and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO
++policies. In addition to this, MuQSS also uses sub-tick accounting. What MuQSS
++does _not_ now feature is support for CGROUPS. The average user should neither
++need to know what these are, nor should they need to be using them to have good
++desktop behaviour. However, since some applications refuse to work without
++cgroups, one can enable them with MuQSS as a stub; the filesystem will be
++created, which allows those applications to work.
++
++rr_interval:
++
++ /proc/sys/kernel/rr_interval
++
++The value is in milliseconds, and the default value is set to 6. Valid values
++are from 1 to 1000. Decreasing the value will decrease latencies at the cost of
++decreasing throughput, while increasing it will improve throughput, but at the
++cost of worsening latencies. It is based on the fact that humans can detect
++jitter at approximately 7ms, so aiming for much lower latencies is pointless
++under most circumstances. It is worth noting this fact when comparing the
++latency performance of MuQSS to other schedulers. Worst case latencies being
++higher than 7ms are far worse than average latencies not being in the
++microsecond range.
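++
++For example, a desktop user chasing even lower latencies could write a smaller
++(illustrative) value, while a throughput oriented machine might raise it:
++
++	echo 3 > /proc/sys/kernel/rr_interval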
++
++interactive:
++
++ /proc/sys/kernel/interactive
++
++The value is a simple boolean of 1 for on and 0 for off and is set to on by
++default. Disabling this will disable the near-determinism of MuQSS when
++selecting the next task by not examining all CPUs for the earliest deadline
++task, or which CPU to wake to, instead prioritising CPU balancing for improved
++throughput. Latency will still be bound by rr_interval, but on a per-CPU basis
++instead of across the whole system.
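++
++For example, a throughput focused machine could (illustratively) turn it off
++with:
++
++	echo 0 > /proc/sys/kernel/interactive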
++
++Isochronous scheduling:
++
++Isochronous scheduling is a unique scheduling policy designed to provide
++near-real-time performance to unprivileged (ie non-root) users without the
++ability to starve the machine indefinitely. Isochronous tasks (which means
++"same time") are set using, for example, the schedtool application like so:
++
++ schedtool -I -e amarok
++
++This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
++is that it has a priority level between true realtime tasks and SCHED_NORMAL
++which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
++if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
++rate). However if ISO tasks run for more than a tunable finite amount of time,
++they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
++time is the percentage of CPU available per CPU, configurable as a percentage in
++the following "resource handling" tunable (as opposed to a scheduler tunable):
++
++iso_cpu:
++
++ /proc/sys/kernel/iso_cpu
++
++and is set to 70% by default. It is calculated over a rolling 5 second average.
++Because it is the total CPU available, it means that on a multi CPU machine, it
++is possible to have an ISO task running as realtime scheduling indefinitely on
++just one CPU, as the other CPUs will be available. Setting this to 100 is the
++equivalent of giving all users SCHED_RR access and setting it to 0 removes the
++ability to run any pseudo-realtime tasks.
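++
++As a rough worked example: on a hypothetical machine with 4 logical CPUs and
++the default iso_cpu of 70, SCHED_ISO tasks may together use up to 70% of the
++total, i.e. about 2.8 CPUs worth of time over the rolling 5 second average.
++A single fully CPU bound ISO task therefore never reaches the cap on its own
++and is never demoted.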
++
++A feature of MuQSS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
++
++
++Idleprio scheduling:
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a task
++_only_ when the CPU would be otherwise idle. The idea behind this is to allow
++ultra low priority tasks to be run in the background that have virtually no
++effect on the foreground tasks. This is ideally suited to distributed computing
++clients (like setiathome, folding, mprime etc) but can also be used to start a
++video encode or so on without any slowdown of other tasks. To prevent tasks of
++this policy from grabbing shared resources and holding them indefinitely, if it
++detects a state where the task is waiting on I/O, the machine is about to
++suspend to ram and so on, it will transiently schedule them as SCHED_NORMAL.
++Once a task has
++been scheduled as IDLEPRIO, it cannot be put back to SCHED_NORMAL without
++superuser privileges since it is effectively a lower scheduling policy. Tasks
++can be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++ schedtool -D -e ./mprime
++
++Subtick accounting:
++
++It is surprisingly difficult to get accurate CPU accounting, and in many cases,
++the accounting is done by simply determining what is happening at the precise
++moment a timer tick fires off. This becomes increasingly inaccurate as the timer
++tick frequency (HZ) is lowered. It is possible to create an application which
++uses almost 100% CPU, yet by being descheduled at the right time, records zero
++CPU usage. While the main problem with this is that there are possible security
++implications, it is also difficult to determine how much CPU a task really does
++use. MuQSS uses sub-tick accounting from the TSC clock to determine real CPU
++usage. Thus, the amount of CPU reported as being used by MuQSS will more
++accurately represent how much CPU the task itself is using (as is shown for
++example by the 'time' application), so the reported values may be quite
++different to other schedulers. When comparing throughput of MuQSS to other
++designs, it is important to compare the actual completed work in terms of total
++wall clock time taken and total work done, rather than the reported "cpu usage".
++
++Symmetric MultiThreading (SMT) aware nice:
++
++SMT, a.k.a. hyperthreading, is a very common feature on modern CPUs. While the
++logical CPU count rises by adding thread units to each CPU core, allowing more
++than one task to be run simultaneously on the same core, the disadvantage of it
++is that the CPU power is shared between the tasks, not summating to the power
++of two CPUs. The practical upshot of this is that two tasks running on
++separate threads of the same core run significantly slower than if they had one
++core each to run on. While smart CPU selection allows each task to have a core
++to itself whenever available (as is done on MuQSS), it cannot offset the
++slowdown that occurs when the cores are all loaded and only a thread is left.
++Most of the time this is harmless as the CPU is effectively overloaded at this
++point and the extra thread is of benefit. However when running a niced task in
++the presence of an un-niced task (say nice 19 v nice 0), the niced task gets
++precisely the same amount of CPU power as the un-niced one. MuQSS has an
++optional configuration feature known as SMT-NICE which selectively idles the
++secondary niced thread for a period proportional to the nice difference,
++allowing CPU distribution according to nice level to be maintained, at the
++expense of a small amount of extra overhead. If this is configured in on a
++machine without SMT threads, the overhead is minimal.
++
++
++Con Kolivas <kernel@kolivas.org> Sat, 29th October 2016
+diff -Nur a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+--- a/Documentation/sysctl/kernel.txt 2018-11-03 16:00:51.893619657 +0000
++++ b/Documentation/sysctl/kernel.txt 2018-11-03 16:06:32.703528647 +0000
+@@ -39,6 +39,7 @@
+ - hung_task_timeout_secs
+ - hung_task_warnings
+ - kexec_load_disabled
++- iso_cpu
+ - kptr_restrict
+ - l2cr [ PPC only ]
+ - modprobe ==> Documentation/debugging-modules.txt
+@@ -73,6 +74,7 @@
+ - randomize_va_space
+ - real-root-dev ==> Documentation/admin-guide/initrd.rst
+ - reboot-cmd [ SPARC only ]
++- rr_interval
+ - rtsig-max
+ - rtsig-nr
+ - seccomp/ ==> Documentation/userspace-api/seccomp_filter.rst
+@@ -95,6 +97,7 @@
+ - unknown_nmi_panic
+ - watchdog
+ - watchdog_thresh
++- yield_type
+ - version
+
+ ==============================================================
+@@ -397,6 +400,16 @@
+
+ ==============================================================
+
++iso_cpu: (MuQSS CPU scheduler only).
++
++This sets the percentage cpu that the unprivileged SCHED_ISO tasks can
++run effectively at realtime priority, averaged over a rolling five
++seconds over the -whole- system, meaning all cpus.
++
++Set to 70 (percent) by default.
++
++==============================================================
++
+ l2cr: (PPC only)
+
+ This flag controls the L2 cache of G3 processor boards. If
+@@ -823,6 +836,20 @@
+
+ ==============================================================
+
++rr_interval: (MuQSS CPU scheduler only)
++
++This is the smallest duration that any cpu process scheduling unit
++will run for. Increasing this value can increase throughput of cpu
++bound tasks substantially but at the expense of increased latencies
++overall. Conversely decreasing it will decrease average and maximum
++latencies but at the expense of throughput. This value is in
++milliseconds and the default value chosen depends on the number of
++cpus available at scheduler initialisation with a minimum of 6.
++
++Valid values are from 1-1000.
++
++==============================================================
++
+ rtsig-max & rtsig-nr:
+
+ The file rtsig-max can be used to tune the maximum number
+@@ -1081,3 +1108,13 @@
+ tunable to zero will disable lockup detection altogether.
+
+ ==============================================================
++
++yield_type: (MuQSS CPU scheduler only)
++
++This determines what type of yield calls to sched_yield will perform.
++
++ 0: No yield.
++ 1: Yield only to better priority/deadline tasks. (default)
++ 2: Expire timeslice and recalculate deadline.
++
++==============================================================
+diff -Nur a/fs/proc/base.c b/fs/proc/base.c
+--- a/fs/proc/base.c 2018-10-10 07:54:28.000000000 +0100
++++ b/fs/proc/base.c 2018-11-03 16:06:32.706528743 +0000
+@@ -481,7 +481,7 @@
+ seq_printf(m, "0 0 0\n");
+ else
+ seq_printf(m, "%llu %llu %lu\n",
+- (unsigned long long)task->se.sum_exec_runtime,
++ (unsigned long long)tsk_seruntime(task),
+ (unsigned long long)task->sched_info.run_delay,
+ task->sched_info.pcount);
+
+diff -Nur a/include/linux/init_task.h b/include/linux/init_task.h
+--- a/include/linux/init_task.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/init_task.h 2018-11-03 16:06:32.706528743 +0000
+@@ -172,8 +172,6 @@
+ # define INIT_VTIME(tsk)
+ #endif
+
+-#define INIT_TASK_COMM "swapper"
+-
+ #ifdef CONFIG_RT_MUTEXES
+ # define INIT_RT_MUTEXES(tsk) \
+ .pi_waiters = RB_ROOT_CACHED, \
+@@ -223,6 +221,80 @@
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk!. Base=0, limit=0x1fffff (=2MB)
+ */
++#ifdef CONFIG_SCHED_MUQSS
++#define INIT_TASK_COMM "MuQSS"
++#define INIT_TASK(tsk) \
++{ \
++ INIT_TASK_TI(tsk) \
++ .state = 0, \
++ .stack = init_stack, \
++ .usage = ATOMIC_INIT(2), \
++ .flags = PF_KTHREAD, \
++ .prio = NORMAL_PRIO, \
++ .static_prio = MAX_PRIO-20, \
++ .normal_prio = NORMAL_PRIO, \
++ .deadline = 0, \
++ .policy = SCHED_NORMAL, \
++ .cpus_allowed = CPU_MASK_ALL, \
++ .mm = NULL, \
++ .active_mm = &init_mm, \
++ .restart_block = { \
++ .fn = do_no_restart_syscall, \
++ }, \
++ .time_slice = 1000000, \
++ .tasks = LIST_HEAD_INIT(tsk.tasks), \
++ INIT_PUSHABLE_TASKS(tsk) \
++ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
++ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
++ .real_parent = &tsk, \
++ .parent = &tsk, \
++ .children = LIST_HEAD_INIT(tsk.children), \
++ .sibling = LIST_HEAD_INIT(tsk.sibling), \
++ .group_leader = &tsk, \
++ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
++ RCU_POINTER_INITIALIZER(cred, &init_cred), \
++ .comm = INIT_TASK_COMM, \
++ .thread = INIT_THREAD, \
++ .fs = &init_fs, \
++ .files = &init_files, \
++ .signal = &init_signals, \
++ .sighand = &init_sighand, \
++ .nsproxy = &init_nsproxy, \
++ .pending = { \
++ .list = LIST_HEAD_INIT(tsk.pending.list), \
++ .signal = {{0}}}, \
++ .blocked = {{0}}, \
++ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
++ .journal_info = NULL, \
++ INIT_CPU_TIMERS(tsk) \
++ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
++ .timer_slack_ns = 50000, /* 50 usec default slack */ \
++ .pids = { \
++ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
++ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
++ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
++ }, \
++ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
++ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
++ INIT_IDS \
++ INIT_PERF_EVENTS(tsk) \
++ INIT_TRACE_IRQFLAGS \
++ INIT_LOCKDEP \
++ INIT_FTRACE_GRAPH \
++ INIT_TRACE_RECURSION \
++ INIT_TASK_RCU_PREEMPT(tsk) \
++ INIT_TASK_RCU_TASKS(tsk) \
++ INIT_CPUSET_SEQ(tsk) \
++ INIT_RT_MUTEXES(tsk) \
++ INIT_PREV_CPUTIME(tsk) \
++ INIT_VTIME(tsk) \
++ INIT_NUMA_BALANCING(tsk) \
++ INIT_KASAN(tsk) \
++ INIT_LIVEPATCH(tsk) \
++ INIT_TASK_SECURITY \
++}
++#else /* CONFIG_SCHED_MUQSS */
++#define INIT_TASK_COMM "swapper"
+ #define INIT_TASK(tsk) \
+ { \
+ INIT_TASK_TI(tsk) \
+@@ -300,7 +372,7 @@
+ INIT_LIVEPATCH(tsk) \
+ INIT_TASK_SECURITY \
+ }
+-
++#endif /* CONFIG_SCHED_MUQSS */
+
+ /* Attach to the init_task data structure for proper alignment */
+ #define __init_task_data __attribute__((__section__(".data..init_task")))
+diff -Nur a/include/linux/ioprio.h b/include/linux/ioprio.h
+--- a/include/linux/ioprio.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/ioprio.h 2018-11-03 16:06:32.706528743 +0000
+@@ -52,6 +52,8 @@
+ */
+ static inline int task_nice_ioprio(struct task_struct *task)
+ {
++ if (iso_task(task))
++ return 0;
+ return (task_nice(task) + 20) / 5;
+ }
+
+diff -Nur a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
+--- a/include/linux/sched/nohz.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/sched/nohz.h 2018-11-03 16:06:32.707528775 +0000
+@@ -6,7 +6,7 @@
+ * This is the interface between the scheduler and nohz/dynticks:
+ */
+
+-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
+ extern void cpu_load_update_nohz_start(void);
+ extern void cpu_load_update_nohz_stop(void);
+ #else
+@@ -23,7 +23,7 @@
+ static inline void set_cpu_sd_state_idle(void) { }
+ #endif
+
+-#ifdef CONFIG_NO_HZ_COMMON
++#if defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
+ void calc_load_nohz_start(void);
+ void calc_load_nohz_stop(void);
+ #else
+diff -Nur a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+--- a/include/linux/sched/prio.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/sched/prio.h 2018-11-03 16:06:32.707528775 +0000
+@@ -20,8 +20,20 @@
+ */
+
+ #define MAX_USER_RT_PRIO 100
++
++#ifdef CONFIG_SCHED_MUQSS
++/* Note different MAX_RT_PRIO */
++#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1)
++
++#define ISO_PRIO (MAX_RT_PRIO)
++#define NORMAL_PRIO (MAX_RT_PRIO + 1)
++#define IDLE_PRIO (MAX_RT_PRIO + 2)
++#define PRIO_LIMIT ((IDLE_PRIO) + 1)
++#else /* CONFIG_SCHED_MUQSS */
+ #define MAX_RT_PRIO MAX_USER_RT_PRIO
+
++#endif /* CONFIG_SCHED_MUQSS */
++
+ #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
+
+diff -Nur a/include/linux/sched/task.h b/include/linux/sched/task.h
+--- a/include/linux/sched/task.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/sched/task.h 2018-11-03 16:06:32.707528775 +0000
+@@ -80,7 +80,7 @@
+ extern void free_task(struct task_struct *tsk);
+
+ /* sched_exec is called by processes performing an exec */
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_MUQSS)
+ extern void sched_exec(void);
+ #else
+ #define sched_exec() {}
+diff -Nur a/include/linux/sched.h b/include/linux/sched.h
+--- a/include/linux/sched.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/linux/sched.h 2018-11-03 16:06:32.707528775 +0000
+@@ -27,6 +27,9 @@
+ #include <linux/signal_types.h>
+ #include <linux/mm_types_task.h>
+ #include <linux/task_io_accounting.h>
++#ifdef CONFIG_SCHED_MUQSS
++#include <linux/skip_list.h>
++#endif
+
+ /* task_struct member predeclarations (sorted alphabetically): */
+ struct audit_context;
+@@ -579,9 +582,11 @@
+ unsigned int flags;
+ unsigned int ptrace;
+
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_MUQSS)
++ int on_cpu;
++#endif
+ #ifdef CONFIG_SMP
+ struct llist_node wake_entry;
+- int on_cpu;
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ /* Current CPU: */
+ unsigned int cpu;
+@@ -598,10 +603,25 @@
+ int static_prio;
+ int normal_prio;
+ unsigned int rt_priority;
++#ifdef CONFIG_SCHED_MUQSS
++ int time_slice;
++ u64 deadline;
++ skiplist_node node; /* Skip list node */
++ u64 last_ran;
++ u64 sched_time; /* sched_clock time spent running */
++#ifdef CONFIG_SMT_NICE
++ int smt_bias; /* Policy/nice level bias across smt siblings */
++#endif
++#ifdef CONFIG_HOTPLUG_CPU
++ bool zerobound; /* Bound to CPU0 for hotplug */
++#endif
++ unsigned long rt_timeout;
++#else /* CONFIG_SCHED_MUQSS */
+
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
++#endif
+ #ifdef CONFIG_CGROUP_SCHED
+ struct task_group *sched_task_group;
+ #endif
+@@ -751,6 +771,10 @@
+ u64 utimescaled;
+ u64 stimescaled;
+ #endif
++#ifdef CONFIG_SCHED_MUQSS
++ /* Unbanked cpu time */
++ unsigned long utime_ns, stime_ns;
++#endif
+ u64 gtime;
+ struct prev_cputime prev_cputime;
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+@@ -1155,6 +1179,40 @@
+ */
+ };
+
++#ifdef CONFIG_SCHED_MUQSS
++#define tsk_seruntime(t) ((t)->sched_time)
++#define tsk_rttimeout(t) ((t)->rt_timeout)
++
++static inline void tsk_cpus_current(struct task_struct *p)
++{
++}
++
++void print_scheduler_version(void);
++
++static inline bool iso_task(struct task_struct *p)
++{
++ return (p->policy == SCHED_ISO);
++}
++#else /* CFS */
++#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t) ((t)->rt.timeout)
++
++static inline void tsk_cpus_current(struct task_struct *p)
++{
++ p->nr_cpus_allowed = current->nr_cpus_allowed;
++}
++
++static inline void print_scheduler_version(void)
++{
++ printk(KERN_INFO "CFS CPU scheduler.\n");
++}
++
++static inline bool iso_task(struct task_struct *p)
++{
++ return false;
++}
++#endif /* CONFIG_SCHED_MUQSS */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ return task->pids[PIDTYPE_PID].pid;
+diff -Nur a/include/linux/skip_list.h b/include/linux/skip_list.h
+--- a/include/linux/skip_list.h 1970-01-01 01:00:00.000000000 +0100
++++ b/include/linux/skip_list.h 2018-11-03 16:06:32.708528807 +0000
+@@ -0,0 +1,33 @@
++#ifndef _LINUX_SKIP_LISTS_H
++#define _LINUX_SKIP_LISTS_H
++typedef u64 keyType;
++typedef void *valueType;
++
++typedef struct nodeStructure skiplist_node;
++
++struct nodeStructure {
++ int level; /* Levels in this structure */
++ keyType key;
++ valueType value;
++ skiplist_node *next[8];
++ skiplist_node *prev[8];
++};
++
++typedef struct listStructure {
++ int entries;
++ int level; /* Maximum level of the list
++ (1 more than the number of levels in the list) */
++ skiplist_node *header; /* pointer to header */
++} skiplist;
++
++void skiplist_init(skiplist_node *slnode);
++skiplist *new_skiplist(skiplist_node *slnode);
++void free_skiplist(skiplist *l);
++void skiplist_node_init(skiplist_node *node);
++void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed);
++void skiplist_delete(skiplist *l, skiplist_node *node);
++
++static inline bool skiplist_node_empty(skiplist_node *node) {
++ return (!node->next[0]);
++}
++#endif /* _LINUX_SKIP_LISTS_H */
+diff -Nur a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
+--- a/include/uapi/linux/sched.h 2018-10-10 07:54:28.000000000 +0100
++++ b/include/uapi/linux/sched.h 2018-11-03 16:06:32.708528807 +0000
+@@ -37,9 +37,16 @@
+ #define SCHED_FIFO 1
+ #define SCHED_RR 2
+ #define SCHED_BATCH 3
+-/* SCHED_ISO: reserved but not implemented yet */
++/* SCHED_ISO: Implemented on MuQSS only */
+ #define SCHED_IDLE 5
++#ifdef CONFIG_SCHED_MUQSS
++#define SCHED_ISO 4
++#define SCHED_IDLEPRIO SCHED_IDLE
++#define SCHED_MAX (SCHED_IDLEPRIO)
++#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX)
++#else /* CONFIG_SCHED_MUQSS */
+ #define SCHED_DEADLINE 6
++#endif /* CONFIG_SCHED_MUQSS */
+
+ /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
+ #define SCHED_RESET_ON_FORK 0x40000000
+diff -Nur a/init/Kconfig b/init/Kconfig
+--- a/init/Kconfig 2018-11-03 16:00:51.921620552 +0000
++++ b/init/Kconfig 2018-11-03 16:06:32.709528839 +0000
+@@ -38,6 +38,18 @@
+
+ menu "General setup"
+
++config SCHED_MUQSS
++ bool "MuQSS cpu scheduler"
++ select HIGH_RES_TIMERS
++ ---help---
++ The Multiple Queue Skiplist Scheduler for excellent interactivity and
++ responsiveness on the desktop and highly scalable deterministic
++ low latency on any hardware.
++
++ Say Y here.
++ default y
++
++
+ config BROKEN
+ bool
+
+@@ -621,6 +633,7 @@
+ depends on ARCH_SUPPORTS_NUMA_BALANCING
+ depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ depends on SMP && NUMA && MIGRATION
++ depends on !SCHED_MUQSS
+ help
+ This option adds support for automatic NUMA aware memory/task placement.
+ The mechanism is quite primitive and is based on migrating memory when
+@@ -723,9 +736,13 @@
+ help
+ This feature lets CPU scheduler recognize task groups and control CPU
+ bandwidth allocation to such task groups. It uses cgroups to group
+- tasks.
++ tasks. In combination with MuQSS this is purely a STUB to create the
++ files associated with the CPU controller cgroup but most of the
++ controls do nothing. This is useful for working in environments and
++ with applications that will only work if this control group is
++ present.
+
+-if CGROUP_SCHED
++if CGROUP_SCHED && !SCHED_MUQSS
+ config FAIR_GROUP_SCHED
+ bool "Group scheduling for SCHED_OTHER"
+ depends on CGROUP_SCHED
+@@ -832,6 +849,7 @@
+
+ config CGROUP_CPUACCT
+ bool "Simple CPU accounting controller"
++ depends on !SCHED_MUQSS
+ help
+ Provides a simple controller for monitoring the
+ total CPU consumed by the tasks in a cgroup.
+@@ -950,6 +968,7 @@
+
+ config SCHED_AUTOGROUP
+ bool "Automatic process group scheduling"
++ depends on !SCHED_MUQSS
+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED
+diff -Nur a/init/main.c b/init/main.c
+--- a/init/main.c 2018-10-10 07:54:28.000000000 +0100
++++ b/init/main.c 2018-11-03 16:06:32.709528839 +0000
+@@ -841,7 +841,6 @@
+ return ret;
+ }
+
+-
+ extern initcall_t __initcall_start[];
+ extern initcall_t __initcall0_start[];
+ extern initcall_t __initcall1_start[];
+@@ -1008,6 +1007,8 @@
+
+ rcu_end_inkernel_boot();
+
++ print_scheduler_version();
++
+ if (ramdisk_execute_command) {
+ ret = run_init_process(ramdisk_execute_command);
+ if (!ret)
+diff -Nur a/kernel/delayacct.c b/kernel/delayacct.c
+--- a/kernel/delayacct.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/delayacct.c 2018-11-03 16:06:32.710528871 +0000
+@@ -115,7 +115,7 @@
+ */
+ t1 = tsk->sched_info.pcount;
+ t2 = tsk->sched_info.run_delay;
+- t3 = tsk->se.sum_exec_runtime;
++ t3 = tsk_seruntime(tsk);
+
+ d->cpu_count += t1;
+
+diff -Nur a/kernel/exit.c b/kernel/exit.c
+--- a/kernel/exit.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/exit.c 2018-11-03 16:06:32.710528871 +0000
+@@ -129,7 +129,7 @@
+ sig->curr_target = next_thread(tsk);
+ }
+
+- add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++ add_device_randomness((const void*) &tsk_seruntime(tsk),
+ sizeof(unsigned long long));
+
+ /*
+@@ -150,7 +150,7 @@
+ sig->inblock += task_io_get_inblock(tsk);
+ sig->oublock += task_io_get_oublock(tsk);
+ task_io_accounting_add(&sig->ioac, &tsk->ioac);
+- sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++ sig->sum_sched_runtime += tsk_seruntime(tsk);
+ sig->nr_threads--;
+ __unhash_process(tsk, group_dead);
+ write_sequnlock(&sig->stats_lock);
+diff -Nur a/kernel/kthread.c b/kernel/kthread.c
+--- a/kernel/kthread.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/kthread.c 2018-11-03 16:06:32.711528903 +0000
+@@ -410,6 +410,34 @@
+ }
+ EXPORT_SYMBOL(kthread_bind);
+
++#if defined(CONFIG_SCHED_MUQSS) && defined(CONFIG_SMP)
++extern void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
++
++/*
++ * new_kthread_bind is a special variant of __kthread_bind_mask.
++ * For new threads to work on muqss we want to call do_set_cpus_allowed
++ * without the task_cpu being set and the task rescheduled until they're
++ * rescheduled on their own so we call __do_set_cpus_allowed directly which
++ * only changes the cpumask. This is particularly important for smpboot threads
++ * to work.
++ */
++static void new_kthread_bind(struct task_struct *p, unsigned int cpu)
++{
++ unsigned long flags;
++
++ if (WARN_ON(!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)))
++ return;
++
++ /* It's safe because the task is inactive. */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ __do_set_cpus_allowed(p, cpumask_of(cpu));
++ p->flags |= PF_NO_SETAFFINITY;
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++#else
++#define new_kthread_bind(p, cpu) kthread_bind(p, cpu)
++#endif
++
+ /**
+ * kthread_create_on_cpu - Create a cpu bound kthread
+ * @threadfn: the function to run until signal_pending(current).
+@@ -431,7 +459,7 @@
+ cpu);
+ if (IS_ERR(p))
+ return p;
+- kthread_bind(p, cpu);
++ new_kthread_bind(p, cpu);
+ /* CPU hotplug need to bind once again when unparking the thread. */
+ set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
+ to_kthread(p)->cpu = cpu;
+diff -Nur a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
+--- a/kernel/livepatch/transition.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/livepatch/transition.c 2018-11-03 16:06:32.711528903 +0000
+@@ -277,6 +277,12 @@
+ return 0;
+ }
+
++#ifdef CONFIG_SCHED_MUQSS
++typedef unsigned long rq_flags_t;
++#else
++typedef struct rq_flags rq_flags_t;
++#endif
++
+ /*
+ * Try to safely switch a task to the target patch state. If it's currently
+ * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
+@@ -285,7 +291,7 @@
+ static bool klp_try_switch_task(struct task_struct *task)
+ {
+ struct rq *rq;
+- struct rq_flags flags;
++ rq_flags_t flags;
+ int ret;
+ bool success = false;
+ char err_buf[STACK_ERR_BUF_SIZE];
+diff -Nur a/kernel/Makefile b/kernel/Makefile
+--- a/kernel/Makefile 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/Makefile 2018-11-03 16:06:32.709528839 +0000
+@@ -10,7 +10,7 @@
+ extable.o params.o \
+ kthread.o sys_ni.o nsproxy.o \
+ notifier.o ksysfs.o cred.o reboot.o \
+- async.o range.o smpboot.o ucount.o
++ async.o range.o smpboot.o ucount.o skip_list.o
+
+ obj-$(CONFIG_MODULES) += kmod.o
+ obj-$(CONFIG_MULTIUSER) += groups.o
+diff -Nur a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
+--- a/kernel/rcu/Kconfig 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/rcu/Kconfig 2018-11-03 16:06:32.711528903 +0000
+@@ -93,7 +93,7 @@
+ config CONTEXT_TRACKING_FORCE
+ bool "Force context tracking"
+ depends on CONTEXT_TRACKING
+- default y if !NO_HZ_FULL
++ default y if !NO_HZ_FULL && !SCHED_MUQSS
+ help
+ The major pre-requirement for full dynticks to work is to
+ support the context tracking subsystem. But there are also
+diff -Nur a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+--- a/kernel/sched/cpufreq_schedutil.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/sched/cpufreq_schedutil.c 2018-11-03 16:06:32.716529064 +0000
+@@ -176,6 +176,17 @@
+ return cpufreq_driver_resolve_freq(policy, freq);
+ }
+
++#ifdef CONFIG_SCHED_MUQSS
++static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ *util = rq->load_avg;
++ if (*util > SCHED_CAPACITY_SCALE)
++ *util = SCHED_CAPACITY_SCALE;
++ *max = SCHED_CAPACITY_SCALE;
++}
++#else /* CONFIG_SCHED_MUQSS */
+ static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
+@@ -186,6 +197,7 @@
+ *util = min(rq->cfs.avg.util_avg, cfs_max);
+ *max = cfs_max;
+ }
++#endif /* CONFIG_SCHED_MUQSS */
+
+ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
+ unsigned int flags)
+diff -Nur a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+--- a/kernel/sched/cputime.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/sched/cputime.c 2018-11-03 16:06:32.716529064 +0000
+@@ -270,26 +270,6 @@
+ return accounted;
+ }
+
+-#ifdef CONFIG_64BIT
+-static inline u64 read_sum_exec_runtime(struct task_struct *t)
+-{
+- return t->se.sum_exec_runtime;
+-}
+-#else
+-static u64 read_sum_exec_runtime(struct task_struct *t)
+-{
+- u64 ns;
+- struct rq_flags rf;
+- struct rq *rq;
+-
+- rq = task_rq_lock(t, &rf);
+- ns = t->se.sum_exec_runtime;
+- task_rq_unlock(rq, t, &rf);
+-
+- return ns;
+-}
+-#endif
+-
+ /*
+ * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
+ * tasks (sum on group iteration) belonging to @tsk's group.
+@@ -661,7 +641,7 @@
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ struct task_cputime cputime = {
+- .sum_exec_runtime = p->se.sum_exec_runtime,
++ .sum_exec_runtime = tsk_seruntime(p),
+ };
+
+ task_cputime(p, &cputime.utime, &cputime.stime);
+diff -Nur a/kernel/sched/idle.c b/kernel/sched/idle.c
+--- a/kernel/sched/idle.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/sched/idle.c 2018-11-03 16:06:32.716529064 +0000
+@@ -209,6 +209,9 @@
+ */
+ static void do_idle(void)
+ {
++ int cpu = smp_processor_id();
++ bool pending = false;
++
+ /*
+ * If the arch has a polling bit, we maintain an invariant:
+ *
+@@ -220,13 +223,16 @@
+
+ __current_set_polling();
+ quiet_vmstat();
+- tick_nohz_idle_enter();
++ if (unlikely(softirq_pending(cpu)))
++ pending = true;
++ else
++ tick_nohz_idle_enter();
+
+ while (!need_resched()) {
+ check_pgt_cache();
+ rmb();
+
+- if (cpu_is_offline(smp_processor_id())) {
++ if (cpu_is_offline(cpu)) {
+ cpuhp_report_idle_dead();
+ arch_cpu_idle_dead();
+ }
+@@ -255,7 +261,8 @@
+ * an IPI to fold the state for us.
+ */
+ preempt_set_need_resched();
+- tick_nohz_idle_exit();
++ if (!pending)
++ tick_nohz_idle_exit();
+ __current_clr_polling();
+
+ /*
+diff -Nur a/kernel/sched/Makefile b/kernel/sched/Makefile
+--- a/kernel/sched/Makefile 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/sched/Makefile 2018-11-03 16:06:32.711528903 +0000
+@@ -16,14 +16,20 @@
+ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
+ endif
+
+-obj-y += core.o loadavg.o clock.o cputime.o
++ifdef CONFIG_SCHED_MUQSS
++obj-y += MuQSS.o clock.o
++else
++obj-y += core.o loadavg.o clock.o
+ obj-y += idle_task.o fair.o rt.o deadline.o
+-obj-y += wait.o wait_bit.o swait.o completion.o idle.o
+-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
++obj-$(CONFIG_SMP) += cpudeadline.o stop_task.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
+-obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_SCHED_DEBUG) += debug.o
+ obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
++endif
++obj-y += cputime.o
++obj-y += wait.o wait_bit.o swait.o completion.o idle.o
++obj-$(CONFIG_SMP) += cpupri.o topology.o
++obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
+ obj-$(CONFIG_MEMBARRIER) += membarrier.o
+diff -Nur a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
+--- a/kernel/sched/MuQSS.c 1970-01-01 01:00:00.000000000 +0100
++++ b/kernel/sched/MuQSS.c 2018-11-03 16:06:32.715529032 +0000
+@@ -0,0 +1,6923 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * kernel/sched/MuQSS.c, was kernel/sched.c
++ *
++ * Kernel scheduler and related syscalls
++ *
++ * Copyright (C) 1991-2002 Linus Torvalds
++ *
++ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
++ * make semaphores SMP safe
++ * 1998-11-19 Implemented schedule_timeout() and related stuff
++ * by Andrea Arcangeli
++ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
++ * hybrid priority-list and round-robin design with
++ * an array-switch method of distributing timeslices
++ * and per-CPU runqueues. Cleanups and useful suggestions
++ * by Davide Libenzi, preemptible kernel bits by Robert Love.
++ * 2003-09-03 Interactivity tuning by Con Kolivas.
++ * 2004-04-02 Scheduler domains code by Nick Piggin
++ * 2007-04-15 Work begun on replacing all interactivity tuning with a
++ * fair scheduling design by Con Kolivas.
++ * 2007-05-05 Load balancing (smp-nice) and other improvements
++ * by Peter Williams
++ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
++ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
++ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
++ * Thomas Gleixner, Mike Kravetz
++ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes
++ * a whole lot of those previous things.
++ * 2016-10-01 Multiple Queue Skiplist Scheduler scalable evolution of BFS
++ * scheduler by Con Kolivas.
++ */
++
++#include <linux/sched.h>
++#include <linux/sched/clock.h>
++#include <uapi/linux/sched/types.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/hotplug.h>
++#include <linux/wait_bit.h>
++#include <linux/cpuset.h>
++#include <linux/delayacct.h>
++#include <linux/init_task.h>
++#include <linux/binfmts.h>
++#include <linux/context_tracking.h>
++#include <linux/rcupdate_wait.h>
++#include <linux/skip_list.h>
++
++#include <linux/blkdev.h>
++#include <linux/kprobes.h>
++#include <linux/mmu_context.h>
++#include <linux/module.h>
++#include <linux/nmi.h>
++#include <linux/prefetch.h>
++#include <linux/profile.h>
++#include <linux/security.h>
++#include <linux/syscalls.h>
++#include <linux/tick.h>
++
++#include <asm/switch_to.h>
++#include <asm/tlb.h>
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#endif
++
++#include "../workqueue_internal.h"
++#include "../smpboot.h"
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++
++#include "MuQSS.h"
++
++#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
++#define rt_task(p) rt_prio((p)->prio)
++#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
++#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \
++ (policy) == SCHED_RR)
++#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
++
++#define is_idle_policy(policy) ((policy) == SCHED_IDLEPRIO)
++#define idleprio_task(p) unlikely(is_idle_policy((p)->policy))
++#define task_running_idle(p) unlikely((p)->prio == IDLE_PRIO)
++
++#define is_iso_policy(policy) ((policy) == SCHED_ISO)
++#define iso_task(p) unlikely(is_iso_policy((p)->policy))
++#define task_running_iso(p) unlikely((p)->prio == ISO_PRIO)
++
++#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT)
++
++#define ISO_PERIOD (5 * HZ)
++
++#define STOP_PRIO (MAX_RT_PRIO - 1)
++
++/*
++ * Some helpers for converting to/from various scales. Use shifts to get
++ * approximate multiples of ten for less overhead.
++ */
++#define JIFFIES_TO_NS(TIME) ((TIME) * (1073741824 / HZ))
++#define JIFFY_NS (1073741824 / HZ)
++#define JIFFY_US (1048576 / HZ)
++#define NS_TO_JIFFIES(TIME) ((TIME) / JIFFY_NS)
++#define HALF_JIFFY_NS (1073741824 / HZ / 2)
++#define HALF_JIFFY_US (1048576 / HZ / 2)
++#define MS_TO_NS(TIME) ((TIME) << 20)
++#define MS_TO_US(TIME) ((TIME) << 10)
++#define NS_TO_MS(TIME) ((TIME) >> 20)
++#define NS_TO_US(TIME) ((TIME) >> 10)
++#define US_TO_NS(TIME) ((TIME) << 10)
++
++#define RESCHED_US (100) /* Reschedule if less than this many μs left */
++
++void print_scheduler_version(void)
++{
++ printk(KERN_INFO "MuQSS CPU scheduler v0.162 by Con Kolivas.\n");
++}
++
++/*
++ * This is the time all tasks within the same priority round robin.
++ * Value is in ms and set to a minimum of 6ms.
++ * Tunable via /proc interface.
++ */
++int rr_interval __read_mostly = 6;
++
++/*
++ * Tunable to choose whether to prioritise latency or throughput, simple
++ * binary yes or no
++ */
++int sched_interactive __read_mostly = 1;
++
++/*
++ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
++ * are allowed to run five seconds as real time tasks. This is the total over
++ * all online cpus.
++ */
++int sched_iso_cpu __read_mostly = 70;
++
++/*
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Yield only to better priority/deadline tasks. (default)
++ * 2: Expire timeslice and recalculate deadline.
++ */
++int sched_yield_type __read_mostly = 1;
++
++/*
++ * The relative length of deadline for each priority(nice) level.
++ */
++static int prio_ratios[NICE_WIDTH] __read_mostly;
++
++/*
++ * The quota handed out to tasks of all priority levels when refilling their
++ * time_slice.
++ */
++static inline int timeslice(void)
++{
++ return MS_TO_US(rr_interval);
++}
++
++#ifdef CONFIG_SMP
++static cpumask_t cpu_idle_map ____cacheline_aligned_in_smp;
++#endif
++
++/* CPUs with isolated domains */
++cpumask_var_t cpu_isolated_map;
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#ifdef CONFIG_SMP
++struct rq *cpu_rq(int cpu)
++{
++ return &per_cpu(runqueues, (cpu));
++}
++#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
++
++/*
++ * For asym packing, by default the lower numbered cpu has higher priority.
++ */
++int __weak arch_asym_cpu_priority(int cpu)
++{
++ return -cpu;
++}
++
++int __weak arch_sd_sibling_asym_packing(void)
++{
++ return 0*SD_ASYM_PACKING;
++}
++#else
++struct rq *uprq;
++#endif /* CONFIG_SMP */
++
++#include "stats.h"
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next) do { } while (0)
++#endif
++#ifndef finish_arch_switch
++# define finish_arch_switch(prev) do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch() do { } while (0)
++#endif
++
++/*
++ * All common locking functions performed on rq->lock. rq->clock is local to
++ * the CPU accessing it so it can be modified just with interrupts disabled
++ * when we're not updating niffies.
++ * Looking up task_rq must be done under rq->lock to be safe.
++ */
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compile should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++ s64 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++ /*
++ * Since irq_time is only updated on {soft,}irq_exit, we might run into
++ * this case when a previous update_rq_clock() happened inside a
++ * {soft,}irq region.
++ *
++ * When this happens, we stop ->clock_task and only update the
++ * prev_irq_time stamp to account for the part that fit, so that a next
++ * update will consume the rest. This ensures ->clock_task is
++ * monotonic.
++ *
++ * It does however cause some slight miss-attribution of {soft,}irq
++ * time, a more accurate solution would be to update the irq_time using
++ * the current rq->clock timestamp, except that would require using
++ * atomic ops.
++ */
++ if (irq_delta > delta)
++ irq_delta = delta;
++
++ rq->prev_irq_time += irq_delta;
++ delta -= irq_delta;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++ if (static_key_false((&paravirt_steal_rq_enabled))) {
++ s64 steal = paravirt_steal_clock(cpu_of(rq));
++
++ steal -= rq->prev_steal_time_rq;
++
++ if (unlikely(steal > delta))
++ steal = delta;
++
++ rq->prev_steal_time_rq += steal;
++
++ delta -= steal;
++ }
++#endif
++ rq->clock_task += delta;
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++ if (unlikely(delta < 0))
++ return;
++ rq->clock += delta;
++ update_rq_clock_task(rq, delta);
++}
++
++/*
++ * Niffies are a globally increasing nanosecond counter. They're only used by
++ * update_load_avg and time_slice_expired, however deadlines are based on them
++ * across CPUs. Update them whenever we will call one of those functions, and
++ * synchronise them across CPUs whenever we hold both runqueue locks.
++ */
++static inline void update_clocks(struct rq *rq)
++{
++ s64 ndiff, minndiff;
++ long jdiff;
++
++ update_rq_clock(rq);
++ ndiff = rq->clock - rq->old_clock;
++ rq->old_clock = rq->clock;
++ jdiff = jiffies - rq->last_jiffy;
++
++ /* Subtract any niffies added by balancing with other rqs */
++ ndiff -= rq->niffies - rq->last_niffy;
++ minndiff = JIFFIES_TO_NS(jdiff) - rq->niffies + rq->last_jiffy_niffies;
++ if (minndiff < 0)
++ minndiff = 0;
++ ndiff = max(ndiff, minndiff);
++ rq->niffies += ndiff;
++ rq->last_niffy = rq->niffies;
++ if (jdiff) {
++ rq->last_jiffy += jdiff;
++ rq->last_jiffy_niffies = rq->niffies;
++ }
++}
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++ return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++ return p->on_rq == TASK_ON_RQ_MIGRATING;
++}
++
++static inline int rq_trylock(struct rq *rq)
++ __acquires(rq->lock)
++{
++ return raw_spin_trylock(&rq->lock);
++}
++
++/*
++ * Any time we have two runqueues locked we use that as an opportunity to
++ * synchronise niffies to the highest value as idle ticks may have artificially
++ * kept niffies low on one CPU and the truth can only be later.
++ */
++static inline void synchronise_niffies(struct rq *rq1, struct rq *rq2)
++{
++ if (rq1->niffies > rq2->niffies)
++ rq2->niffies = rq1->niffies;
++ else
++ rq1->niffies = rq2->niffies;
++}
++
++/*
++ * double_rq_lock - safely lock two runqueues
++ *
++ * Note this does not disable interrupts like task_rq_lock,
++ * you need to do so manually before calling.
++ */
++
++/* For when we know rq1 != rq2 */
++static inline void __double_rq_lock(struct rq *rq1, struct rq *rq2)
++ __acquires(rq1->lock)
++ __acquires(rq2->lock)
++{
++ if (rq1 < rq2) {
++ raw_spin_lock(&rq1->lock);
++ raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
++ } else {
++ raw_spin_lock(&rq2->lock);
++ raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
++ }
++}
++
++static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
++ __acquires(rq1->lock)
++ __acquires(rq2->lock)
++{
++ BUG_ON(!irqs_disabled());
++ if (rq1 == rq2) {
++ raw_spin_lock(&rq1->lock);
++ __acquire(rq2->lock); /* Fake it out ;) */
++ } else
++ __double_rq_lock(rq1, rq2);
++ synchronise_niffies(rq1, rq2);
++}
++
++/*
++ * double_rq_unlock - safely unlock two runqueues
++ *
++ * Note this does not restore interrupts like task_rq_unlock,
++ * you need to do so manually after calling.
++ */
++static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
++ __releases(rq1->lock)
++ __releases(rq2->lock)
++{
++ raw_spin_unlock(&rq1->lock);
++ if (rq1 != rq2)
++ raw_spin_unlock(&rq2->lock);
++ else
++ __release(rq2->lock);
++}
++
++static inline void lock_all_rqs(void)
++{
++ int cpu;
++
++ preempt_disable();
++ for_each_possible_cpu(cpu) {
++ struct rq *rq = cpu_rq(cpu);
++
++ do_raw_spin_lock(&rq->lock);
++ }
++}
++
++static inline void unlock_all_rqs(void)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ struct rq *rq = cpu_rq(cpu);
++
++ do_raw_spin_unlock(&rq->lock);
++ }
++ preempt_enable();
++}
++
++/* Specially nest trylock an rq */
++static inline bool trylock_rq(struct rq *this_rq, struct rq *rq)
++{
++ if (unlikely(!do_raw_spin_trylock(&rq->lock)))
++ return false;
++ spin_acquire(&rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++ synchronise_niffies(this_rq, rq);
++ return true;
++}
++
++/* Unlock a specially nested trylocked rq */
++static inline void unlock_rq(struct rq *rq)
++{
++ spin_release(&rq->lock.dep_map, 1, _RET_IP_);
++ do_raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask) \
++ ({ \
++ typeof(ptr) _ptr = (ptr); \
++ typeof(mask) _mask = (mask); \
++ typeof(*_ptr) _old, _val = *_ptr; \
++ \
++ for (;;) { \
++ _old = cmpxchg(_ptr, _val, _val | _mask); \
++ if (_old == _val) \
++ break; \
++ _val = _old; \
++ } \
++ _old; \
++})
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++ struct thread_info *ti = task_thread_info(p);
++ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++ struct thread_info *ti = task_thread_info(p);
++ typeof(ti->flags) old, val = READ_ONCE(ti->flags);
++
++ for (;;) {
++ if (!(val & _TIF_POLLING_NRFLAG))
++ return false;
++ if (val & _TIF_NEED_RESCHED)
++ return true;
++ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
++ if (old == val)
++ break;
++ val = old;
++ }
++ return true;
++}
++
++#else
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++ set_tsk_need_resched(p);
++ return true;
++}
++
++#ifdef CONFIG_SMP
++static bool set_nr_if_polling(struct task_struct *p)
++{
++ return false;
++}
++#endif
++#endif
++
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++ struct wake_q_node *node = &task->wake_q;
++
++ /*
++ * Atomically grab the task, if ->wake_q is !nil already it means
++ * its already queued (either by us or someone else) and will get the
++ * wakeup due to that.
++ *
++ * This cmpxchg() implies a full barrier, which pairs with the write
++ * barrier implied by the wakeup in wake_up_q().
++ */
++ if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
++ return;
++
++ get_task_struct(task);
++
++ /*
++ * The head is context local, there can be no concurrency.
++ */
++ *head->lastp = node;
++ head->lastp = &node->next;
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++ struct wake_q_node *node = head->first;
++
++ while (node != WAKE_Q_TAIL) {
++ struct task_struct *task;
++
++ task = container_of(node, struct task_struct, wake_q);
++ BUG_ON(!task);
++ /* Task can safely be re-inserted now */
++ node = node->next;
++ task->wake_q.next = NULL;
++
++ /*
++ * wake_up_process() implies a wmb() to pair with the queueing
++ * in wake_q_add() so as not to miss wakeups.
++ */
++ wake_up_process(task);
++ put_task_struct(task);
++ }
++}
++
++static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++ next->on_cpu = 1;
++}
++
++static inline void smp_sched_reschedule(int cpu)
++{
++ if (likely(cpu_online(cpu)))
++ smp_send_reschedule(cpu);
++}
++
++/*
++ * resched_task - mark a task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_task(struct task_struct *p)
++{
++ int cpu;
++#ifdef CONFIG_LOCKDEP
++ /* Kernel threads call this when creating workqueues while still
++ * inactive from __kthread_bind_mask, holding only the pi_lock */
++ if (!(p->flags & PF_KTHREAD)) {
++ struct rq *rq = task_rq(p);
++
++ lockdep_assert_held(&rq->lock);
++ }
++#endif
++ if (test_tsk_need_resched(p))
++ return;
++
++ cpu = task_cpu(p);
++ if (cpu == smp_processor_id()) {
++ set_tsk_need_resched(p);
++ set_preempt_need_resched();
++ return;
++ }
++
++ if (set_nr_and_not_polling(p))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++/*
++ * A task that is not running or queued will not have a node set.
++ * A task that is queued but not running will have a node set.
++ * A task that is currently running will have ->on_cpu set but no node set.
++ */
++static inline bool task_queued(struct task_struct *p)
++{
++ return !skiplist_node_empty(&p->node);
++}
++
++static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
++static inline void resched_if_idle(struct rq *rq);
++
++/* Dodgy workaround till we figure out where the softirqs are going */
++static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
++{
++ if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
++ do_softirq_own_stack();
++}
++
++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++ /*
++ * After ->on_cpu is cleared, the task can be moved to a different CPU.
++ * We must ensure this doesn't happen until the switch is completely
++ * finished.
++ *
++ * In particular, the load of prev->state in finish_task_switch() must
++ * happen before this.
++ *
++ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++ */
++ smp_store_release(&prev->on_cpu, 0);
++#endif
++#ifdef CONFIG_DEBUG_SPINLOCK
++ /* this is a valid case when another task releases the spinlock */
++ rq->lock.owner = current;
++#endif
++ /*
++ * If we are tracking spinlock dependencies then we have to
++ * fix up the runqueue lock - which gets 'carried over' from
++ * prev into current:
++ */
++ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++
++#ifdef CONFIG_SMP
++ /*
++ * If prev was marked as migrating to another CPU in return_task, drop
++ * the local runqueue lock but leave interrupts disabled and grab the
++ * remote lock we're migrating it to before enabling them.
++ */
++ if (unlikely(task_on_rq_migrating(prev))) {
++ sched_info_dequeued(rq, prev);
++ /*
++ * We move the ownership of prev to the new cpu now. ttwu can't
++ * activate prev to the wrong cpu since it has to grab this
++ * runqueue in ttwu_remote.
++ */
++#ifdef CONFIG_THREAD_INFO_IN_TASK
++ prev->cpu = prev->wake_cpu;
++#else
++ task_thread_info(prev)->cpu = prev->wake_cpu;
++#endif
++ raw_spin_unlock(&rq->lock);
++
++ raw_spin_lock(&prev->pi_lock);
++ rq = __task_rq_lock(prev);
++ /* Check that someone else hasn't already queued prev */
++ if (likely(!task_queued(prev))) {
++ enqueue_task(rq, prev, 0);
++ prev->on_rq = TASK_ON_RQ_QUEUED;
++ /* Wake up the CPU if it's not already running */
++ resched_if_idle(rq);
++ }
++ raw_spin_unlock(&prev->pi_lock);
++ }
++#endif
++ /* Accurately set nr_running here for load average calculations */
++ rq->nr_running = rq->sl->entries + !rq_idle(rq);
++ rq_unlock(rq);
++
++ do_pending_softirq(rq, current);
++
++ local_irq_enable();
++}
++
++static inline bool deadline_before(u64 deadline, u64 time)
++{
++ return (deadline < time);
++}
++
++/*
++ * Deadline is "now" in niffies + (offset by priority). Setting the deadline
++ * is the key to everything. It distributes cpu fairly amongst tasks of the
++ * same nice value, it proportions cpu according to nice level, it means the
++ * task that last woke up the longest ago has the earliest deadline, thus
++ * ensuring that interactive tasks get low latency on wake up. The CPU
++ * proportion works out to the square of the virtual deadline difference, so
++ * this equation will give nice 19 3% CPU compared to nice 0.
++ */
++static inline u64 prio_deadline_diff(int user_prio)
++{
++ return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
++}
++
++static inline u64 task_deadline_diff(struct task_struct *p)
++{
++ return prio_deadline_diff(TASK_USER_PRIO(p));
++}
++
++static inline u64 static_deadline_diff(int static_prio)
++{
++ return prio_deadline_diff(USER_PRIO(static_prio));
++}
++
++static inline int longest_deadline_diff(void)
++{
++ return prio_deadline_diff(39);
++}
++
++static inline int ms_longest_deadline_diff(void)
++{
++ return NS_TO_MS(longest_deadline_diff());
++}
++
++static inline bool rq_local(struct rq *rq);
++
++#ifndef SCHED_CAPACITY_SCALE
++#define SCHED_CAPACITY_SCALE 1024
++#endif
++
++static inline int rq_load(struct rq *rq)
++{
++ return rq->nr_running;
++}
++
++/*
++ * Update the load average for feeding into cpu frequency governors. Use a
++ * rough estimate of a rolling average with a time constant of ~32ms.
++ * 80/128 ~ 0.63; scaling by 80 / 32768 / 128 equals scaling by 5 / 262144.
++ * Make sure a call to update_clocks has been made before calling this to get
++ * an updated rq->niffies.
++ */
++static void update_load_avg(struct rq *rq, unsigned int flags)
++{
++ unsigned long us_interval, curload;
++ long load;
++
++ if (unlikely(rq->niffies <= rq->load_update))
++ return;
++
++ us_interval = NS_TO_US(rq->niffies - rq->load_update);
++ curload = rq_load(rq);
++ load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
++ if (unlikely(load < 0))
++ load = 0;
++ load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
++ rq->load_avg = load;
++
++ rq->load_update = rq->niffies;
++ if (likely(rq_local(rq)))
++ cpufreq_trigger(rq, flags);
++}
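++
++/*
++ * A worked example of the above (illustrative only, using the constants
++ * already present in this function): over a 32768us (~32ms) interval the
++ * decay term removes
++ *
++ *	load_avg * 32768 * 5 / 262144 = load_avg * 0.625 ~= load_avg * 80/128
++ *
++ * of the old average, matching the ~32ms time constant noted above. With a
++ * single runnable task (curload == 1) the gain term over the same interval
++ * is 1 * 1 * 1024 * 32768 * 5 / 262144 = 640, so load_avg converges
++ * towards curload * curload * SCHED_CAPACITY_SCALE.
++ */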
++
++/*
++ * Removing from the runqueue. Enter with rq locked. Deleting a task
++ * from the skip list is done via the stored node reference in the task struct
++ * and does not require a full look up. Thus it occurs in O(k) time where k
++ * is the "level" of the list the task was stored at - usually < 4, max 8.
++ */
++static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
++{
++ skiplist_delete(rq->sl, &p->node);
++ rq->best_key = rq->node.next[0]->key;
++ update_clocks(rq);
++
++ if (!(flags & DEQUEUE_SAVE))
++ sched_info_dequeued(task_rq(p), p);
++ update_load_avg(rq, flags);
++}
++
++#ifdef CONFIG_PREEMPT_RCU
++static bool rcu_read_critical(struct task_struct *p)
++{
++ return p->rcu_read_unlock_special.b.blocked;
++}
++#else /* CONFIG_PREEMPT_RCU */
++#define rcu_read_critical(p) (false)
++#endif /* CONFIG_PREEMPT_RCU */
++
++/*
++ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as
++ * an idle task, we ensure none of the following conditions are met.
++ */
++static bool idleprio_suitable(struct task_struct *p)
++{
++ return (!(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)) &&
++ !signal_pending(p) && !rcu_read_critical(p) && !freezing(p));
++}
++
++/*
++ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check
++ * that the iso_refractory flag is not set.
++ */
++static inline bool isoprio_suitable(struct rq *rq)
++{
++ return !rq->iso_refractory;
++}
++
++/*
++ * Adding to the runqueue. Enter with rq locked.
++ */
++static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
++{
++ unsigned int randseed, cflags = 0;
++ u64 sl_id;
++
++ if (!rt_task(p)) {
++ /* Check it hasn't gotten rt from PI */
++ if ((idleprio_task(p) && idleprio_suitable(p)) ||
++ (iso_task(p) && isoprio_suitable(rq)))
++ p->prio = p->normal_prio;
++ else
++ p->prio = NORMAL_PRIO;
++ }
++ /*
++ * The sl_id key passed to the skiplist generates a sorted list.
++ * Realtime and sched iso tasks run FIFO so they only need be sorted
++ * according to priority. The skiplist will put tasks of the same
++ * key inserted later in FIFO order. Tasks of sched normal, batch
++ * and idleprio are sorted according to their deadlines. Idleprio
++ * tasks are offset by an impossibly large deadline value ensuring
++ * they get sorted into last positions, but still according to their
++ * own deadlines. This creates a "landscape" of skiplists running
++ * from priority 0 realtime in first place to the lowest priority
++ * idleprio tasks last. Skiplist insertion is an O(log n) process.
++ */
++ if (p->prio <= ISO_PRIO) {
++ sl_id = p->prio;
++ cflags = SCHED_CPUFREQ_RT;
++ } else {
++ sl_id = p->deadline;
++ if (idleprio_task(p)) {
++ if (p->prio == IDLE_PRIO)
++ sl_id |= 0xF000000000000000;
++ else
++ sl_id += longest_deadline_diff();
++ }
++ }
++ /*
++ * Some architectures don't have better than microsecond resolution
++ * so mask out ~microseconds as the random seed for skiplist insertion.
++ */
++ update_clocks(rq);
++ if (!(flags & ENQUEUE_RESTORE))
++ sched_info_queued(rq, p);
++ randseed = (rq->niffies >> 10) & 0xFFFFFFFF;
++ skiplist_insert(rq->sl, &p->node, sl_id, p, randseed);
++ rq->best_key = rq->node.next[0]->key;
++ if (p->in_iowait)
++ cflags |= SCHED_CPUFREQ_IOWAIT;
++ update_load_avg(rq, cflags);
++}
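++
++/*
++ * A small illustration of the key layout above (a sketch, reusing the
++ * roughly 6ms nice 0 offset from prio_deadline_diff()):
++ *
++ *	SCHED_FIFO rt_priority 99 task		key = 0
++ *	SCHED_ISO task (not refractory)		key = ISO_PRIO
++ *	SCHED_NORMAL nice 0 woken at time t	key ~= t + 6ms
++ *	SCHED_IDLEPRIO demoted to normal	key = deadline + longest_deadline_diff()
++ *	SCHED_IDLEPRIO at IDLE_PRIO		key = deadline | 0xF000000000000000
++ *
++ * so lower keys, meaning better priorities and earlier deadlines, always
++ * sort to the front of the skiplist.
++ */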
++
++/*
++ * Returns the relative length of deadline all compared to the shortest
++ * deadline which is that of nice -20.
++ */
++static inline int task_prio_ratio(struct task_struct *p)
++{
++ return prio_ratios[TASK_USER_PRIO(p)];
++}
++
++/*
++ * task_timeslice - all tasks of all priorities get the exact same timeslice
++ * length. CPU distribution is handled by giving different deadlines to
++ * tasks of different priorities. Use 128 as the base value for fast shifts.
++ */
++static inline int task_timeslice(struct task_struct *p)
++{
++ return (rr_interval * task_prio_ratio(p) / 128);
++}
++
++#ifdef CONFIG_SMP
++/* Entered with rq locked */
++static inline void resched_if_idle(struct rq *rq)
++{
++ if (rq_idle(rq))
++ resched_task(rq->curr);
++}
++
++static inline bool rq_local(struct rq *rq)
++{
++ return (rq->cpu == smp_processor_id());
++}
++#ifdef CONFIG_SMT_NICE
++static const cpumask_t *thread_cpumask(int cpu);
++
++/* Find the best real time priority running on any SMT siblings of cpu and, if
++ * none are running, the static priority of the best deadline task running.
++ * The lookups to the other runqueues are done lockless as the occasional wrong
++ * value would be harmless. */
++static int best_smt_bias(struct rq *this_rq)
++{
++ int other_cpu, best_bias = 0;
++
++ for_each_cpu(other_cpu, &this_rq->thread_mask) {
++ struct rq *rq = cpu_rq(other_cpu);
++
++ if (rq_idle(rq))
++ continue;
++ if (unlikely(!rq->online))
++ continue;
++ if (!rq->rq_mm)
++ continue;
++ if (likely(rq->rq_smt_bias > best_bias))
++ best_bias = rq->rq_smt_bias;
++ }
++ return best_bias;
++}
++
++static int task_prio_bias(struct task_struct *p)
++{
++ if (rt_task(p))
++ return 1 << 30;
++ else if (task_running_iso(p))
++ return 1 << 29;
++ else if (task_running_idle(p))
++ return 0;
++ return MAX_PRIO - p->static_prio;
++}
++
++static bool smt_always_schedule(struct task_struct __maybe_unused *p, struct rq __maybe_unused *this_rq)
++{
++ return true;
++}
++
++static bool (*smt_schedule)(struct task_struct *p, struct rq *this_rq) = &smt_always_schedule;
++
++/* We've already decided p can run on CPU, now test if it shouldn't for SMT
++ * nice reasons. */
++static bool smt_should_schedule(struct task_struct *p, struct rq *this_rq)
++{
++ int best_bias, task_bias;
++
++ /* Kernel threads always run */
++ if (unlikely(!p->mm))
++ return true;
++ if (rt_task(p))
++ return true;
++ if (!idleprio_suitable(p))
++ return true;
++ best_bias = best_smt_bias(this_rq);
++ /* The smt siblings are all idle or running IDLEPRIO */
++ if (best_bias < 1)
++ return true;
++ task_bias = task_prio_bias(p);
++ if (task_bias < 1)
++ return false;
++ if (task_bias >= best_bias)
++ return true;
++ /* Dither 25% cpu of normal tasks regardless of nice difference */
++ if (best_bias % 4 == 1)
++ return true;
++ /* Sorry, you lose */
++ return false;
++}
++#else /* CONFIG_SMT_NICE */
++#define smt_schedule(p, this_rq) (true)
++#endif /* CONFIG_SMT_NICE */
++
++static inline void atomic_set_cpu(int cpu, cpumask_t *cpumask)
++{
++ set_bit(cpu, (volatile unsigned long *)cpumask);
++}
++
++/*
++ * The cpu_idle_map stores a bitmap of all the CPUs currently idle to
++ * allow easy lookup of whether any suitable idle CPUs are available.
++ * It's cheaper to maintain a binary yes/no if there are any idle CPUs on the
++ * idle_cpus variable than to do a full bitmask check when we are busy. The
++ * bits are set atomically but read locklessly as occasional false positive /
++ * negative is harmless.
++ */
++static inline void set_cpuidle_map(int cpu)
++{
++ if (likely(cpu_online(cpu)))
++ atomic_set_cpu(cpu, &cpu_idle_map);
++}
++
++static inline void atomic_clear_cpu(int cpu, cpumask_t *cpumask)
++{
++ clear_bit(cpu, (volatile unsigned long *)cpumask);
++}
++
++static inline void clear_cpuidle_map(int cpu)
++{
++ atomic_clear_cpu(cpu, &cpu_idle_map);
++}
++
++static bool suitable_idle_cpus(struct task_struct *p)
++{
++ return (cpumask_intersects(&p->cpus_allowed, &cpu_idle_map));
++}
++
++/*
++ * Resched current on rq. We don't know if rq is local to this CPU nor if it
++ * is locked, so we do not use an intermediate variable for the task, to
++ * avoid having it dereferenced.
++ */
++static void resched_curr(struct rq *rq)
++{
++ int cpu;
++
++ if (test_tsk_need_resched(rq->curr))
++ return;
++
++ rq->preempt = rq->curr;
++ cpu = rq->cpu;
++
++ /* We're doing this without holding the rq lock if it's not task_rq */
++
++ if (cpu == smp_processor_id()) {
++ set_tsk_need_resched(rq->curr);
++ set_preempt_need_resched();
++ return;
++ }
++
++ if (set_nr_and_not_polling(rq->curr))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++#define CPUIDLE_DIFF_THREAD (1)
++#define CPUIDLE_DIFF_CORE (2)
++#define CPUIDLE_CACHE_BUSY (4)
++#define CPUIDLE_DIFF_CPU (8)
++#define CPUIDLE_THREAD_BUSY (16)
++#define CPUIDLE_DIFF_NODE (32)
++
++/*
++ * The best idle CPU is chosen according to the CPUIDLE ranking above where the
++ * lowest value would give the most suitable CPU to schedule p onto next. The
++ * order works out to be the following:
++ *
++ * Same thread, idle or busy cache, idle or busy threads
++ * Other core, same cache, idle or busy cache, idle threads.
++ * Same node, other CPU, idle cache, idle threads.
++ * Same node, other CPU, busy cache, idle threads.
++ * Other core, same cache, busy threads.
++ * Same node, other CPU, busy threads.
++ * Other node, other CPU, idle cache, idle threads.
++ * Other node, other CPU, busy cache, idle threads.
++ * Other node, other CPU, busy threads.
++ */
++static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
++{
++ int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY |
++ CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE |
++ CPUIDLE_DIFF_THREAD;
++ int cpu_tmp;
++
++ if (cpumask_test_cpu(best_cpu, tmpmask))
++ goto out;
++
++ for_each_cpu(cpu_tmp, tmpmask) {
++ int ranking, locality;
++ struct rq *tmp_rq;
++
++ ranking = 0;
++ tmp_rq = cpu_rq(cpu_tmp);
++
++ locality = rq->cpu_locality[cpu_tmp];
++#ifdef CONFIG_NUMA
++ if (locality > 3)
++ ranking |= CPUIDLE_DIFF_NODE;
++ else
++#endif
++ if (locality > 2)
++ ranking |= CPUIDLE_DIFF_CPU;
++#ifdef CONFIG_SCHED_MC
++ else if (locality == 2)
++ ranking |= CPUIDLE_DIFF_CORE;
++ else if (!(tmp_rq->cache_idle(tmp_rq)))
++ ranking |= CPUIDLE_CACHE_BUSY;
++#endif
++#ifdef CONFIG_SCHED_SMT
++ if (locality == 1)
++ ranking |= CPUIDLE_DIFF_THREAD;
++ if (!(tmp_rq->siblings_idle(tmp_rq)))
++ ranking |= CPUIDLE_THREAD_BUSY;
++#endif
++ if (ranking < best_ranking) {
++ best_cpu = cpu_tmp;
++ best_ranking = ranking;
++ }
++ }
++out:
++ return best_cpu;
++}
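++
++/*
++ * A worked example of the ranking (assuming SMT, MC and NUMA are all
++ * configured and each candidate's cache and thread siblings are idle): an
++ * idle SMT sibling of best_cpu scores CPUIDLE_DIFF_THREAD (1), an idle
++ * core sharing its cache scores CPUIDLE_DIFF_CORE (2), an idle CPU in
++ * another package on the same node scores CPUIDLE_DIFF_CPU (8), and any
++ * CPU on a remote node scores at least CPUIDLE_DIFF_NODE (32). The lowest
++ * score wins, so the closest idle CPU is always preferred in that order.
++ */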
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++ struct rq *this_rq = cpu_rq(this_cpu);
++
++ return (this_rq->cpu_locality[that_cpu] < 3);
++}
++
++/* As per resched_curr but only will resched idle task */
++static inline void resched_idle(struct rq *rq)
++{
++ if (test_tsk_need_resched(rq->idle))
++ return;
++
++ rq->preempt = rq->idle;
++
++ set_tsk_need_resched(rq->idle);
++
++ if (rq_local(rq)) {
++ set_preempt_need_resched();
++ return;
++ }
++
++ smp_sched_reschedule(rq->cpu);
++}
++
++static struct rq *resched_best_idle(struct task_struct *p, int cpu)
++{
++ cpumask_t tmpmask;
++ struct rq *rq;
++ int best_cpu;
++
++ cpumask_and(&tmpmask, &p->cpus_allowed, &cpu_idle_map);
++ best_cpu = best_mask_cpu(cpu, task_rq(p), &tmpmask);
++ rq = cpu_rq(best_cpu);
++ if (!smt_schedule(p, rq))
++ return NULL;
++ rq->preempt = p;
++ resched_idle(rq);
++ return rq;
++}
++
++static inline void resched_suitable_idle(struct task_struct *p)
++{
++ if (suitable_idle_cpus(p))
++ resched_best_idle(p, task_cpu(p));
++}
++
++static inline struct rq *rq_order(struct rq *rq, int cpu)
++{
++ return rq->rq_order[cpu];
++}
++#else /* CONFIG_SMP */
++static inline void set_cpuidle_map(int cpu)
++{
++}
++
++static inline void clear_cpuidle_map(int cpu)
++{
++}
++
++static inline bool suitable_idle_cpus(struct task_struct *p)
++{
++ return uprq->curr == uprq->idle;
++}
++
++static inline void resched_suitable_idle(struct task_struct *p)
++{
++}
++
++static inline void resched_curr(struct rq *rq)
++{
++ resched_task(rq->curr);
++}
++
++static inline void resched_if_idle(struct rq *rq)
++{
++}
++
++static inline bool rq_local(struct rq *rq)
++{
++ return true;
++}
++
++static inline struct rq *rq_order(struct rq *rq, int cpu)
++{
++ return rq;
++}
++
++static inline bool smt_schedule(struct task_struct *p, struct rq *rq)
++{
++ return true;
++}
++#endif /* CONFIG_SMP */
++
++static inline int normal_prio(struct task_struct *p)
++{
++ if (has_rt_policy(p))
++ return MAX_RT_PRIO - 1 - p->rt_priority;
++ if (idleprio_task(p))
++ return IDLE_PRIO;
++ if (iso_task(p))
++ return ISO_PRIO;
++ return NORMAL_PRIO;
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks as it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++ p->normal_prio = normal_prio(p);
++ /*
++ * If we are RT tasks or we were boosted to RT priority,
++ * keep the priority unchanged. Otherwise, update priority
++ * to the normal priority:
++ */
++ if (!rt_prio(p->prio))
++ return p->normal_prio;
++ return p->prio;
++}
++
++/*
++ * activate_task - move a task to the runqueue. Enter with rq locked.
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++ resched_if_idle(rq);
++
++ /*
++ * Sleep time is in units of nanosecs, so shift by 20 to get a
++ * milliseconds-range estimation of the amount of time that the task
++ * spent sleeping:
++ */
++ if (unlikely(prof_on == SLEEP_PROFILING)) {
++ if (p->state == TASK_UNINTERRUPTIBLE)
++ profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
++ (rq->niffies - p->last_ran) >> 20);
++ }
++
++ p->prio = effective_prio(p);
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible--;
++
++ enqueue_task(rq, p, 0);
++ p->on_rq = TASK_ON_RQ_QUEUED;
++}
++
++/*
++ * deactivate_task - If it's running, it's not on the runqueue and we can just
++ * decrement the nr_running. Enter with rq locked.
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible++;
++
++ p->on_rq = 0;
++ sched_info_dequeued(rq, p);
++}
++
++#ifdef CONFIG_SMP
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++ struct rq *rq;
++
++ if (task_cpu(p) == new_cpu)
++ return;
++
++ /* Do NOT call set_task_cpu on a currently queued task as we will not
++ * be reliably holding the rq lock after changing CPU. */
++ BUG_ON(task_queued(p));
++ rq = task_rq(p);
++
++#ifdef CONFIG_LOCKDEP
++ /*
++ * The caller should hold either p->pi_lock or rq->lock, when changing
++ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++ *
++ * Furthermore, all task_rq users should acquire both locks, see
++ * task_rq_lock().
++ */
++ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++ lockdep_is_held(&rq->lock)));
++#endif
++
++ trace_sched_migrate_task(p, new_cpu);
++ perf_event_task_migrate(p);
++
++ /*
++ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
++ * successfully executed on another CPU. We must ensure that updates of
++ * per-task data have been completed by this moment.
++ */
++ smp_wmb();
++
++ p->wake_cpu = new_cpu;
++
++ if (task_running(rq, p)) {
++ /*
++ * We should only be calling this on a running task if we're
++ * holding rq lock.
++ */
++ lockdep_assert_held(&rq->lock);
++
++ /*
++ * We can't change the task_thread_info CPU on a running task
++ * as p will still be protected by the rq lock of the CPU it
++		 * is still running on, so we only set the wake_cpu, to be
++		 * lazily updated once it is off the CPU.
++ */
++ return;
++ }
++
++#ifdef CONFIG_THREAD_INFO_IN_TASK
++ p->cpu = new_cpu;
++#else
++ task_thread_info(p)->cpu = new_cpu;
++#endif
++ /* We're no longer protecting p after this point since we're holding
++ * the wrong runqueue lock. */
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * Move a task off the runqueue and take it to a cpu where it will
++ * become the running task.
++ */
++static inline void take_task(struct rq *rq, int cpu, struct task_struct *p)
++{
++ struct rq *p_rq = task_rq(p);
++
++ dequeue_task(p_rq, p, DEQUEUE_SAVE);
++ if (p_rq != rq) {
++ sched_info_dequeued(p_rq, p);
++ sched_info_queued(rq, p);
++ }
++ set_task_cpu(p, cpu);
++}
++
++/*
++ * Returns a descheduling task to the runqueue unless it is being
++ * deactivated.
++ */
++static inline void return_task(struct task_struct *p, struct rq *rq,
++ int cpu, bool deactivate)
++{
++ if (deactivate)
++ deactivate_task(p, rq);
++ else {
++#ifdef CONFIG_SMP
++ /*
++		 * set_task_cpu was called on the running task that doesn't
++		 * want to deactivate, so it has to be enqueued on a different
++		 * CPU and we need its lock. Tag it to be moved; the move is
++		 * performed as the lock is dropped in finish_lock_switch.
++ */
++ if (unlikely(p->wake_cpu != cpu))
++ p->on_rq = TASK_ON_RQ_MIGRATING;
++ else
++#endif
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ }
++}
++
++/* Enter with rq lock held. We know p is on the local cpu */
++static inline void __set_tsk_resched(struct task_struct *p)
++{
++ set_tsk_need_resched(p);
++ set_preempt_need_resched();
++}
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++ return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * If @match_state is nonzero, it's the @p->state value just checked and
++ * not expected to change. If it changes, i.e. @p might have woken up,
++ * then return zero. When we succeed in waiting for @p to be off its CPU,
++ * we return a positive number (its total switch count). If a second call
++ * a short while later returns the same number, the caller can be sure that
++ * @p has remained unscheduled the whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, long match_state)
++{
++ int running, queued;
++ unsigned long flags;
++ unsigned long ncsw;
++ struct rq *rq;
++
++ for (;;) {
++ rq = task_rq(p);
++
++ /*
++ * If the task is actively running on another CPU
++ * still, just relax and busy-wait without holding
++ * any locks.
++ *
++ * NOTE! Since we don't hold any locks, it's not
++ * even sure that "rq" stays as the right runqueue!
++ * But we don't care, since this will return false
++ * if the runqueue has changed and p is actually now
++ * running somewhere else!
++ */
++ while (task_running(rq, p)) {
++ if (match_state && unlikely(p->state != match_state))
++ return 0;
++ cpu_relax();
++ }
++
++ /*
++ * Ok, time to look more closely! We need the rq
++ * lock now, to be *sure*. If we're wrong, we'll
++ * just go back and repeat.
++ */
++ rq = task_rq_lock(p, &flags);
++ trace_sched_wait_task(p);
++ running = task_running(rq, p);
++ queued = task_on_rq_queued(p);
++ ncsw = 0;
++ if (!match_state || p->state == match_state)
++ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++ task_rq_unlock(rq, p, &flags);
++
++ /*
++ * If it changed from the expected state, bail out now.
++ */
++ if (unlikely(!ncsw))
++ break;
++
++ /*
++ * Was it really running after all now that we
++ * checked with the proper locks actually held?
++ *
++ * Oops. Go back and try again..
++ */
++ if (unlikely(running)) {
++ cpu_relax();
++ continue;
++ }
++
++ /*
++ * It's not enough that it's not actively running,
++ * it must be off the runqueue _entirely_, and not
++ * preempted!
++ *
++ * So if it was still runnable (but just not actively
++ * running right now), it's preempted, and we should
++ * yield - it could be a while.
++ */
++ if (unlikely(queued)) {
++ ktime_t to = NSEC_PER_SEC / HZ;
++
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_hrtimeout(&to, HRTIMER_MODE_REL);
++ continue;
++ }
++
++ /*
++ * Ahh, all good. It wasn't running, and it wasn't
++ * runnable, which means that it will never become
++ * running in the future either. We're all done!
++ */
++ break;
++ }
++
++ return ncsw;
++}
++
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++ int cpu;
++
++ preempt_disable();
++ cpu = task_cpu(p);
++ if ((cpu != smp_processor_id()) && task_curr(p))
++ smp_sched_reschedule(cpu);
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++#endif
++
++/*
++ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
++ * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or
++ * between themselves, they cooperatively multitask. An idle rq scores as
++ * prio PRIO_LIMIT so it is always preempted.
++ */
++static inline bool
++can_preempt(struct task_struct *p, int prio, u64 deadline)
++{
++ /* Better static priority RT task or better policy preemption */
++ if (p->prio < prio)
++ return true;
++ if (p->prio > prio)
++ return false;
++ if (p->policy == SCHED_BATCH)
++ return false;
++ /* SCHED_NORMAL and ISO will preempt based on deadline */
++ if (!deadline_before(p->deadline, deadline))
++ return false;
++ return true;
++}
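++
++/*
++ * For example (a sketch of the rules above): an RT task always preempts a
++ * running SCHED_NORMAL task because its prio is numerically lower; two
++ * SCHED_NORMAL tasks of equal prio compare virtual deadlines and the
++ * earlier deadline wins; and a SCHED_BATCH task of equal prio never
++ * preempts on deadline at all.
++ */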
++
++#ifdef CONFIG_SMP
++/*
++ * Check to see if p can run on cpu, and if not, whether there are any online
++ * CPUs it can run on instead.
++ */
++static inline bool needs_other_cpu(struct task_struct *p, int cpu)
++{
++ if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed)))
++ return true;
++ return false;
++}
++#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
++
++static void try_preempt(struct task_struct *p, struct rq *this_rq)
++{
++ int i, this_entries = rq_load(this_rq);
++ cpumask_t tmp;
++
++ if (suitable_idle_cpus(p) && resched_best_idle(p, task_cpu(p)))
++ return;
++
++ /* IDLEPRIO tasks never preempt anything but idle */
++ if (p->policy == SCHED_IDLEPRIO)
++ return;
++
++ cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed);
++
++ for (i = 0; i < num_possible_cpus(); i++) {
++ struct rq *rq = this_rq->rq_order[i];
++
++ if (!cpumask_test_cpu(rq->cpu, &tmp))
++ continue;
++
++ if (!sched_interactive && rq != this_rq && rq_load(rq) <= this_entries)
++ continue;
++ if (smt_schedule(p, rq) && can_preempt(p, rq->rq_prio, rq->rq_deadline)) {
++ /* We set rq->preempting lockless, it's a hint only */
++ rq->preempting = p;
++ resched_curr(rq);
++ return;
++ }
++ }
++}
++
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++ const struct cpumask *new_mask, bool check);
++#else /* CONFIG_SMP */
++static inline bool needs_other_cpu(struct task_struct *p, int cpu)
++{
++ return false;
++}
++
++static void try_preempt(struct task_struct *p, struct rq *this_rq)
++{
++ if (p->policy == SCHED_IDLEPRIO)
++ return;
++ if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline))
++ resched_curr(uprq);
++}
++
++static inline int __set_cpus_allowed_ptr(struct task_struct *p,
++ const struct cpumask *new_mask, bool check)
++{
++ return set_cpus_allowed_ptr(p, new_mask);
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * wake flags
++ */
++#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
++#define WF_FORK 0x02 /* child wakeup after fork */
++#define WF_MIGRATED 0x04 /* internal use, task got migrated */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++ struct rq *rq;
++
++ if (!schedstat_enabled())
++ return;
++
++ rq = this_rq();
++
++#ifdef CONFIG_SMP
++ if (cpu == rq->cpu)
++ schedstat_inc(rq->ttwu_local);
++ else {
++ struct sched_domain *sd;
++
++ rcu_read_lock();
++ for_each_domain(rq->cpu, sd) {
++ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
++ schedstat_inc(sd->ttwu_wake_remote);
++ break;
++ }
++ }
++ rcu_read_unlock();
++ }
++
++#endif /* CONFIG_SMP */
++
++ schedstat_inc(rq->ttwu_count);
++}
++
++static inline void ttwu_activate(struct rq *rq, struct task_struct *p)
++{
++ activate_task(p, rq);
++
++ /* if a worker is waking up, notify the workqueue */
++ if (p->flags & PF_WQ_WORKER)
++ wq_worker_waking_up(p, cpu_of(rq));
++}
++
++/*
++ * Mark the task runnable and perform wakeup-preemption.
++ */
++static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++ /*
++ * Sync wakeups (i.e. those types of wakeups where the waker
++ * has indicated that it will leave the CPU in short order)
++ * don't trigger a preemption if there are no idle cpus,
++ * instead waiting for current to deschedule.
++ */
++ if (wake_flags & WF_SYNC)
++ resched_suitable_idle(p);
++ else
++ try_preempt(p, rq);
++ p->state = TASK_RUNNING;
++ trace_sched_wakeup(p);
++}
++
++static void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++ lockdep_assert_held(&rq->lock);
++
++#ifdef CONFIG_SMP
++ if (p->sched_contributes_to_load)
++ rq->nr_uninterruptible--;
++#endif
++
++ ttwu_activate(rq, p);
++ ttwu_do_wakeup(rq, p, wake_flags);
++}
++
++/*
++ * Called in case the task @p isn't fully descheduled from its runqueue;
++ * in this case we must do a remote wakeup. It's a 'light' wakeup though,
++ * since all we need to do is flip p->state to TASK_RUNNING, since
++ * the task is still ->on_rq.
++ */
++static int ttwu_remote(struct task_struct *p, int wake_flags)
++{
++ struct rq *rq;
++ int ret = 0;
++
++ rq = __task_rq_lock(p);
++ if (likely(task_on_rq_queued(p))) {
++ ttwu_do_wakeup(rq, p, wake_flags);
++ ret = 1;
++ }
++ __task_rq_unlock(rq);
++
++ return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void)
++{
++ struct rq *rq = this_rq();
++ struct llist_node *llist = llist_del_all(&rq->wake_list);
++ struct task_struct *p, *t;
++ unsigned long flags;
++
++ if (!llist)
++ return;
++
++ rq_lock_irqsave(rq, &flags);
++
++ llist_for_each_entry_safe(p, t, llist, wake_entry)
++ ttwu_do_activate(rq, p, 0);
++
++ rq_unlock_irqrestore(rq, &flags);
++}
++
++void scheduler_ipi(void)
++{
++ /*
++ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
++ * TIF_NEED_RESCHED remotely (for the first time) will also send
++ * this IPI.
++ */
++ preempt_fold_need_resched();
++
++ if (llist_empty(&this_rq()->wake_list) && (!idle_cpu(smp_processor_id()) || need_resched()))
++ return;
++
++ /*
++ * Not all reschedule IPI handlers call irq_enter/irq_exit, since
++ * traditionally all their work was done from the interrupt return
++ * path. Now that we actually do some work, we need to make sure
++ * we do call them.
++ *
++ * Some archs already do call them, luckily irq_enter/exit nest
++ * properly.
++ *
++ * Arguably we should visit all archs and update all handlers,
++ * however a fair share of IPIs are still resched only so this would
++ * somewhat pessimize the simple resched case.
++ */
++ irq_enter();
++ sched_ttwu_pending();
++ irq_exit();
++}
++
++static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
++ if (!set_nr_if_polling(rq->idle))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++ }
++}
++
++void wake_up_if_idle(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ rcu_read_lock();
++
++ if (!is_idle_task(rcu_dereference(rq->curr)))
++ goto out;
++
++ if (set_nr_if_polling(rq->idle)) {
++ trace_sched_wake_idle_without_ipi(cpu);
++ } else {
++ rq_lock_irqsave(rq, &flags);
++ if (likely(is_idle_task(rq->curr)))
++ smp_sched_reschedule(cpu);
++ /* Else cpu is not in idle, do nothing here */
++ rq_unlock_irqrestore(rq, &flags);
++ }
++
++out:
++ rcu_read_unlock();
++}
++
++static int valid_task_cpu(struct task_struct *p)
++{
++ cpumask_t valid_mask;
++
++ if (p->flags & PF_KTHREAD)
++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_online_mask);
++ else
++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_active_mask);
++
++ if (unlikely(!cpumask_weight(&valid_mask))) {
++ /* Hotplug boot threads do this before the CPU is up */
++ printk(KERN_INFO "SCHED: No cpumask for %s/%d weight %d\n", p->comm, p->pid, cpumask_weight(&p->cpus_allowed));
++ return cpumask_any(&p->cpus_allowed);
++ }
++ return cpumask_any(&valid_mask);
++}
++
++/*
++ * For a task that's just being woken up we have a valuable balancing
++ * opportunity, so choose the most lightly loaded runqueue with the nearest cache.
++ * Entered with rq locked and returns with the chosen runqueue locked.
++ */
++static inline int select_best_cpu(struct task_struct *p)
++{
++ unsigned int idlest = ~0U;
++ struct rq *rq = NULL;
++ int i;
++
++ if (suitable_idle_cpus(p)) {
++ int cpu = task_cpu(p);
++
++ if (unlikely(needs_other_cpu(p, cpu)))
++ cpu = valid_task_cpu(p);
++ rq = resched_best_idle(p, cpu);
++ if (likely(rq))
++ return rq->cpu;
++ }
++
++ for (i = 0; i < num_possible_cpus(); i++) {
++ struct rq *other_rq = task_rq(p)->rq_order[i];
++ int entries;
++
++ if (!other_rq->online)
++ continue;
++ if (needs_other_cpu(p, other_rq->cpu))
++ continue;
++ entries = rq_load(other_rq);
++ if (entries >= idlest)
++ continue;
++ idlest = entries;
++ rq = other_rq;
++ }
++ if (unlikely(!rq))
++ return task_cpu(p);
++ return rq->cpu;
++}
++#else /* CONFIG_SMP */
++static int valid_task_cpu(struct task_struct *p)
++{
++ return 0;
++}
++
++static inline int select_best_cpu(struct task_struct *p)
++{
++ return 0;
++}
++
++static struct rq *resched_best_idle(struct task_struct *p, int cpu)
++{
++ return NULL;
++}
++#endif /* CONFIG_SMP */
++
++static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++#if defined(CONFIG_SMP)
++ if (!cpus_share_cache(smp_processor_id(), cpu)) {
++ sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++ ttwu_queue_remote(p, cpu, wake_flags);
++ return;
++ }
++#endif
++ rq_lock(rq);
++ ttwu_do_activate(rq, p, wake_flags);
++ rq_unlock(rq);
++}
++
++/***
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Put it on the run-queue if it's not already there. The "current"
++ * thread is always on the run-queue (except when the actual
++ * re-schedule is in progress), and as such you're allowed to do
++ * the simpler "current->state = TASK_RUNNING" to mark yourself
++ * runnable without the overhead of this.
++ *
++ * Return: %true if @p was woken up, %false if it was already running
++ * or @state didn't match @p's state.
++ */
++static int
++try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
++{
++ unsigned long flags;
++ int cpu, success = 0;
++
++ /*
++ * If we are going to wake up a thread waiting for CONDITION we
++ * need to ensure that CONDITION=1 done by the caller can not be
++ * reordered with p->state check below. This pairs with mb() in
++ * set_current_state() the waiting thread does.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ smp_mb__after_spinlock();
++	/* state is a volatile long; why, I don't know */
++ if (!((unsigned int)p->state & state))
++ goto out;
++
++ trace_sched_waking(p);
++
++ /* We're going to change ->state: */
++ success = 1;
++ cpu = task_cpu(p);
++
++ /*
++ * Ensure we load p->on_rq _after_ p->state, otherwise it would
++ * be possible to, falsely, observe p->on_rq == 0 and get stuck
++ * in smp_cond_load_acquire() below.
++ *
++ * sched_ttwu_pending() try_to_wake_up()
++ * [S] p->on_rq = 1; [L] P->state
++ * UNLOCK rq->lock -----.
++ * \
++ * +--- RMB
++ * schedule() /
++ * LOCK rq->lock -----'
++ * UNLOCK rq->lock
++ *
++ * [task p]
++ * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
++ *
++ * Pairs with the UNLOCK+LOCK on rq->lock from the
++ * last wakeup of our task and the schedule that got our task
++ * current.
++ */
++ smp_rmb();
++ if (p->on_rq && ttwu_remote(p, wake_flags))
++ goto stat;
++
++#ifdef CONFIG_SMP
++ /*
++ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++ * possible to, falsely, observe p->on_cpu == 0.
++ *
++ * One must be running (->on_cpu == 1) in order to remove oneself
++ * from the runqueue.
++ *
++ * [S] ->on_cpu = 1; [L] ->on_rq
++ * UNLOCK rq->lock
++ * RMB
++ * LOCK rq->lock
++ * [S] ->on_rq = 0; [L] ->on_cpu
++ *
++ * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
++ * from the consecutive calls to schedule(); the first switching to our
++ * task, the second putting it to sleep.
++ */
++ smp_rmb();
++
++ /*
++ * If the owning (remote) CPU is still in the middle of schedule() with
++ * this task as prev, wait until its done referencing the task.
++ *
++ * Pairs with the smp_store_release() in finish_lock_switch().
++ *
++ * This ensures that tasks getting woken will be fully ordered against
++ * their previous state and preserve Program Order.
++ */
++ smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++ p->sched_contributes_to_load = !!task_contributes_to_load(p);
++ p->state = TASK_WAKING;
++
++ if (p->in_iowait) {
++ delayacct_blkio_end();
++ atomic_dec(&task_rq(p)->nr_iowait);
++ }
++
++ cpu = select_best_cpu(p);
++ if (task_cpu(p) != cpu)
++ set_task_cpu(p, cpu);
++
++#else /* CONFIG_SMP */
++
++ if (p->in_iowait) {
++ delayacct_blkio_end();
++ atomic_dec(&task_rq(p)->nr_iowait);
++ }
++
++#endif /* CONFIG_SMP */
++
++ ttwu_queue(p, cpu, wake_flags);
++stat:
++ ttwu_stat(p, cpu, wake_flags);
++out:
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++ return success;
++}
++
++/**
++ * try_to_wake_up_local - try to wake up a local task with rq lock held
++ * @p: the thread to be awakened
++ *
++ * Put @p on the run-queue if it's not already there. The caller must
++ * ensure that rq is locked and @p is not the current task.
++ * rq stays locked over invocation.
++ */
++static void try_to_wake_up_local(struct task_struct *p)
++{
++ struct rq *rq = task_rq(p);
++
++ if (WARN_ON_ONCE(rq != this_rq()) ||
++ WARN_ON_ONCE(p == current))
++ return;
++
++ lockdep_assert_held(&rq->lock);
++
++ if (!raw_spin_trylock(&p->pi_lock)) {
++ /*
++ * This is OK, because current is on_cpu, which avoids it being
++ * picked for load-balance and preemption/IRQs are still
++ * disabled avoiding further scheduler activity on it and we've
++ * not yet picked a replacement task.
++ */
++ rq_unlock(rq);
++ raw_spin_lock(&p->pi_lock);
++ rq_lock(rq);
++ }
++
++ if (!(p->state & TASK_NORMAL))
++ goto out;
++
++ trace_sched_waking(p);
++
++ if (!task_on_rq_queued(p)) {
++ if (p->in_iowait) {
++ delayacct_blkio_end();
++ atomic_dec(&rq->nr_iowait);
++ }
++ ttwu_activate(rq, p);
++ }
++
++ ttwu_do_wakeup(rq, p, 0);
++ ttwu_stat(p, smp_processor_id(), 0);
++out:
++ raw_spin_unlock(&p->pi_lock);
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * It may be assumed that this function implies a write memory barrier before
++ * changing the task state if and only if any tasks are woken up.
++ */
++int wake_up_process(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++ return try_to_wake_up(p, state, 0);
++}
++
++static void time_slice_expired(struct task_struct *p, struct rq *rq);
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ */
++int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p)
++{
++ unsigned long flags;
++ int cpu = get_cpu();
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++ INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++ /*
++ * We mark the process as NEW here. This guarantees that
++ * nobody will actually run it, and a signal or other external
++ * event cannot wake it up and insert it on the runqueue either.
++ */
++ p->state = TASK_NEW;
++
++ /*
++	 * The process state is set to the same value as that of the process
++	 * executing do_fork(), that is, running. This guarantees that nobody will
++ * actually run it, and a signal or other external event cannot wake
++ * it up and insert it on the runqueue either.
++ */
++
++ /* Should be reset in fork.c but done here for ease of MuQSS patching */
++ p->on_cpu =
++ p->on_rq =
++ p->utime =
++ p->stime =
++ p->sched_time =
++ p->stime_ns =
++ p->utime_ns = 0;
++ skiplist_node_init(&p->node);
++
++ /*
++ * Revert to default priority/policy on fork if requested.
++ */
++ if (unlikely(p->sched_reset_on_fork)) {
++ if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
++ p->policy = SCHED_NORMAL;
++ p->normal_prio = normal_prio(p);
++ }
++
++ if (PRIO_TO_NICE(p->static_prio) < 0) {
++ p->static_prio = NICE_TO_PRIO(0);
++ p->normal_prio = p->static_prio;
++ }
++
++ /*
++ * We don't need the reset flag anymore after the fork. It has
++ * fulfilled its duty:
++ */
++ p->sched_reset_on_fork = 0;
++ }
++
++ /*
++ * Silence PROVE_RCU.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ set_task_cpu(p, cpu);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++#ifdef CONFIG_SCHED_INFO
++ if (unlikely(sched_info_on()))
++ memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++ init_task_preempt_count(p);
++
++ put_cpu();
++ return 0;
++}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++static bool __initdata __sched_schedstats = false;
++
++static void set_schedstats(bool enabled)
++{
++ if (enabled)
++ static_branch_enable(&sched_schedstats);
++ else
++ static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++ if (!schedstat_enabled()) {
++ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++ static_branch_enable(&sched_schedstats);
++ }
++}
++
++static int __init setup_schedstats(char *str)
++{
++ int ret = 0;
++ if (!str)
++ goto out;
++
++ /*
++ * This code is called before jump labels have been set up, so we can't
++ * change the static branch directly just yet. Instead set a temporary
++ * variable so init_schedstats() can do it later.
++ */
++ if (!strcmp(str, "enable")) {
++ __sched_schedstats = true;
++ ret = 1;
++ } else if (!strcmp(str, "disable")) {
++ __sched_schedstats = false;
++ ret = 1;
++ }
++out:
++ if (!ret)
++ pr_warn("Unable to parse schedstats=\n");
++
++ return ret;
++}
++__setup("schedstats=", setup_schedstats);
++
++static void __init init_schedstats(void)
++{
++ set_schedstats(__sched_schedstats);
++}
++
++#ifdef CONFIG_PROC_SYSCTL
++int sysctl_schedstats(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ struct ctl_table t;
++ int err;
++ int state = static_branch_likely(&sched_schedstats);
++
++ if (write && !capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ t = *table;
++ t.data = &state;
++ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++ if (err < 0)
++ return err;
++ if (write)
++ set_schedstats(state);
++ return err;
++}
++#endif /* CONFIG_PROC_SYSCTL */
++#else /* !CONFIG_SCHEDSTATS */
++static inline void init_schedstats(void) {}
++#endif /* CONFIG_SCHEDSTATS */
++
++static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p);
++
++static void account_task_cpu(struct rq *rq, struct task_struct *p)
++{
++ update_clocks(rq);
++ /* This isn't really a context switch but accounting is the same */
++ update_cpu_clock_switch(rq, p);
++ p->last_ran = rq->niffies;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++static inline int hrexpiry_enabled(struct rq *rq)
++{
++ if (unlikely(!cpu_active(cpu_of(rq)) || !sched_smp_initialized))
++ return 0;
++ return hrtimer_is_hres_active(&rq->hrexpiry_timer);
++}
++
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++static inline void hrexpiry_clear(struct rq *rq)
++{
++ if (!hrexpiry_enabled(rq))
++ return;
++ if (hrtimer_active(&rq->hrexpiry_timer))
++ hrtimer_cancel(&rq->hrexpiry_timer);
++}
++
++/*
++ * High-resolution time_slice expiry.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrexpiry(struct hrtimer *timer)
++{
++ struct rq *rq = container_of(timer, struct rq, hrexpiry_timer);
++ struct task_struct *p;
++
++ /* This can happen during CPU hotplug / resume */
++ if (unlikely(cpu_of(rq) != smp_processor_id()))
++ goto out;
++
++ /*
++ * We're doing this without the runqueue lock but this should always
++ * be run on the local CPU. Time slice should run out in __schedule
++ * but we set it to zero here in case niffies is slightly less.
++ */
++ p = rq->curr;
++ p->time_slice = 0;
++ __set_tsk_resched(p);
++out:
++ return HRTIMER_NORESTART;
++}
++
++/*
++ * Called to set the hrexpiry timer state.
++ *
++ * called with irqs disabled from the local CPU only
++ */
++static void hrexpiry_start(struct rq *rq, u64 delay)
++{
++ if (!hrexpiry_enabled(rq))
++ return;
++
++ hrtimer_start(&rq->hrexpiry_timer, ns_to_ktime(delay),
++ HRTIMER_MODE_REL_PINNED);
++}
++
++static void init_rq_hrexpiry(struct rq *rq)
++{
++ hrtimer_init(&rq->hrexpiry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ rq->hrexpiry_timer.function = hrexpiry;
++}
++
++static inline int rq_dither(struct rq *rq)
++{
++ if (!hrexpiry_enabled(rq))
++ return HALF_JIFFY_US;
++ return 0;
++}
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++ struct task_struct *parent, *rq_curr;
++ struct rq *rq, *new_rq;
++ unsigned long flags;
++
++ parent = p->parent;
++
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ p->state = TASK_RUNNING;
++ /* Task_rq can't change yet on a new task */
++ new_rq = rq = task_rq(p);
++ if (unlikely(needs_other_cpu(p, task_cpu(p)))) {
++ set_task_cpu(p, valid_task_cpu(p));
++ new_rq = task_rq(p);
++ }
++
++ double_rq_lock(rq, new_rq);
++ rq_curr = rq->curr;
++
++ /*
++ * Make sure we do not leak PI boosting priority to the child.
++ */
++ p->prio = rq_curr->normal_prio;
++
++ trace_sched_wakeup_new(p);
++
++ /*
++ * Share the timeslice between parent and child, thus the
++ * total amount of pending timeslices in the system doesn't change,
++ * resulting in more scheduling fairness. If it's negative, it won't
++ * matter since that's the same as being 0. rq->rq_deadline is only
++ * modified within schedule() so it is always equal to
++ * current->deadline.
++ */
++ account_task_cpu(rq, rq_curr);
++ p->last_ran = rq_curr->last_ran;
++ if (likely(rq_curr->policy != SCHED_FIFO)) {
++ rq_curr->time_slice /= 2;
++ if (rq_curr->time_slice < RESCHED_US) {
++ /*
++ * Forking task has run out of timeslice. Reschedule it and
++ * start its child with a new time slice and deadline. The
++ * child will end up running first because its deadline will
++ * be slightly earlier.
++ */
++ __set_tsk_resched(rq_curr);
++ time_slice_expired(p, new_rq);
++ if (suitable_idle_cpus(p))
++ resched_best_idle(p, task_cpu(p));
++ else if (unlikely(rq != new_rq))
++ try_preempt(p, new_rq);
++ } else {
++ p->time_slice = rq_curr->time_slice;
++ if (rq_curr == parent && rq == new_rq && !suitable_idle_cpus(p)) {
++ /*
++ * The VM isn't cloned, so we're in a good position to
++ * do child-runs-first in anticipation of an exec. This
++ * usually avoids a lot of COW overhead.
++ */
++ __set_tsk_resched(rq_curr);
++ } else {
++ /*
++ * Adjust the hrexpiry since rq_curr will keep
++ * running and its timeslice has been shortened.
++ */
++ hrexpiry_start(rq, US_TO_NS(rq_curr->time_slice));
++ try_preempt(p, new_rq);
++ }
++ }
++ } else {
++ time_slice_expired(p, new_rq);
++ try_preempt(p, new_rq);
++ }
++ activate_task(p, new_rq);
++ double_rq_unlock(rq, new_rq);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
++
++void preempt_notifier_inc(void)
++{
++ static_key_slow_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++ static_key_slow_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++ if (!static_key_false(&preempt_notifier_key))
++ WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++ hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++ hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++ struct preempt_notifier *notifier;
++
++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++ notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++ if (static_key_false(&preempt_notifier_key))
++ __fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++ struct preempt_notifier *notifier;
++
++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++ notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++ if (static_key_false(&preempt_notifier_key))
++ __fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ sched_info_switch(rq, prev, next);
++ perf_event_task_sched_out(prev, next);
++ fire_sched_out_preempt_notifiers(prev, next);
++ prepare_lock_switch(rq, next);
++ prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock. (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static void finish_task_switch(struct task_struct *prev)
++ __releases(rq->lock)
++{
++ struct rq *rq = this_rq();
++ struct mm_struct *mm = rq->prev_mm;
++ long prev_state;
++
++ /*
++ * The previous task will have left us with a preempt_count of 2
++ * because it left us after:
++ *
++ * schedule()
++ * preempt_disable(); // 1
++ * __schedule()
++ * raw_spin_lock_irq(&rq->lock) // 2
++ *
++ * Also, see FORK_PREEMPT_COUNT.
++ */
++ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++ "corrupted preempt_count: %s/%d/0x%x\n",
++ current->comm, current->pid, preempt_count()))
++ preempt_count_set(FORK_PREEMPT_COUNT);
++
++ rq->prev_mm = NULL;
++
++ /*
++ * A task struct has one reference for the use as "current".
++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++ * schedule one last time. The schedule call will never return, and
++ * the scheduled task must drop that reference.
++ *
++ * We must observe prev->state before clearing prev->on_cpu (in
++ * finish_lock_switch), otherwise a concurrent wakeup can get prev
++ * running on another CPU and we could race with its RUNNING -> DEAD
++ * transition, resulting in a double drop.
++ */
++ prev_state = prev->state;
++ vtime_task_switch(prev);
++ perf_event_task_sched_in(prev, current);
++ /*
++ * The membarrier system call requires a full memory barrier
++ * after storing to rq->curr, before going back to user-space.
++ *
++ * TODO: This smp_mb__after_unlock_lock can go away if PPC end
++ * up adding a full barrier to switch_mm(), or we should figure
++ * out if a smp_mb__after_unlock_lock is really the proper API
++ * to use.
++ */
++ smp_mb__after_unlock_lock();
++ finish_lock_switch(rq, prev);
++ finish_arch_post_lock_switch();
++
++ fire_sched_in_preempt_notifiers(current);
++ if (mm)
++ mmdrop(mm);
++ if (unlikely(prev_state == TASK_DEAD)) {
++ /*
++ * Remove function-return probe instances associated with this
++ * task and put them back on the free list.
++ */
++ kprobe_flush_task(prev);
++
++ /* Task is done with its stack. */
++ put_task_stack(prev);
++
++ put_task_struct(prev);
++ }
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++{
++ /*
++ * New tasks start with FORK_PREEMPT_COUNT, see there and
++ * finish_task_switch() for details.
++ *
++ * finish_task_switch() will drop rq->lock() and lower preempt_count
++ * and the preempt_enable() will end up enabling preemption (on
++ * PREEMPT_COUNT kernels).
++ */
++
++ finish_task_switch(prev);
++ preempt_enable();
++
++ if (current->set_child_tid)
++ put_user(task_pid_vnr(current), current->set_child_tid);
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline void
++context_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ struct mm_struct *mm, *oldmm;
++
++ prepare_task_switch(rq, prev, next);
++
++ mm = next->mm;
++ oldmm = prev->active_mm;
++ /*
++ * For paravirt, this is coupled with an exit in switch_to to
++ * combine the page table reload and the switch backend into
++ * one hypercall.
++ */
++ arch_start_context_switch(prev);
++
++ if (!mm) {
++ next->active_mm = oldmm;
++ mmgrab(oldmm);
++ enter_lazy_tlb(oldmm, next);
++ } else
++ switch_mm_irqs_off(oldmm, mm, next);
++
++ if (!prev->mm) {
++ prev->active_mm = NULL;
++ rq->prev_mm = oldmm;
++ }
++ /*
++	 * The runqueue lock will be released by the next
++	 * task (which is an invalid locking op but in the case
++	 * of the scheduler it's an obvious special-case), so we
++	 * do an early lockdep release here:
++ */
++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
++
++ /* Here we just switch the register state and the stack. */
++ switch_to(prev, next, prev);
++ barrier();
++
++ finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned long nr_running(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_running;
++
++ return sum;
++}
++
++static unsigned long nr_uninterruptible(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_uninterruptible;
++
++ return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race. The caller is responsible to use it correctly, for example:
++ *
++ * - from a non-preemptable section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++ struct rq *rq = cpu_rq(smp_processor_id());
++
++	return rq_load(rq) == 1;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches(void)
++{
++ int i;
++ unsigned long long sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += cpu_rq(i)->nr_switches;
++
++ return sum;
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU; only that
++ * CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means that, when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, by reason of under-accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU, it can wake to another CPU than it
++ * blocked on. This means the per CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned long nr_iowait(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += atomic_read(&cpu_rq(i)->nr_iowait);
++
++ return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpufreq menu
++ * governor, are using nonsensical data: boosting frequency for a CPU that has
++ * IO-wait, even though the blocked task might not end up running there when it
++ * does become runnable.
++ */
++
++unsigned long nr_iowait_cpu(int cpu)
++{
++ struct rq *this = cpu_rq(cpu);
++ return atomic_read(&this->nr_iowait);
++}
++
++unsigned long nr_active(void)
++{
++ return nr_running() + nr_uninterruptible();
++}
++
++/*
++ * I/O wait is the number of running or queued tasks with their ->rq pointer
++ * set to this CPU, i.e. the CPU they are most likely to run on.
++ */
++void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
++{
++ struct rq *rq = this_rq();
++
++ *nr_waiters = atomic_read(&rq->nr_iowait);
++ *load = rq_load(rq);
++}
++
++/* Variables and functions for calc_load */
++static unsigned long calc_load_update;
++unsigned long avenrun[3];
++EXPORT_SYMBOL(avenrun);
++
++/**
++ * get_avenrun - get the load average array
++ * @loads: pointer to dest load array
++ * @offset: offset to add
++ * @shift: shift count to shift the result left
++ *
++ * These values are estimates at best, so no need for locking.
++ */
++void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
++{
++ loads[0] = (avenrun[0] + offset) << shift;
++ loads[1] = (avenrun[1] + offset) << shift;
++ loads[2] = (avenrun[2] + offset) << shift;
++}
++
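++/*
++ * Fixed-point exponential moving average used for the load averages:
++ * newload = load * exp + active * (FIXED_1 - exp), rounded up via
++ * FIXED_1 - 1 whenever active >= load.
++ */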
++static unsigned long
++calc_load(unsigned long load, unsigned long exp, unsigned long active)
++{
++ unsigned long newload;
++
++ newload = load * exp + active * (FIXED_1 - exp);
++ if (active >= load)
++ newload += FIXED_1-1;
++
++ return newload / FIXED_1;
++}
++
++/*
++ * calc_load - update the avenrun load estimates every LOAD_FREQ seconds.
++ */
++void calc_global_load(unsigned long ticks)
++{
++ long active;
++
++ if (time_before(jiffies, READ_ONCE(calc_load_update)))
++ return;
++ active = nr_active() * FIXED_1;
++
++ avenrun[0] = calc_load(avenrun[0], EXP_1, active);
++ avenrun[1] = calc_load(avenrun[1], EXP_5, active);
++ avenrun[2] = calc_load(avenrun[2], EXP_15, active);
++
++ calc_load_update = jiffies + LOAD_FREQ;
++}
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++#ifdef CONFIG_PARAVIRT
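++/*
++ * Convert paravirt steal time in nanoseconds to ticks; only fall back to a
++ * full division for implausibly large (> 1s) values.
++ */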
++static inline u64 steal_ticks(u64 steal)
++{
++ if (unlikely(steal > NSEC_PER_SEC))
++ return div_u64(steal, TICK_NSEC);
++
++ return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
++}
++#endif
++
++#ifndef nsecs_to_cputime
++# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
++#endif
++
++/*
++ * On each tick, add the number of nanoseconds to the unbanked variables and
++ * once one tick's worth has accumulated, account it allowing for accurate
++ * sub-tick accounting and totals.
++ */
++static void pc_idle_time(struct rq *rq, struct task_struct *idle, unsigned long ns)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ unsigned long ticks;
++
++ if (atomic_read(&rq->nr_iowait) > 0) {
++ rq->iowait_ns += ns;
++ if (rq->iowait_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->iowait_ns);
++ cpustat[CPUTIME_IOWAIT] += (__force u64)TICK_NSEC * ticks;
++ rq->iowait_ns %= JIFFY_NS;
++ }
++ } else {
++ rq->idle_ns += ns;
++ if (rq->idle_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->idle_ns);
++ cpustat[CPUTIME_IDLE] += (__force u64)TICK_NSEC * ticks;
++ rq->idle_ns %= JIFFY_NS;
++ }
++ }
++ acct_update_integrals(idle);
++}
++
++static void pc_system_time(struct rq *rq, struct task_struct *p,
++ int hardirq_offset, unsigned long ns)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ unsigned long ticks;
++
++ p->stime_ns += ns;
++ if (p->stime_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(p->stime_ns);
++ p->stime_ns %= JIFFY_NS;
++ p->stime += (__force u64)TICK_NSEC * ticks;
++ account_group_system_time(p, TICK_NSEC * ticks);
++ }
++ p->sched_time += ns;
++ account_group_exec_runtime(p, ns);
++
++ if (hardirq_count() - hardirq_offset) {
++ rq->irq_ns += ns;
++ if (rq->irq_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->irq_ns);
++ cpustat[CPUTIME_IRQ] += (__force u64)TICK_NSEC * ticks;
++ rq->irq_ns %= JIFFY_NS;
++ }
++ } else if (in_serving_softirq()) {
++ rq->softirq_ns += ns;
++ if (rq->softirq_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->softirq_ns);
++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_NSEC * ticks;
++ rq->softirq_ns %= JIFFY_NS;
++ }
++ } else {
++ rq->system_ns += ns;
++ if (rq->system_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->system_ns);
++ cpustat[CPUTIME_SYSTEM] += (__force u64)TICK_NSEC * ticks;
++ rq->system_ns %= JIFFY_NS;
++ }
++ }
++ acct_update_integrals(p);
++}
++
++static void pc_user_time(struct rq *rq, struct task_struct *p, unsigned long ns)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ unsigned long ticks;
++
++ p->utime_ns += ns;
++ if (p->utime_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(p->utime_ns);
++ p->utime_ns %= JIFFY_NS;
++ p->utime += (__force u64)TICK_NSEC * ticks;
++ account_group_user_time(p, TICK_NSEC * ticks);
++ }
++ p->sched_time += ns;
++ account_group_exec_runtime(p, ns);
++
++ if (this_cpu_ksoftirqd() == p) {
++ /*
++ * ksoftirqd time does not get accounted in cpu_softirq_time.
++ * So, we have to handle it separately here.
++ */
++ rq->softirq_ns += ns;
++ if (rq->softirq_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->softirq_ns);
++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_NSEC * ticks;
++ rq->softirq_ns %= JIFFY_NS;
++ }
++ }
++
++ if (task_nice(p) > 0 || idleprio_task(p)) {
++ rq->nice_ns += ns;
++ if (rq->nice_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->nice_ns);
++ cpustat[CPUTIME_NICE] += (__force u64)TICK_NSEC * ticks;
++ rq->nice_ns %= JIFFY_NS;
++ }
++ } else {
++ rq->user_ns += ns;
++ if (rq->user_ns >= JIFFY_NS) {
++ ticks = NS_TO_JIFFIES(rq->user_ns);
++ cpustat[CPUTIME_USER] += (__force u64)TICK_NSEC * ticks;
++ rq->user_ns %= JIFFY_NS;
++ }
++ }
++ acct_update_integrals(p);
++}
++
++/*
++ * This is called on clock ticks.
++ * Bank in p->sched_time the ns elapsed since the last tick or switch.
++ * CPU scheduler quota accounting is also performed here in microseconds.
++ */
++static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
++{
++ s64 account_ns = rq->niffies - p->last_ran;
++ struct task_struct *idle = rq->idle;
++
++ /* Accurate tick timekeeping */
++ if (user_mode(get_irq_regs()))
++ pc_user_time(rq, p, account_ns);
++ else if (p != idle || (irq_count() != HARDIRQ_OFFSET)) {
++ pc_system_time(rq, p, HARDIRQ_OFFSET, account_ns);
++ } else
++ pc_idle_time(rq, idle, account_ns);
++
++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
++ if (p->policy != SCHED_FIFO && p != idle)
++ p->time_slice -= NS_TO_US(account_ns);
++
++ p->last_ran = rq->niffies;
++}
++
++/*
++ * This is called on context switches.
++ * Bank in p->sched_time the ns elapsed since the last tick or switch.
++ * CPU scheduler quota accounting is also performed here in microseconds.
++ */
++static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
++{
++ s64 account_ns = rq->niffies - p->last_ran;
++ struct task_struct *idle = rq->idle;
++
++ /* Accurate subtick timekeeping */
++ if (p != idle)
++ pc_user_time(rq, p, account_ns);
++ else
++ pc_idle_time(rq, idle, account_ns);
++
++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
++ if (p->policy != SCHED_FIFO && p != idle)
++ p->time_slice -= NS_TO_US(account_ns);
++}
++
++/*
++ * Return any ns on the sched_clock that have not yet been accounted in
++ * @p in case that task is currently running.
++ *
++ * Called with task_rq_lock(p) held.
++ */
++static inline u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
++{
++ u64 ns = 0;
++
++ /*
++ * Must be ->curr _and_ ->on_rq. If dequeued, we would
++ * project cycles that may never be accounted to this
++ * thread, breaking clock_gettime().
++ */
++ if (p == rq->curr && task_on_rq_queued(p)) {
++ update_clocks(rq);
++ ns = rq->niffies - p->last_ran;
++ }
++
++ return ns;
++}
++
++/*
++ * Return accounted runtime for the task.
++ * In case the task is currently running, also add the pending runtime that
++ * has not been accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++ unsigned long flags;
++ struct rq *rq;
++ u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++ /*
++ * 64-bit doesn't need locks to atomically read a 64-bit value.
++ * So we have an optimization chance when the task's delta_exec is 0.
++ * Reading ->on_cpu is racy, but this is ok.
++ *
++ * If we race with it leaving CPU, we'll take a lock. So we're correct.
++ * If we race with it entering CPU, unaccounted time is 0. This is
++ * indistinguishable from the read occurring a few cycles earlier.
++ * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++ * been accounted, so we're correct here as well.
++ */
++ if (!p->on_cpu || !task_on_rq_queued(p))
++ return tsk_seruntime(p);
++#endif
++
++ rq = task_rq_lock(p, &flags);
++ ns = p->sched_time + do_task_delta_exec(p, rq);
++ task_rq_unlock(rq, p, &flags);
++
++ return ns;
++}
++
++/*
++ * Functions to test for when SCHED_ISO tasks have used their allocated
++ * quota as real time scheduling and convert them back to SCHED_NORMAL. All
++ * data is modified only by the local runqueue during scheduler_tick with
++ * interrupts disabled.
++ */
++
++/*
++ * Test if SCHED_ISO tasks have run longer than their allotted period as RT
++ * tasks and set the refractory flag if necessary. There is 10% hysteresis
++ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
++ * slow division.
++ */
++static inline void iso_tick(struct rq *rq)
++{
++ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - 1) / ISO_PERIOD;
++ rq->iso_ticks += 100;
++ if (rq->iso_ticks > ISO_PERIOD * sched_iso_cpu) {
++ rq->iso_refractory = true;
++ if (unlikely(rq->iso_ticks > ISO_PERIOD * 100))
++ rq->iso_ticks = ISO_PERIOD * 100;
++ }
++}
++
++/* No SCHED_ISO task was running so decrease rq->iso_ticks */
++static inline void no_iso_tick(struct rq *rq, int ticks)
++{
++ if (rq->iso_ticks > 0 || rq->iso_refractory) {
++ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - ticks) / ISO_PERIOD;
++ if (rq->iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) {
++ rq->iso_refractory = false;
++ if (unlikely(rq->iso_ticks < 0))
++ rq->iso_ticks = 0;
++ }
++ }
++}
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static void task_running_tick(struct rq *rq)
++{
++ struct task_struct *p = rq->curr;
++
++ /*
++ * If a SCHED_ISO task is running we increment the iso_ticks. In
++ * order to prevent SCHED_ISO tasks from causing starvation in the
++ * presence of true RT tasks we account those as iso_ticks as well.
++ */
++ if (rt_task(p) || task_running_iso(p))
++ iso_tick(rq);
++ else
++ no_iso_tick(rq, 1);
++
++ /* SCHED_FIFO tasks never run out of timeslice. */
++ if (p->policy == SCHED_FIFO)
++ return;
++
++ if (iso_task(p)) {
++ if (task_running_iso(p)) {
++ if (rq->iso_refractory) {
++ /*
++ * SCHED_ISO task is running as RT and limit
++ * has been hit. Force it to reschedule as
++ * SCHED_NORMAL by zeroing its time_slice
++ */
++ p->time_slice = 0;
++ }
++ } else if (!rq->iso_refractory) {
++ /* Can now run as ISO again. Reschedule to pick up its prio */
++ goto out_resched;
++ }
++ }
++
++ /*
++ * Tasks that were scheduled in the first half of a tick are not
++ * allowed to run into the 2nd half of the next tick if they will
++ * run out of time slice in the interim. Otherwise, if they have
++ * less than RESCHED_US μs of time slice left they will be rescheduled.
++ * Dither is used as a backup for when hrexpiry is disabled or high
++ * resolution timers are not configured in.
++ */
++ if (p->time_slice - rq->dither >= RESCHED_US)
++ return;
++out_resched:
++ rq_lock(rq);
++ __set_tsk_resched(p);
++ rq_unlock(rq);
++}
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * We can stop the timer tick any time highres timers are active since
++ * we rely entirely on highres timeouts for task expiry rescheduling.
++ */
++static void sched_stop_tick(struct rq *rq, int cpu)
++{
++ if (!hrexpiry_enabled(rq))
++ return;
++ if (!tick_nohz_full_enabled())
++ return;
++ if (!tick_nohz_full_cpu(cpu))
++ return;
++ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++
++static inline void sched_start_tick(struct rq *rq, int cpu)
++{
++ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++
++/**
++ * scheduler_tick_max_deferment
++ *
++ * Keep at least one tick per second when a single
++ * active task is running.
++ *
++ * This makes sure that uptime continues to move forward, even
++ * with a very low granularity.
++ *
++ * Return: Maximum deferment in nanoseconds.
++ */
++u64 scheduler_tick_max_deferment(void)
++{
++ struct rq *rq = this_rq();
++ unsigned long next, now = READ_ONCE(jiffies);
++
++ next = rq->last_jiffy + HZ;
++
++ if (time_before_eq(next, now))
++ return 0;
++
++ return jiffies_to_nsecs(next - now);
++}
++#else
++static inline void sched_stop_tick(struct rq *rq, int cpu)
++{
++}
++
++static inline void sched_start_tick(struct rq *rq, int cpu)
++{
++}
++#endif
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++ int cpu __maybe_unused = smp_processor_id();
++ struct rq *rq = cpu_rq(cpu);
++
++ sched_clock_tick();
++ update_clocks(rq);
++ update_load_avg(rq, 0);
++ update_cpu_clock_tick(rq, rq->curr);
++ if (!rq_idle(rq))
++ task_running_tick(rq);
++ else if (rq->last_jiffy > rq->last_scheduler_tick)
++ no_iso_tick(rq, rq->last_jiffy - rq->last_scheduler_tick);
++ rq->last_scheduler_tick = rq->last_jiffy;
++ rq->last_tick = rq->clock;
++ perf_event_task_tick();
++ sched_stop_tick(rq, cpu);
++}
++
++#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
++ defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++ if (preempt_count() == val) {
++ unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++ current->preempt_disable_ip = ip;
++#endif
++ trace_preempt_off(CALLER_ADDR0, ip);
++ }
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++ return;
++#endif
++ __preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Spinlock count overflowing soon?
++ */
++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++ PREEMPT_MASK - 10);
++#endif
++ preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++/*
++ * If the value passed in equals the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++ if (preempt_count() == val)
++ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++ return;
++ /*
++ * Is the spinlock portion underflowing?
++ */
++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++ !(preempt_count() & PREEMPT_MASK)))
++ return;
++#endif
++
++ preempt_latency_stop(val);
++ __preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ return p->preempt_disable_ip;
++#else
++ return 0;
++#endif
++}
++
++/*
++ * The time_slice is only refilled when it is empty and that is when we set a
++ * new deadline. Make sure update_clocks has been called recently to update
++ * rq->niffies.
++ */
++static void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++ p->time_slice = timeslice();
++ p->deadline = rq->niffies + task_deadline_diff(p);
++#ifdef CONFIG_SMT_NICE
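++ /*
++ * Refresh the SMT bias as well: kernel threads (no mm) get 0,
++ * idleprio tasks 0 or 1, RT tasks the largest bias, then ISO tasks,
++ * then normal tasks whose bias decays each expiry and is refilled
++ * from their static priority.
++ */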
++ if (!p->mm)
++ p->smt_bias = 0;
++ else if (rt_task(p))
++ p->smt_bias = 1 << 30;
++ else if (task_running_iso(p))
++ p->smt_bias = 1 << 29;
++ else if (idleprio_task(p)) {
++ if (task_running_idle(p))
++ p->smt_bias = 0;
++ else
++ p->smt_bias = 1;
++ } else if (--p->smt_bias < 1)
++ p->smt_bias = MAX_PRIO - p->static_prio;
++#endif
++}
++
++/*
++ * Timeslices below RESCHED_US are considered as good as expired as there's no
++ * point rescheduling when there's so little time left. SCHED_BATCH tasks
++ * have been flagged as not latency sensitive and likely to be fully CPU
++ * bound, so every time they're rescheduled they have their time_slice
++ * refilled, but get a new, later deadline so they have little effect on
++ * SCHED_NORMAL tasks.
++ */
++static inline void check_deadline(struct task_struct *p, struct rq *rq)
++{
++ if (p->time_slice < RESCHED_US || batch_task(p))
++ time_slice_expired(p, rq);
++}
++
++/*
++ * Task selection with skiplists is a simple matter of picking off the first
++ * task in the sorted list, an O(1) operation. The lookup is amortised O(1)
++ * being bound to the number of processors.
++ *
++ * Runqueues are selectively locked based on their unlocked data and then
++ * unlocked if not needed. At most 3 locks will be held at any time and are
++ * released as soon as they're no longer needed. All balancing between CPUs
++ * is thus done here in an extremely simple first-come, best-fit manner.
++ *
++ * This iterates over runqueues in cache locality order. In interactive mode
++ * it iterates over all CPUs and finds the task with the best key/deadline.
++ * In non-interactive mode it will only take a task if it's from the current
++ * runqueue or a runqueue with more tasks than the current one with a better
++ * key/deadline.
++ */
++#ifdef CONFIG_SMP
++static inline struct task_struct
++*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
++{
++ struct rq *locked = NULL, *chosen = NULL;
++ struct task_struct *edt = idle;
++ int i, best_entries = 0;
++ u64 best_key = ~0ULL;
++
++ for (i = 0; i < num_possible_cpus(); i++) {
++ struct rq *other_rq = rq_order(rq, i);
++ int entries = other_rq->sl->entries;
++ skiplist_node *next;
++
++ /*
++ * Check for queued entries locklessly first. The local runqueue
++ * is locked so entries will always be accurate.
++ */
++ if (!sched_interactive) {
++ /*
++ * Don't reschedule balance across nodes unless the CPU
++ * is idle.
++ */
++ if (edt != idle && rq->cpu_locality[other_rq->cpu] > 3)
++ break;
++ if (entries <= best_entries)
++ continue;
++ } else if (!entries)
++ continue;
++
++ /* if (i) implies other_rq != rq */
++ if (i) {
++ /* Check for the best key queued locklessly first */
++ if (other_rq->best_key >= best_key)
++ continue;
++
++ if (unlikely(!trylock_rq(rq, other_rq)))
++ continue;
++
++ /* Need to reevaluate entries after locking */
++ entries = other_rq->sl->entries;
++ if (unlikely(!entries)) {
++ unlock_rq(other_rq);
++ continue;
++ }
++ }
++
++ next = &other_rq->node;
++ /*
++ * In interactive mode we check beyond the best entry on other
++ * runqueues if we can't get the best for smt or affinity
++ * reasons.
++ */
++ while ((next = next->next[0]) != &other_rq->node) {
++ struct task_struct *p;
++ u64 key = next->key;
++
++ /* Reevaluate key after locking */
++ if (key >= best_key)
++ break;
++
++ p = next->value;
++ if (!smt_schedule(p, rq)) {
++ if (i && !sched_interactive)
++ break;
++ continue;
++ }
++
++ /* Make sure affinity is ok */
++ if (i) {
++ if (needs_other_cpu(p, cpu)) {
++ if (sched_interactive)
++ continue;
++ break;
++ }
++ /* From this point on p is the best so far */
++ if (locked)
++ unlock_rq(locked);
++ chosen = locked = other_rq;
++ }
++ best_entries = entries;
++ best_key = key;
++ edt = p;
++ break;
++ }
++ /*
++ * rq->preempting is a hint only as the state may have changed
++ * since it was set with the resched call, but if we have met
++ * the condition we can break out here.
++ */
++ if (edt == rq->preempting)
++ break;
++ if (i && other_rq != chosen)
++ unlock_rq(other_rq);
++ }
++
++ if (likely(edt != idle))
++ take_task(rq, cpu, edt);
++
++ if (locked)
++ unlock_rq(locked);
++
++ rq->preempting = NULL;
++
++ return edt;
++}
++#else /* CONFIG_SMP */
++static inline struct task_struct
++*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
++{
++ struct task_struct *edt;
++
++ if (unlikely(!rq->sl->entries))
++ return idle;
++ edt = rq->node.next[0]->value;
++ take_task(rq, cpu, edt);
++ return edt;
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++ /* Save this before calling printk(), since that will clobber it */
++ unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++ if (oops_in_progress)
++ return;
++
++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++ prev->comm, prev->pid, preempt_count());
++
++ debug_show_held_locks(prev);
++ print_modules();
++ if (irqs_disabled())
++ print_irqtrace_events(prev);
++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++ && in_atomic_preempt_off()) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(preempt_disable_ip);
++ pr_cont("\n");
++ }
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++ if (task_stack_end_corrupted(prev))
++ panic("corrupted stack end detected inside scheduler\n");
++#endif
++
++ if (unlikely(in_atomic_preempt_off())) {
++ __schedule_bug(prev);
++ preempt_count_set(PREEMPT_DISABLED);
++ }
++ rcu_sleep_check();
++
++ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++ schedstat_inc(this_rq()->sched_count);
++}
++
++/*
++ * The currently running task's information is all stored in rq local data
++ * which is only modified by the local CPU.
++ */
++static inline void set_rq_task(struct rq *rq, struct task_struct *p)
++{
++ if (p == rq->idle || p->policy == SCHED_FIFO)
++ hrexpiry_clear(rq);
++ else
++ hrexpiry_start(rq, US_TO_NS(p->time_slice));
++ if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
++ rq->dither = 0;
++ else
++ rq->dither = rq_dither(rq);
++
++ rq->rq_deadline = p->deadline;
++ rq->rq_prio = p->prio;
++#ifdef CONFIG_SMT_NICE
++ rq->rq_mm = p->mm;
++ rq->rq_smt_bias = p->smt_bias;
++#endif
++}
++
++#ifdef CONFIG_SMT_NICE
++static void check_no_siblings(struct rq __maybe_unused *this_rq) {}
++static void wake_no_siblings(struct rq __maybe_unused *this_rq) {}
++static void (*check_siblings)(struct rq *this_rq) = &check_no_siblings;
++static void (*wake_siblings)(struct rq *this_rq) = &wake_no_siblings;
++
++/*
++ * Iterate over SMT siblings when we've scheduled a process on a CPU and
++ * decide whether they should continue running or be descheduled.
++ */
++static void check_smt_siblings(struct rq *this_rq)
++{
++ int other_cpu;
++
++ for_each_cpu(other_cpu, &this_rq->thread_mask) {
++ struct task_struct *p;
++ struct rq *rq;
++
++ rq = cpu_rq(other_cpu);
++ if (rq_idle(rq))
++ continue;
++ p = rq->curr;
++ if (!smt_schedule(p, this_rq))
++ resched_curr(rq);
++ }
++}
++
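++/* Trigger a resched on any idle SMT siblings of this CPU. */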
++static void wake_smt_siblings(struct rq *this_rq)
++{
++ int other_cpu;
++
++ for_each_cpu(other_cpu, &this_rq->thread_mask) {
++ struct rq *rq;
++
++ rq = cpu_rq(other_cpu);
++ if (rq_idle(rq))
++ resched_idle(rq);
++ }
++}
++#else
++static void check_siblings(struct rq __maybe_unused *this_rq) {}
++static void wake_siblings(struct rq __maybe_unused *this_rq) {}
++#endif
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ * paths. For example, see arch/x86/entry_64.S.
++ *
++ * To drive preemption between tasks, the scheduler sets the flag in timer
++ * interrupt handler scheduler_tick().
++ *
++ * 3. Wakeups don't really cause entry into schedule(). They add a
++ * task to the run-queue and that's it.
++ *
++ * Now, if the new task added to the run-queue preempts the current
++ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ * called on the nearest possible occasion:
++ *
++ * - If the kernel is preemptible (CONFIG_PREEMPT=y):
++ *
++ * - in syscall or exception context, at the next outmost
++ * preempt_enable(). (this might be as soon as the wake_up()'s
++ * spin_unlock()!)
++ *
++ * - in IRQ context, return from interrupt-handler to
++ * preemptible context
++ *
++ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
++ * then at the next:
++ *
++ * - cond_resched() call
++ * - explicit schedule() call
++ * - return from syscall or exception to user-space
++ * - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(bool preempt)
++{
++ struct task_struct *prev, *next, *idle;
++ unsigned long *switch_count;
++ bool deactivate = false;
++ struct rq *rq;
++ u64 niffies;
++ int cpu;
++
++ cpu = smp_processor_id();
++ rq = cpu_rq(cpu);
++ prev = rq->curr;
++ idle = rq->idle;
++
++ schedule_debug(prev);
++
++ local_irq_disable();
++ rcu_note_context_switch(preempt);
++
++ /*
++ * Make sure that signal_pending_state()->signal_pending() below
++ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++ * done by the caller to avoid the race with signal_wake_up().
++ */
++ rq_lock(rq);
++ smp_mb__after_spinlock();
++#ifdef CONFIG_SMP
++ if (rq->preempt) {
++ /*
++ * Make sure resched_curr hasn't triggered a preemption
++ * locklessly on a task that has since scheduled away. Spurious
++ * wakeup of idle is okay though.
++ */
++ if (unlikely(preempt && prev != idle && !test_tsk_need_resched(prev))) {
++ rq->preempt = NULL;
++ clear_preempt_need_resched();
++ rq_unlock_irq(rq);
++ return;
++ }
++ rq->preempt = NULL;
++ }
++#endif
++
++ switch_count = &prev->nivcsw;
++ if (!preempt && prev->state) {
++ if (unlikely(signal_pending_state(prev->state, prev))) {
++ prev->state = TASK_RUNNING;
++ } else {
++ deactivate = true;
++ prev->on_rq = 0;
++
++ if (prev->in_iowait) {
++ atomic_inc(&rq->nr_iowait);
++ delayacct_blkio_start();
++ }
++
++ /*
++ * If a worker is going to sleep, notify and
++ * ask workqueue whether it wants to wake up a
++ * task to maintain concurrency. If so, wake
++ * up the task.
++ */
++ if (prev->flags & PF_WQ_WORKER) {
++ struct task_struct *to_wakeup;
++
++ to_wakeup = wq_worker_sleeping(prev);
++ if (to_wakeup)
++ try_to_wake_up_local(to_wakeup);
++ }
++ }
++ switch_count = &prev->nvcsw;
++ }
++
++ /*
++ * Store the niffy value here for use by the next task's last_ran
++ * below to avoid losing niffies due to update_clocks being called
++ * again after this point.
++ */
++ update_clocks(rq);
++ niffies = rq->niffies;
++ update_cpu_clock_switch(rq, prev);
++
++ clear_tsk_need_resched(prev);
++ clear_preempt_need_resched();
++
++ if (idle != prev) {
++ check_deadline(prev, rq);
++ return_task(prev, rq, cpu, deactivate);
++ }
++
++ next = earliest_deadline_task(rq, cpu, idle);
++ if (likely(next->prio != PRIO_LIMIT))
++ clear_cpuidle_map(cpu);
++ else {
++ set_cpuidle_map(cpu);
++ update_load_avg(rq, 0);
++ }
++
++ set_rq_task(rq, next);
++ next->last_ran = niffies;
++
++ if (likely(prev != next)) {
++ /*
++ * Don't reschedule an idle task or deactivated tasks
++ */
++ if (prev != idle && !deactivate)
++ resched_suitable_idle(prev);
++ if (next != idle)
++ check_siblings(rq);
++ else
++ wake_siblings(rq);
++ rq->nr_switches++;
++ rq->curr = next;
++ /*
++ * The membarrier system call requires each architecture
++ * to have a full memory barrier after updating
++ * rq->curr, before returning to user-space. For TSO
++ * (e.g. x86), the architecture must provide its own
++ * barrier in switch_mm(). For weakly ordered machines
++ * for which spin_unlock() acts as a full memory
++ * barrier, finish_lock_switch() in common code takes
++ * care of this barrier. For weakly ordered machines for
++ * which spin_unlock() acts as a RELEASE barrier (only
++ * arm64 and PowerPC), arm64 has a full barrier in
++ * switch_to(), and PowerPC has
++ * smp_mb__after_unlock_lock() before
++ * finish_lock_switch().
++ */
++ ++*switch_count;
++
++ trace_sched_switch(preempt, prev, next);
++ context_switch(rq, prev, next); /* unlocks the rq */
++ } else {
++ check_siblings(rq);
++ rq_unlock(rq);
++ do_pending_softirq(rq, next);
++ local_irq_enable();
++ }
++}
++
++void __noreturn do_task_dead(void)
++{
++ /*
++ * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
++ * when the following two conditions become true:
++ * - there is a race condition on mmap_sem (it is acquired by
++ * exit_mm()), and
++ * - an SMI occurs before setting TASK_RUNNING
++ * (or the hypervisor of a virtual machine switches to another guest).
++ * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
++ *
++ * To avoid this, we have to wait for tsk->pi_lock, which is held by
++ * try_to_wake_up(), to be released.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ /* Causes final put_task_struct in finish_task_switch(). */
++ __set_current_state(TASK_DEAD);
++
++ /* Tell freezer to ignore us: */
++ current->flags |= PF_NOFREEZE;
++ __schedule(false);
++ BUG();
++
++ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++ for (;;)
++ cpu_relax();
++}
++
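++/*
++ * Called from schedule(): bail out unless the task is really going to sleep
++ * (not PI-blocked, not in a preempt-disabled section, no pending signal),
++ * then flush any plugged block I/O it has queued to avoid deadlocks.
++ */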
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++ if (!tsk->state || tsk_is_pi_blocked(tsk) ||
++ preempt_count() ||
++ signal_pending_state(tsk->state, tsk))
++ return;
++
++ /*
++ * If we are going to sleep and we have plugged IO queued,
++ * make sure to submit it to avoid deadlocks.
++ */
++ if (blk_needs_flush_plug(tsk))
++ blk_schedule_flush_plug(tsk);
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++ struct task_struct *tsk = current;
++
++ sched_submit_work(tsk);
++ do {
++ preempt_disable();
++ __schedule(false);
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++}
++
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (have scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++ /*
++ * As this skips calling sched_submit_work(), which the idle task does
++ * regardless because that function is a nop when the task is in a
++ * TASK_RUNNING state, make sure this isn't used someplace that the
++ * current task can be in any other state. Note, idle is always in the
++ * TASK_RUNNING state.
++ */
++ WARN_ON_ONCE(current->state);
++ do {
++ __schedule(false);
++ } while (need_resched());
++}
++
++#ifdef CONFIG_CONTEXT_TRACKING
++asmlinkage __visible void __sched schedule_user(void)
++{
++ /*
++ * If we come here after a random call to set_need_resched(),
++ * or we have been woken up remotely but the IPI has not yet arrived,
++ * we haven't yet exited the RCU idle mode. Do it here manually until
++ * we find a better solution.
++ *
++ * NB: There are buggy callers of this function. Ideally we
++ * should warn if prev_state != IN_USER, but that will trigger
++ * too frequently to make sense yet.
++ */
++ enum ctx_state prev_state = exception_enter();
++ schedule();
++ exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++ sched_preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++}
++
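++/*
++ * Common preemption loop: keep calling __schedule(true) with preemption
++ * disabled (via the notrace helpers) until need_resched() is clear.
++ */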
++static void __sched notrace preempt_schedule_common(void)
++{
++ do {
++ /*
++ * Because the function tracer can trace preempt_count_sub()
++ * and it also uses preempt_enable/disable_notrace(), if
++ * NEED_RESCHED is set, the preempt_enable_notrace() called
++ * by the function tracer will call this function again and
++ * cause infinite recursion.
++ *
++ * Preemption must be disabled here before the function
++ * tracer can trace. Break up preempt_disable() into two
++ * calls. One to disable preemption without fear of being
++ * traced. The other to still record the preemption latency,
++ * which can also be traced by the function tracer.
++ */
++ preempt_disable_notrace();
++ preempt_latency_start(1);
++ __schedule(true);
++ preempt_latency_stop(1);
++ preempt_enable_no_resched_notrace();
++
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ } while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPT
++/*
++ * this is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable. Kernel preemptions off return from interrupt
++ * occur there and call schedule directly.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++ /*
++ * If there is a non-zero preempt_count or interrupts are disabled,
++ * we do not want to preempt the current task. Just return..
++ */
++ if (likely(!preemptible()))
++ return;
++
++ preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++ enum ctx_state prev_ctx;
++
++ if (likely(!preemptible()))
++ return;
++
++ do {
++ /*
++ * Because the function tracer can trace preempt_count_sub()
++ * and it also uses preempt_enable/disable_notrace(), if
++ * NEED_RESCHED is set, the preempt_enable_notrace() called
++ * by the function tracer will call this function again and
++ * cause infinite recursion.
++ *
++ * Preemption must be disabled here before the function
++ * tracer can trace. Break up preempt_disable() into two
++ * calls. One to disable preemption without fear of being
++ * traced. The other to still record the preemption latency,
++ * which can also be traced by the function tracer.
++ */
++ preempt_disable_notrace();
++ preempt_latency_start(1);
++ /*
++ * Needs preempt disabled in case user_exit() is traced
++ * and the tracer calls preempt_enable_notrace() causing
++ * an infinite recursion.
++ */
++ prev_ctx = exception_enter();
++ __schedule(true);
++ exception_exit(prev_ctx);
++
++ preempt_latency_stop(1);
++ preempt_enable_no_resched_notrace();
++ } while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#endif /* CONFIG_PREEMPT */
++
++/*
++ * this is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with irqs disabled. This will
++ * protect us against recursive calling from irq.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++ enum ctx_state prev_state;
++
++ /* Catch callers which need to be fixed */
++ BUG_ON(preempt_count() || !irqs_disabled());
++
++ prev_state = exception_enter();
++
++ do {
++ preempt_disable();
++ local_irq_enable();
++ __schedule(true);
++ local_irq_disable();
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++
++ exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++ void *key)
++{
++ return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++ if (pi_task)
++ prio = min(prio, pi_task->prio);
++
++ return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++ return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++ int prio, oldprio;
++ struct rq *rq;
++
++ /* XXX used to be waiter->prio, not waiter->task->prio */
++ prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++ /*
++ * If nothing changed; bail early.
++ */
++ if (p->pi_top_task == pi_task && prio == p->prio)
++ return;
++
++ rq = __task_rq_lock(p);
++ update_rq_clock(rq);
++ /*
++ * Set under pi_lock && rq->lock, such that the value can be used under
++ * either lock.
++ *
++ * Note that it takes a lot of trickery to make this pointer cache work
++ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++ * ensure a task is de-boosted (pi_task is set to NULL) before the
++ * task is allowed to run again (and can exit). This ensures the pointer
++ * points to a blocked task -- which guarantees the task is present.
++ */
++ p->pi_top_task = pi_task;
++
++ /*
++ * For FIFO/RR we only need to set prio, if that matches we're done.
++ */
++ if (prio == p->prio)
++ goto out_unlock;
++
++ /*
++ * Idle task boosting is a no-no in general. There is one
++ * exception, when PREEMPT_RT and NOHZ is active:
++ *
++ * The idle task calls get_next_timer_interrupt() and holds
++ * the timer wheel base->lock on the CPU and another CPU wants
++ * to access the timer (probably to cancel it). We can safely
++ * ignore the boosting request, as the idle CPU runs this code
++ * with interrupts disabled and will complete the lock
++ * protected section without being interrupted. So there is no
++ * real need to boost.
++ */
++ if (unlikely(p == rq->idle)) {
++ WARN_ON(p != rq->curr);
++ WARN_ON(p->pi_blocked_on);
++ goto out_unlock;
++ }
++
++ trace_sched_pi_setprio(p, pi_task);
++ oldprio = p->prio;
++ p->prio = prio;
++ if (task_running(rq, p)) {
++ if (prio > oldprio)
++ resched_task(p);
++ } else if (task_queued(p)) {
++ dequeue_task(rq, p, DEQUEUE_SAVE);
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ if (prio < oldprio)
++ try_preempt(p, rq);
++ }
++out_unlock:
++ __task_rq_unlock(rq);
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ return prio;
++}
++#endif
++
++/*
++ * Adjust the deadline for when the priority is to change, before it's
++ * changed.
++ */
++static inline void adjust_deadline(struct task_struct *p, int new_prio)
++{
++ p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p);
++}
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++ int new_static, old_static;
++ unsigned long flags;
++ struct rq *rq;
++
++ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++ return;
++ new_static = NICE_TO_PRIO(nice);
++ /*
++ * We have to be careful, if called from sys_setpriority(),
++ * the task might be in the middle of scheduling on another CPU.
++ */
++ rq = task_rq_lock(p, &flags);
++ update_rq_clock(rq);
++
++ /*
++ * The RT priorities are set via sched_setscheduler(), but we still
++ * allow the 'normal' nice value to be set - but as expected
++ * it won't have any effect on scheduling as long as the task is
++ * not SCHED_NORMAL/SCHED_BATCH:
++ */
++ if (has_rt_policy(p)) {
++ p->static_prio = new_static;
++ goto out_unlock;
++ }
++
++ adjust_deadline(p, new_static);
++ old_static = p->static_prio;
++ p->static_prio = new_static;
++ p->prio = effective_prio(p);
++
++ if (task_queued(p)) {
++ dequeue_task(rq, p, DEQUEUE_SAVE);
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ if (new_static < old_static)
++ try_preempt(p, rq);
++ } else if (task_running(rq, p)) {
++ set_rq_task(rq, p);
++ if (old_static < new_static)
++ resched_task(p);
++ }
++out_unlock:
++ task_rq_unlock(rq, p, &flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++ /* Convert nice value [19,-20] to rlimit style value [1,40] */
++ int nice_rlim = nice_to_rlimit(nice);
++
++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
++ capable(CAP_SYS_NICE));
++}
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++ long nice, retval;
++
++ /*
++ * Setpriority might change our priority at the same moment.
++ * We don't have to worry. Conceptually one call occurs first
++ * and we have a single winner.
++ */
++
++ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++ nice = task_nice(current) + increment;
++
++ nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++ if (increment < 0 && !can_nice(current, nice))
++ return -EPERM;
++
++ retval = security_task_setnice(current, nice);
++ if (retval)
++ return retval;
++
++ set_user_nice(current, nice);
++ return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
++ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO).
++ */
++int task_prio(const struct task_struct *p)
++{
++ int delta, prio = p->prio - MAX_RT_PRIO;
++
++ /* rt tasks and iso tasks */
++ if (prio <= 0)
++ goto out;
++
++ /* Convert to ms to avoid overflows */
++ delta = NS_TO_MS(p->deadline - task_rq(p)->niffies);
++ if (unlikely(delta < 0))
++ delta = 0;
++ delta = delta * 40 / ms_longest_deadline_diff();
++ if (delta <= 80)
++ prio += delta;
++ if (idleprio_task(p))
++ prio += 40;
++out:
++ return prio;
++}
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++ return cpu_curr(cpu) == cpu_rq(cpu)->idle;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the CPU @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++ return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++ return pid ? find_task_by_vpid(pid) : current;
++}
++
++/* Actually do priority change: must hold rq lock. */
++static void __setscheduler(struct task_struct *p, struct rq *rq, int policy,
++ int prio, bool keep_boost)
++{
++ int oldrtprio, oldprio;
++
++ p->policy = policy;
++ oldrtprio = p->rt_priority;
++ p->rt_priority = prio;
++ p->normal_prio = normal_prio(p);
++ oldprio = p->prio;
++ /*
++ * Keep a potential priority boosting if called from
++ * sched_setscheduler().
++ */
++ p->prio = normal_prio(p);
++ if (keep_boost)
++ p->prio = rt_effective_prio(p, p->prio);
++
++ if (task_running(rq, p)) {
++ set_rq_task(rq, p);
++ resched_task(p);
++ } else if (task_queued(p)) {
++ dequeue_task(rq, p, DEQUEUE_SAVE);
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ if (p->prio < oldprio || p->rt_priority > oldrtprio)
++ try_preempt(p, rq);
++ }
++}
++
++/*
++ * Check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++ const struct cred *cred = current_cred(), *pcred;
++ bool match;
++
++ rcu_read_lock();
++ pcred = __task_cred(p);
++ match = (uid_eq(cred->euid, pcred->euid) ||
++ uid_eq(cred->euid, pcred->uid));
++ rcu_read_unlock();
++ return match;
++}
++
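++/*
++ * Common backend for the sched_setscheduler() family: validate the requested
++ * policy and priority against the caller's capabilities and rlimits (when
++ * @user is set), then apply the change under the runqueue lock; @pi controls
++ * whether a priority-inheritance boost is preserved and re-adjusted.
++ */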
++static int
++__sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param, bool user, bool pi)
++{
++ struct sched_param zero_param = { .sched_priority = 0 };
++ unsigned long flags, rlim_rtprio = 0;
++ int retval, oldpolicy = -1;
++ int reset_on_fork;
++ struct rq *rq;
++
++ /* The pi code expects interrupts enabled */
++ BUG_ON(pi && in_interrupt());
++
++ if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) {
++ unsigned long lflags;
++
++ if (!lock_task_sighand(p, &lflags))
++ return -ESRCH;
++ rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
++ unlock_task_sighand(p, &lflags);
++ if (rlim_rtprio)
++ goto recheck;
++ /*
++ * If the caller requested an RT policy without having the
++ * necessary rights, we downgrade the policy to SCHED_ISO.
++ * We also set the parameter to zero to pass the checks.
++ */
++ policy = SCHED_ISO;
++ param = &zero_param;
++ }
++recheck:
++ /* Double check policy once rq lock held */
++ if (policy < 0) {
++ reset_on_fork = p->sched_reset_on_fork;
++ policy = oldpolicy = p->policy;
++ } else {
++ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
++ policy &= ~SCHED_RESET_ON_FORK;
++
++ if (!SCHED_RANGE(policy))
++ return -EINVAL;
++ }
++
++ /*
++ * Valid priorities for SCHED_FIFO and SCHED_RR are
++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
++ * SCHED_BATCH is 0.
++ */
++ if (param->sched_priority < 0 ||
++ (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) ||
++ (!p->mm && param->sched_priority > MAX_RT_PRIO - 1))
++ return -EINVAL;
++ if (is_rt_policy(policy) != (param->sched_priority != 0))
++ return -EINVAL;
++
++ /*
++ * Allow unprivileged RT tasks to decrease priority:
++ */
++ if (user && !capable(CAP_SYS_NICE)) {
++ if (is_rt_policy(policy)) {
++ unsigned long rlim_rtprio =
++ task_rlimit(p, RLIMIT_RTPRIO);
++
++ /* Can't set/change the rt policy */
++ if (policy != p->policy && !rlim_rtprio)
++ return -EPERM;
++
++ /* Can't increase priority */
++ if (param->sched_priority > p->rt_priority &&
++ param->sched_priority > rlim_rtprio)
++ return -EPERM;
++ } else {
++ switch (p->policy) {
++ /*
++ * Can only downgrade policies but not back to
++ * SCHED_NORMAL
++ */
++ case SCHED_ISO:
++ if (policy == SCHED_ISO)
++ goto out;
++ if (policy != SCHED_NORMAL)
++ return -EPERM;
++ break;
++ case SCHED_BATCH:
++ if (policy == SCHED_BATCH)
++ goto out;
++ if (policy != SCHED_IDLEPRIO)
++ return -EPERM;
++ break;
++ case SCHED_IDLEPRIO:
++ if (policy == SCHED_IDLEPRIO)
++ goto out;
++ return -EPERM;
++ default:
++ break;
++ }
++ }
++
++ /* Can't change other user's priorities */
++ if (!check_same_owner(p))
++ return -EPERM;
++
++ /* Normal users shall not reset the sched_reset_on_fork flag: */
++ if (p->sched_reset_on_fork && !reset_on_fork)
++ return -EPERM;
++ }
++
++ if (user) {
++ retval = security_task_setscheduler(p);
++ if (retval)
++ return retval;
++ }
++
++ /*
++ * Make sure no PI-waiters arrive (or leave) while we are
++ * changing the priority of the task:
++ *
++ * To be able to change p->policy safely, the runqueue lock must be
++ * held.
++ */
++ rq = task_rq_lock(p, &flags);
++ update_rq_clock(rq);
++
++ /*
++ * Changing the policy of the stop threads is a very bad idea:
++ */
++ if (p == rq->stop) {
++ task_rq_unlock(rq, p, &flags);
++ return -EINVAL;
++ }
++
++ /*
++ * If not changing anything there's no need to proceed further:
++ */
++ if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
++ param->sched_priority == p->rt_priority))) {
++ task_rq_unlock(rq, p, &flags);
++ return 0;
++ }
++
++ /* Re-check policy now with rq lock held */
++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++ policy = oldpolicy = -1;
++ task_rq_unlock(rq, p, &flags);
++ goto recheck;
++ }
++ p->sched_reset_on_fork = reset_on_fork;
++
++ __setscheduler(p, rq, policy, param->sched_priority, pi);
++ task_rq_unlock(rq, p, &flags);
++
++ if (pi)
++ rt_mutex_adjust_pi(p);
++out:
++ return 0;
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may already be dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return __sched_setscheduler(p, policy, param, true, true);
++}
++
++EXPORT_SYMBOL_GPL(sched_setscheduler);
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++ const struct sched_param param = { .sched_priority = attr->sched_priority };
++ int policy = attr->sched_policy;
++
++ return __sched_setscheduler(p, policy, &param, true, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr);
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission. For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return __sched_setscheduler(p, policy, param, false, true);
++}
++EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
++
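++/*
++ * Copy a struct sched_param from userspace and apply it to the task with the
++ * given pid (or to current when pid is 0) under RCU.
++ */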
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++ struct sched_param lparam;
++ struct task_struct *p;
++ int retval;
++
++ if (!param || pid < 0)
++ return -EINVAL;
++ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++ return -EFAULT;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setscheduler(p, policy, &lparam);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr,
++ struct sched_attr *attr)
++{
++ u32 size;
++ int ret;
++
++ if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
++ return -EFAULT;
++
++ /* Zero the full structure, so that a short copy will be nice: */
++ memset(attr, 0, sizeof(*attr));
++
++ ret = get_user(size, &uattr->size);
++ if (ret)
++ return ret;
++
++ /* Bail out on silly large: */
++ if (size > PAGE_SIZE)
++ goto err_size;
++
++ /* ABI compatibility quirk: */
++ if (!size)
++ size = SCHED_ATTR_SIZE_VER0;
++
++ if (size < SCHED_ATTR_SIZE_VER0)
++ goto err_size;
++
++ /*
++ * If we're handed a bigger struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. new
++ * user-space does not rely on any kernel feature
++ * extensions we don't know about yet.
++ */
++ if (size > sizeof(*attr)) {
++ unsigned char __user *addr;
++ unsigned char __user *end;
++ unsigned char val;
++
++ addr = (void __user *)uattr + sizeof(*attr);
++ end = (void __user *)uattr + size;
++
++ for (; addr < end; addr++) {
++ ret = get_user(val, addr);
++ if (ret)
++ return ret;
++ if (val)
++ goto err_size;
++ }
++ size = sizeof(*attr);
++ }
++
++ ret = copy_from_user(attr, uattr, size);
++ if (ret)
++ return -EFAULT;
++
++ /*
++ * XXX: Do we want to be lenient like existing syscalls; or do we want
++ * to be strict and return an error on out-of-bounds values?
++ */
++ attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++ /* sched/core.c uses zero here but we already know ret is zero */
++ return 0;
++
++err_size:
++ put_user(sizeof(*attr), &uattr->size);
++ return -E2BIG;
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++ if (policy < 0)
++ return -EINVAL;
++
++ return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++ return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, flags)
++{
++ struct sched_attr attr;
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || flags)
++ return -EINVAL;
++
++ retval = sched_copy_attr(uattr, &attr);
++ if (retval)
++ return retval;
++
++ if ((int)attr.sched_policy < 0)
++ return -EINVAL;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setattr(p, &attr);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (pid < 0)
++ goto out_nounlock;
++
++ retval = -ESRCH;
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ if (p) {
++ retval = security_task_getscheduler(p);
++ if (!retval)
++ retval = p->policy;
++ }
++ rcu_read_unlock();
++
++out_nounlock:
++ return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++ struct sched_param lp = { .sched_priority = 0 };
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (!param || pid < 0)
++ goto out_nounlock;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ if (has_rt_policy(p))
++ lp.sched_priority = p->rt_priority;
++ rcu_read_unlock();
++
++ /*
++ * This one might sleep, we cannot do it with a spinlock held ...
++ */
++ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
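++/*
++ * Copy a sched_attr back to userspace. If the caller passed a smaller
++ * structure, shrink attr->size to match, but refuse with -EFBIG when the
++ * truncated tail would drop non-zero fields.
++ */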
++static int sched_read_attr(struct sched_attr __user *uattr,
++ struct sched_attr *attr,
++ unsigned int usize)
++{
++ int ret;
++
++ if (!access_ok(VERIFY_WRITE, uattr, usize))
++ return -EFAULT;
++
++ /*
++ * If we're handed a smaller struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. old
++ * user-space does not get incomplete information.
++ */
++ if (usize < sizeof(*attr)) {
++ unsigned char *addr;
++ unsigned char *end;
++
++ addr = (void *)attr + usize;
++ end = (void *)attr + sizeof(*attr);
++
++ for (; addr < end; addr++) {
++ if (*addr)
++ return -EFBIG;
++ }
++
++ attr->size = usize;
++ }
++
++ ret = copy_to_user(uattr, attr, attr->size);
++ if (ret)
++ return -EFAULT;
++
++ /* sched/core.c uses zero here but we already know ret is zero */
++ return ret;
++}
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @size: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, size, unsigned int, flags)
++{
++ struct sched_attr attr = {
++ .size = sizeof(struct sched_attr),
++ };
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || size > PAGE_SIZE ||
++ size < SCHED_ATTR_SIZE_VER0 || flags)
++ return -EINVAL;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ attr.sched_policy = p->policy;
++ if (rt_task(p))
++ attr.sched_priority = p->rt_priority;
++ else
++ attr.sched_nice = task_nice(p);
++
++ rcu_read_unlock();
++
++ retval = sched_read_attr(uattr, &attr, size);
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++ cpumask_var_t cpus_allowed, new_mask;
++ struct task_struct *p;
++ int retval;
++
++ rcu_read_lock();
++
++ p = find_process_by_pid(pid);
++ if (!p) {
++ rcu_read_unlock();
++ return -ESRCH;
++ }
++
++ /* Prevent p going away */
++ get_task_struct(p);
++ rcu_read_unlock();
++
++ if (p->flags & PF_NO_SETAFFINITY) {
++ retval = -EINVAL;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_free_cpus_allowed;
++ }
++ retval = -EPERM;
++ if (!check_same_owner(p)) {
++ rcu_read_lock();
++ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++ rcu_read_unlock();
++ goto out_unlock;
++ }
++ rcu_read_unlock();
++ }
++
++ retval = security_task_setscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ cpuset_cpus_allowed(p, cpus_allowed);
++ cpumask_and(new_mask, in_mask, cpus_allowed);
++again:
++ retval = __set_cpus_allowed_ptr(p, new_mask, true);
++
++ if (!retval) {
++ cpuset_cpus_allowed(p, cpus_allowed);
++ if (!cpumask_subset(new_mask, cpus_allowed)) {
++ /*
++ * We must have raced with a concurrent cpuset
++ * update. Just reset the cpus_allowed to the
++ * cpuset's cpus_allowed
++ */
++ cpumask_copy(new_mask, cpus_allowed);
++ goto again;
++ }
++ }
++out_unlock:
++ free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++ free_cpumask_var(cpus_allowed);
++out_put_task:
++ put_task_struct(p);
++ return retval;
++}
++
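++/*
++ * Copy a user-supplied affinity bitmask into a kernel cpumask. A short
++ * buffer is zero-padded; anything longer than cpumask_size() is truncated.
++ */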
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++ cpumask_t *new_mask)
++{
++ if (len < cpumask_size())
++ cpumask_clear(new_mask);
++ else if (len > cpumask_size())
++ len = cpumask_size();
++
++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ cpumask_var_t new_mask;
++ int retval;
++
++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++ return -ENOMEM;
++
++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++ if (retval == 0)
++ retval = sched_setaffinity(pid, new_mask);
++ free_cpumask_var(new_mask);
++ return retval;
++}
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++ struct task_struct *p;
++ unsigned long flags;
++ int retval;
++
++ get_online_cpus();
++ rcu_read_lock();
++
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++out_unlock:
++ rcu_read_unlock();
++ put_online_cpus();
++
++ return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An error
++ * code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ int ret;
++ cpumask_var_t mask;
++
++ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++ return -EINVAL;
++ if (len & (sizeof(unsigned long)-1))
++ return -EINVAL;
++
++ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++ return -ENOMEM;
++
++ ret = sched_getaffinity(pid, mask);
++ if (ret == 0) {
++ size_t retlen = min_t(size_t, len, cpumask_size());
++
++ if (copy_to_user(user_mask_ptr, mask, retlen))
++ ret = -EFAULT;
++ else
++ ret = retlen;
++ }
++ free_cpumask_var(mask);
++
++ return ret;
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. It does this by
++ * scheduling away the current task. If it still has the earliest deadline
++ * it will be scheduled again as the next task.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++ struct rq *rq;
++
++ if (!sched_yield_type)
++ goto out;
++
++ local_irq_disable();
++ rq = this_rq();
++ rq_lock(rq);
++
++ if (sched_yield_type > 1)
++ time_slice_expired(current, rq);
++ schedstat_inc(rq->yld_count);
++
++ /*
++ * Since we are going to call schedule() anyway, there's
++ * no need to preempt or enable interrupts:
++ */
++ preempt_disable();
++ rq_unlock(rq);
++ sched_preempt_enable_no_resched();
++
++ schedule();
++out:
++ return 0;
++}
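++/*
++ * sched_yield_type (defined elsewhere in this file) controls the above:
++ * 0 makes sched_yield() a no-op, 1 reschedules without expiring the
++ * caller's timeslice, and anything higher expires the timeslice as well.
++ */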
++
++#ifndef CONFIG_PREEMPT
++int __sched _cond_resched(void)
++{
++ if (should_resched(0)) {
++ preempt_schedule_common();
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(_cond_resched);
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++ int resched = should_resched(PREEMPT_LOCK_OFFSET);
++ int ret = 0;
++
++ lockdep_assert_held(lock);
++
++ if (spin_needbreak(lock) || resched) {
++ spin_unlock(lock);
++ if (resched)
++ preempt_schedule_common();
++ else
++ cpu_relax();
++ ret = 1;
++ spin_lock(lock);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
++
++int __sched __cond_resched_softirq(void)
++{
++ BUG_ON(!in_softirq());
++
++ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
++ local_bh_enable();
++ preempt_schedule_common();
++ local_bh_disable();
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(__cond_resched_softirq);
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run; if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++ set_current_state(TASK_RUNNING);
++ sys_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * Return:
++ * true (>0) if we indeed boosted the target task.
++ * false (0) if we failed to boost the target.
++ * -ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++ struct task_struct *rq_p;
++ struct rq *rq, *p_rq;
++ unsigned long flags;
++ int yielded = 0;
++
++ local_irq_save(flags);
++ rq = this_rq();
++
++again:
++ p_rq = task_rq(p);
++ /*
++ * If the target task is currently running, or is not runnable at all,
++ * there is no task to yield to.
++ */
++ if (task_running(p_rq, p) || p->state) {
++ yielded = -ESRCH;
++ goto out_irq;
++ }
++
++ double_rq_lock(rq, p_rq);
++ if (unlikely(task_rq(p) != p_rq)) {
++ double_rq_unlock(rq, p_rq);
++ goto again;
++ }
++
++ yielded = 1;
++ schedstat_inc(rq->yld_count);
++ rq_p = rq->curr;
++ if (p->deadline > rq_p->deadline)
++ p->deadline = rq_p->deadline;
++ p->time_slice += rq_p->time_slice;
++ if (p->time_slice > timeslice())
++ p->time_slice = timeslice();
++ time_slice_expired(rq_p, rq);
++ if (preempt && rq != p_rq)
++ resched_task(p_rq->curr);
++ double_rq_unlock(rq, p_rq);
++out_irq:
++ local_irq_restore(flags);
++
++ if (yielded > 0)
++ schedule();
++ return yielded;
++}
++EXPORT_SYMBOL_GPL(yield_to);
++
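++/*
++ * io_schedule_prepare() marks the current task as sleeping in iowait and
++ * flushes any plugged block requests; io_schedule_finish() restores the
++ * previous in_iowait state. The helpers below wrap schedule() with them.
++ */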
++int io_schedule_prepare(void)
++{
++ int old_iowait = current->in_iowait;
++
++ current->in_iowait = 1;
++ blk_schedule_flush_plug(current);
++
++ return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++ current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++ int token;
++ long ret;
++
++ token = io_schedule_prepare();
++ ret = schedule_timeout(timeout);
++ io_schedule_finish(token);
++
++ return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void io_schedule(void)
++{
++ int token;
++
++ token = io_schedule_prepare();
++ schedule();
++ io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = MAX_USER_RT_PRIO-1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_ISO:
++ case SCHED_IDLEPRIO:
++ ret = 0;
++ break;
++ }
++ return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = 1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_ISO:
++ case SCHED_IDLEPRIO:
++ ret = 0;
++ break;
++ }
++ return ret;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++ struct timespec __user *, interval)
++{
++ struct task_struct *p;
++ unsigned int time_slice;
++ unsigned long flags;
++ struct timespec t;
++ struct rq *rq;
++ int retval;
++
++ if (pid < 0)
++ return -EINVAL;
++
++ retval = -ESRCH;
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ rq = task_rq_lock(p, &flags);
++ time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(task_timeslice(p));
++ task_rq_unlock(rq, p, &flags);
++
++ rcu_read_unlock();
++ t = ns_to_timespec(time_slice);
++ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++void sched_show_task(struct task_struct *p)
++{
++ unsigned long free = 0;
++ int ppid;
++
++ if (!try_get_task_stack(p))
++ return;
++
++ printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p));
++
++ if (p->state == TASK_RUNNING)
++ printk(KERN_CONT " running task ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++ free = stack_not_used(p);
++#endif
++ ppid = 0;
++ rcu_read_lock();
++ if (pid_alive(p))
++ ppid = task_pid_nr(rcu_dereference(p->real_parent));
++ rcu_read_unlock();
++ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
++ task_pid_nr(p), ppid,
++ (unsigned long)task_thread_info(p)->flags);
++
++ print_worker_info(KERN_INFO, p);
++ show_stack(p, NULL);
++ put_task_stack(p);
++}
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++ /* no filter, everything matches */
++ if (!state_filter)
++ return true;
++
++ /* filter, but doesn't match */
++ if (!(p->state & state_filter))
++ return false;
++
++ /*
++ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++ * TASK_KILLABLE).
++ */
++ if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
++ return false;
++
++ return true;
++}
++
++void show_state_filter(unsigned long state_filter)
++{
++ struct task_struct *g, *p;
++
++#if BITS_PER_LONG == 32
++ printk(KERN_INFO
++ " task PC stack pid father\n");
++#else
++ printk(KERN_INFO
++ " task PC stack pid father\n");
++#endif
++ rcu_read_lock();
++ for_each_process_thread(g, p) {
++ /*
++ * reset the NMI-timeout, listing all files on a slow
++ * console might take a lot of time:
++ * Also, reset softlockup watchdogs on all CPUs, because
++ * another CPU might be blocked waiting for us to process
++ * an IPI.
++ */
++ touch_nmi_watchdog();
++ touch_all_softlockup_watchdogs();
++ if (state_filter_match(state_filter, p))
++ sched_show_task(p);
++ }
++
++ rcu_read_unlock();
++ /*
++ * Only show locks if all tasks are dumped:
++ */
++ if (!state_filter)
++ debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++ pr_info("Task dump for CPU %d:\n", cpu);
++ sched_show_task(cpu_curr(cpu));
++}
++
++#ifdef CONFIG_SMP
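++/*
++ * Record a task's new allowed-CPU mask and its weight. The fuller
++ * __do_set_cpus_allowed() below additionally asserts the locking rules
++ * that apply when the task is queued.
++ */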
++void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
++{
++ cpumask_copy(&p->cpus_allowed, new_mask);
++ p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ struct rq *rq = task_rq(p);
++
++ lockdep_assert_held(&p->pi_lock);
++
++ cpumask_copy(&p->cpus_allowed, new_mask);
++
++ if (task_queued(p)) {
++ /*
++ * Because __kthread_bind() calls this on blocked tasks without
++ * holding rq->lock.
++ */
++ lockdep_assert_held(&rq->lock);
++ }
++}
++
++/*
++ * do_set_cpus_allowed() is for callers outside the scheduler core and must
++ * not be called on a running or queued task. We should be holding pi_lock.
++ */
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ __do_set_cpus_allowed(p, new_mask);
++ if (needs_other_cpu(p, task_cpu(p))) {
++ struct rq *rq;
++
++ rq = __task_rq_lock(p);
++ set_task_cpu(p, valid_task_cpu(p));
++ resched_task(p);
++ __task_rq_unlock(rq);
++ }
++}
++#endif
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: cpu the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void init_idle(struct task_struct *idle, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&idle->pi_lock, flags);
++ raw_spin_lock(&rq->lock);
++ idle->last_ran = rq->niffies;
++ time_slice_expired(idle, rq);
++ idle->state = TASK_RUNNING;
++ /* Setting prio to illegal value shouldn't matter when never queued */
++ idle->prio = PRIO_LIMIT;
++
++ kasan_unpoison_task_stack(idle);
++
++#ifdef CONFIG_SMP
++ /*
++ * It's possible that init_idle() gets called multiple times on a task,
++ * in that case do_set_cpus_allowed() will not do the right thing.
++ *
++ * And since this is boot we can forgo the serialisation.
++ */
++ set_cpus_allowed_common(idle, cpumask_of(cpu));
++#ifdef CONFIG_SMT_NICE
++ idle->smt_bias = 0;
++#endif
++#endif
++ set_rq_task(rq, idle);
++
++ /* Silence PROVE_RCU */
++ rcu_read_lock();
++ set_task_cpu(idle, cpu);
++ rcu_read_unlock();
++
++ rq->curr = rq->idle = idle;
++ idle->on_rq = TASK_ON_RQ_QUEUED;
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++ /* Set the preempt count _outside_ the spinlocks! */
++ init_idle_preempt_count(idle, cpu);
++
++ ftrace_graph_init_idle_task(idle, cpu);
++ vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++ const struct cpumask __maybe_unused *trial)
++{
++ return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++ const struct cpumask *cs_cpus_allowed)
++{
++ int ret = 0;
++
++ /*
++ * Kthreads which disallow setaffinity shouldn't be moved
++ * to a new cpuset; we don't want to change their CPU
++ * affinity and isolating such threads by their set of
++ * allowed nodes is unnecessary. Thus, cpusets are not
++ * applicable for such threads. This prevents checking for
++ * success of set_cpus_allowed_ptr() on all attached tasks
++ * before cpus_allowed may be changed.
++ */
++ if (p->flags & PF_NO_SETAFFINITY)
++ ret = -EINVAL;
++
++ return ret;
++}
++
++void resched_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ rq_lock_irqsave(rq, &flags);
++ resched_task(cpu_curr(cpu));
++ rq_unlock_irqrestore(rq, &flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
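++/*
++ * MuQSS has no periodic load balancer, so the nohz balancing hooks below
++ * are empty stubs that only exist for the benefit of callers in generic
++ * code.
++ */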
++void nohz_balance_enter_idle(int cpu)
++{
++}
++
++void select_nohz_load_balancer(int stop_tick)
++{
++}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU. This is good for power-savings.
++ *
++ * We don't do similar optimization for completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++ int i, cpu = smp_processor_id();
++ struct sched_domain *sd;
++
++ if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
++ return cpu;
++
++ rcu_read_lock();
++ for_each_domain(cpu, sd) {
++ for_each_cpu(i, sched_domain_span(sd)) {
++ if (cpu == i)
++ continue;
++
++ if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
++ cpu = i;
++ goto unlock;
++ }
++ }
++ }
++
++ if (!is_housekeeping_cpu(cpu))
++ cpu = housekeeping_any_cpu();
++unlock:
++ rcu_read_unlock();
++ return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++void wake_up_idle_cpu(int cpu)
++{
++ if (cpu == smp_processor_id())
++ return;
++
++ if (set_nr_and_not_polling(cpu_rq(cpu)->idle))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static bool wake_up_full_nohz_cpu(int cpu)
++{
++ /*
++ * We just need the target to call irq_exit() and re-evaluate
++ * the next tick. The nohz full kick at least implies that.
++ * If needed we can still optimize that later with an
++ * empty IRQ.
++ */
++ if (cpu_is_offline(cpu))
++ return true; /* Don't try to wake offline CPUs. */
++ if (tick_nohz_full_cpu(cpu)) {
++ if (cpu != smp_processor_id() ||
++ tick_nohz_tick_stopped())
++ tick_nohz_full_kick_cpu(cpu);
++ return true;
++ }
++
++ return false;
++}
++
++/*
++ * Wake up the specified CPU. If the CPU is going offline, it is the
++ * caller's responsibility to deal with the lost wakeup, for example,
++ * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
++ */
++void wake_up_nohz_cpu(int cpu)
++{
++ if (!wake_up_full_nohz_cpu(cpu))
++ wake_up_idle_cpu(cpu);
++}
++#endif /* CONFIG_NO_HZ_COMMON */
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++ const struct cpumask *new_mask, bool check)
++{
++ const struct cpumask *cpu_valid_mask = cpu_active_mask;
++ bool queued = false, running_wrong = false, kthread;
++ struct cpumask old_mask;
++ unsigned long flags;
++ struct rq *rq;
++ int ret = 0;
++
++ rq = task_rq_lock(p, &flags);
++ update_rq_clock(rq);
++
++ kthread = !!(p->flags & PF_KTHREAD);
++ if (kthread) {
++ /*
++ * Kernel threads are allowed on online && !active CPUs
++ */
++ cpu_valid_mask = cpu_online_mask;
++ }
++
++ /*
++ * Must re-check here, to close a race against __kthread_bind(),
++ * sched_setaffinity() is not guaranteed to observe the flag.
++ */
++ if (check && (p->flags & PF_NO_SETAFFINITY)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ cpumask_copy(&old_mask, &p->cpus_allowed);
++ if (cpumask_equal(&old_mask, new_mask))
++ goto out;
++
++ if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ queued = task_queued(p);
++ __do_set_cpus_allowed(p, new_mask);
++
++ if (kthread) {
++ /*
++ * For kernel threads that do indeed end up on online &&
++ * !active we want to ensure they are strict per-CPU threads.
++ */
++ WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
++ !cpumask_intersects(new_mask, cpu_active_mask) &&
++ p->nr_cpus_allowed != 1);
++ }
++
++ /* Can the task run on the task's current CPU? If so, we're done */
++ if (cpumask_test_cpu(task_cpu(p), new_mask))
++ goto out;
++
++ if (task_running(rq, p)) {
++ /* Task is running on the wrong cpu now, reschedule it. */
++ if (rq == this_rq()) {
++ set_tsk_need_resched(p);
++ running_wrong = true;
++ } else
++ resched_task(p);
++ } else {
++ int cpu = cpumask_any_and(cpu_valid_mask, new_mask);
++
++ if (queued) {
++ /*
++ * Switch runqueue locks after dequeueing the task
++ * here while still holding the pi_lock to be holding
++ * the correct lock for enqueueing.
++ */
++ dequeue_task(rq, p, 0);
++ rq_unlock(rq);
++
++ rq = cpu_rq(cpu);
++ rq_lock(rq);
++ }
++ set_task_cpu(p, cpu);
++ if (queued)
++ enqueue_task(rq, p, 0);
++ }
++ if (queued)
++ try_preempt(p, rq);
++ if (running_wrong)
++ preempt_disable();
++out:
++ task_rq_unlock(rq, p, &flags);
++
++ if (running_wrong) {
++ __schedule(true);
++ preempt_enable();
++ }
++
++ return ret;
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++ return __set_cpus_allowed_ptr(p, new_mask, false);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Run through task list and find tasks affined to the dead cpu, then remove
++ * that cpu from the list, enable cpu0 and set the zerobound flag. Must hold
++ * cpu 0 and src_cpu's runqueue locks.
++ */
++static void bind_zero(int src_cpu)
++{
++ struct task_struct *p, *t;
++ struct rq *rq0;
++ int bound = 0;
++
++ if (src_cpu == 0)
++ return;
++
++ rq0 = cpu_rq(0);
++
++ do_each_thread(t, p) {
++ if (cpumask_test_cpu(src_cpu, &p->cpus_allowed)) {
++ bool local = (task_cpu(p) == src_cpu);
++ struct rq *rq = task_rq(p);
++
++ /* Only the dying cpu's stopper thread can still be running here; skip it */
++ if (local && task_running(rq, p))
++ continue;
++ atomic_clear_cpu(src_cpu, &p->cpus_allowed);
++ atomic_set_cpu(0, &p->cpus_allowed);
++ p->zerobound = true;
++ bound++;
++ if (local) {
++ bool queued = task_queued(p);
++
++ if (queued)
++ dequeue_task(rq, p, 0);
++ set_task_cpu(p, 0);
++ if (queued)
++ enqueue_task(rq0, p, 0);
++ }
++ }
++ } while_each_thread(t, p);
++
++ if (bound) {
++ printk(KERN_INFO "Removed affinity for %d processes to cpu %d\n",
++ bound, src_cpu);
++ }
++}
++
++/*
++ * Find processes with the zerobound flag and reenable their affinity for the
++ * CPU coming alive.
++ */
++static void unbind_zero(int src_cpu)
++{
++ int unbound = 0, zerobound = 0;
++ struct task_struct *p, *t;
++
++ if (src_cpu == 0)
++ return;
++
++ do_each_thread(t, p) {
++ if (!p->mm)
++ p->zerobound = false;
++ if (p->zerobound) {
++ unbound++;
++ cpumask_set_cpu(src_cpu, &p->cpus_allowed);
++ /* Once every CPU affinity has been re-enabled, remove
++ * the zerobound flag */
++ if (cpumask_subset(cpu_possible_mask, &p->cpus_allowed)) {
++ p->zerobound = false;
++ zerobound++;
++ }
++ }
++ } while_each_thread(t, p);
++
++ if (unbound) {
++ printk(KERN_INFO "Added affinity for %d processes to cpu %d\n",
++ unbound, src_cpu);
++ }
++ if (zerobound) {
++ printk(KERN_INFO "Released forced binding to cpu0 for %d processes\n",
++ zerobound);
++ }
++}
++
++/*
++ * Ensure that the idle task is using init_mm right before its cpu goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++ struct mm_struct *mm = current->active_mm;
++
++ BUG_ON(cpu_online(smp_processor_id()));
++
++ if (mm != &init_mm) {
++ switch_mm(mm, &init_mm, current);
++ finish_arch_post_lock_switch();
++ }
++ mmdrop(mm);
++}
++#else /* CONFIG_HOTPLUG_CPU */
++static void unbind_zero(int src_cpu) {}
++#endif /* CONFIG_HOTPLUG_CPU */
++
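++/*
++ * Install (or clear) the per-cpu stop task. While installed it is dressed
++ * up as a SCHED_FIFO task at STOP_PRIO; the previous stop task, if any, is
++ * dropped back to SCHED_NORMAL so it can exit normally.
++ */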
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++ struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++ struct sched_param start_param = { .sched_priority = 0 };
++ struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++ if (stop) {
++ /*
++ * Make it appear like a SCHED_FIFO task; it's something
++ * userspace knows about and won't get confused about.
++ *
++ * Also, it will make PI more or less work without too
++ * much confusion -- but then, stop work should not
++ * rely on PI working anyway.
++ */
++ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++ }
++
++ cpu_rq(cpu)->stop = stop;
++
++ if (old_stop) {
++ /*
++ * Reset it back to a normal scheduling policy so that
++ * it can die in pieces.
++ */
++ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++ }
++}
++
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++
++static struct ctl_table sd_ctl_dir[] = {
++ {
++ .procname = "sched_domain",
++ .mode = 0555,
++ },
++ {}
++};
++
++static struct ctl_table sd_ctl_root[] = {
++ {
++ .procname = "kernel",
++ .mode = 0555,
++ .child = sd_ctl_dir,
++ },
++ {}
++};
++
++static struct ctl_table *sd_alloc_ctl_entry(int n)
++{
++ struct ctl_table *entry =
++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
++
++ return entry;
++}
++
++static void sd_free_ctl_entry(struct ctl_table **tablep)
++{
++ struct ctl_table *entry;
++
++ /*
++ * In the intermediate directories, both the child directory and
++ * procname are dynamically allocated and could fail but the mode
++ * will always be set. In the lowest directory the names are
++ * static strings and all have proc handlers.
++ */
++ for (entry = *tablep; entry->mode; entry++) {
++ if (entry->child)
++ sd_free_ctl_entry(&entry->child);
++ if (entry->proc_handler == NULL)
++ kfree(entry->procname);
++ }
++
++ kfree(*tablep);
++ *tablep = NULL;
++}
++
++#define CPU_LOAD_IDX_MAX 5
++static int min_load_idx = 0;
++static int max_load_idx = CPU_LOAD_IDX_MAX-1;
++
++static void
++set_table_entry(struct ctl_table *entry,
++ const char *procname, void *data, int maxlen,
++ umode_t mode, proc_handler *proc_handler,
++ bool load_idx)
++{
++ entry->procname = procname;
++ entry->data = data;
++ entry->maxlen = maxlen;
++ entry->mode = mode;
++ entry->proc_handler = proc_handler;
++
++ if (load_idx) {
++ entry->extra1 = &min_load_idx;
++ entry->extra2 = &max_load_idx;
++ }
++}
++
++static struct ctl_table *
++sd_alloc_ctl_domain_table(struct sched_domain *sd)
++{
++ struct ctl_table *table = sd_alloc_ctl_entry(14);
++
++ if (table == NULL)
++ return NULL;
++
++ set_table_entry(&table[0], "min_interval", &sd->min_interval,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[1], "max_interval", &sd->max_interval,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[9], "cache_nice_tries",
++ &sd->cache_nice_tries,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[10], "flags", &sd->flags,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[11], "max_newidle_lb_cost",
++ &sd->max_newidle_lb_cost,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[12], "name", sd->name,
++ CORENAME_MAX_SIZE, 0444, proc_dostring, false);
++ /* &table[13] is terminator */
++
++ return table;
++}
++
++static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
++{
++ struct ctl_table *entry, *table;
++ struct sched_domain *sd;
++ int domain_num = 0, i;
++ char buf[32];
++
++ for_each_domain(cpu, sd)
++ domain_num++;
++ entry = table = sd_alloc_ctl_entry(domain_num + 1);
++ if (table == NULL)
++ return NULL;
++
++ i = 0;
++ for_each_domain(cpu, sd) {
++ snprintf(buf, 32, "domain%d", i);
++ entry->procname = kstrdup(buf, GFP_KERNEL);
++ entry->mode = 0555;
++ entry->child = sd_alloc_ctl_domain_table(sd);
++ entry++;
++ i++;
++ }
++ return table;
++}
++
++static cpumask_var_t sd_sysctl_cpus;
++static struct ctl_table_header *sd_sysctl_header;
++
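++/*
++ * (Re)build the /proc/sys/kernel/sched_domain/cpu*/domain* tables for each
++ * CPU flagged in sd_sysctl_cpus, allocating the per-cpu entries on first
++ * use.
++ */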
++void register_sched_domain_sysctl(void)
++{
++ static struct ctl_table *cpu_entries;
++ static struct ctl_table **cpu_idx;
++ char buf[32];
++ int i;
++
++ if (!cpu_entries) {
++ cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
++ if (!cpu_entries)
++ return;
++
++ WARN_ON(sd_ctl_dir[0].child);
++ sd_ctl_dir[0].child = cpu_entries;
++ }
++
++ if (!cpu_idx) {
++ struct ctl_table *e = cpu_entries;
++
++ cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
++ if (!cpu_idx)
++ return;
++
++ /* deal with sparse possible map */
++ for_each_possible_cpu(i) {
++ cpu_idx[i] = e;
++ e++;
++ }
++ }
++
++ if (!cpumask_available(sd_sysctl_cpus)) {
++ if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
++ return;
++
++ /* init to possible to not have holes in @cpu_entries */
++ cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
++ }
++
++ for_each_cpu(i, sd_sysctl_cpus) {
++ struct ctl_table *e = cpu_idx[i];
++
++ if (e->child)
++ sd_free_ctl_entry(&e->child);
++
++ if (!e->procname) {
++ snprintf(buf, 32, "cpu%d", i);
++ e->procname = kstrdup(buf, GFP_KERNEL);
++ }
++ e->mode = 0555;
++ e->child = sd_alloc_ctl_cpu_table(i);
++
++ __cpumask_clear_cpu(i, sd_sysctl_cpus);
++ }
++
++ WARN_ON(sd_sysctl_header);
++ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
++}
++
++void dirty_sched_domain_sysctl(int cpu)
++{
++ if (cpumask_available(sd_sysctl_cpus))
++ __cpumask_set_cpu(cpu, sd_sysctl_cpus);
++}
++
++/* may be called multiple times per register */
++void unregister_sched_domain_sysctl(void)
++{
++ unregister_sysctl_table(sd_sysctl_header);
++ sd_sysctl_header = NULL;
++}
++#endif /* CONFIG_SYSCTL */
++
++void set_rq_online(struct rq *rq)
++{
++ if (!rq->online) {
++ cpumask_set_cpu(cpu_of(rq), rq->rd->online);
++ rq->online = true;
++ }
++}
++
++void set_rq_offline(struct rq *rq)
++{
++ if (rq->online) {
++ int cpu = cpu_of(rq);
++
++ cpumask_clear_cpu(cpu, rq->rd->online);
++ rq->online = false;
++ clear_cpuidle_map(cpu);
++ }
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask. If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++ if (cpuhp_tasks_frozen) {
++ /*
++ * num_cpus_frozen tracks how many CPUs are involved in suspend
++ * resume sequence. As long as this is not the last online
++ * operation in the resume sequence, just build a single sched
++ * domain, ignoring cpusets.
++ */
++ partition_sched_domains(1, NULL, NULL);
++ if (--num_cpus_frozen)
++ return;
++ /*
++ * This is the last CPU online operation. So fall through and
++ * restore the original sched domains by considering the
++ * cpuset configurations.
++ */
++ cpuset_force_rebuild();
++ }
++
++ cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++ if (!cpuhp_tasks_frozen) {
++ cpuset_update_active_cpus();
++ } else {
++ num_cpus_frozen++;
++ partition_sched_domains(1, NULL, NULL);
++ }
++ return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ set_cpu_active(cpu, true);
++
++ if (sched_smp_initialized) {
++ sched_domains_numa_masks_set(cpu);
++ cpuset_cpu_active();
++ }
++
++ /*
++ * Put the rq online, if not already. This happens:
++ *
++ * 1) In the early boot process, because we build the real domains
++ * after all CPUs have been brought up.
++ *
++ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++ * domains.
++ */
++ rq_lock_irqsave(rq, &flags);
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++ set_rq_online(rq);
++ }
++ unbind_zero(cpu);
++ rq_unlock_irqrestore(rq, &flags);
++
++ return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++ int ret;
++
++ set_cpu_active(cpu, false);
++ /*
++ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++ * users of this state to go away such that all new such users will
++ * observe it.
++ *
++ * Do sync before park smpboot threads to take care the rcu boost case.
++ */
++ synchronize_rcu_mult(call_rcu, call_rcu_sched);
++
++ if (!sched_smp_initialized)
++ return 0;
++
++ ret = cpuset_cpu_inactive(cpu);
++ if (ret) {
++ set_cpu_active(cpu, true);
++ return ret;
++ }
++ sched_domains_numa_masks_clear(cpu);
++ return 0;
++}
++
++int sched_cpu_starting(unsigned int __maybe_unused cpu)
++{
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++int sched_cpu_dying(unsigned int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ local_irq_save(flags);
++ double_rq_lock(rq, cpu_rq(0));
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++ set_rq_offline(rq);
++ }
++ bind_zero(cpu);
++ double_rq_unlock(rq, cpu_rq(0));
++ sched_start_tick(rq, cpu);
++ hrexpiry_clear(rq);
++ local_irq_restore(flags);
++
++ return 0;
++}
++#endif
++
++#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
++/*
++ * Cheaper version of the below functions in case support for SMT and MC is
++ * compiled in but CPUs have no siblings.
++ */
++static bool sole_cpu_idle(struct rq *rq)
++{
++ return rq_idle(rq);
++}
++#endif
++#ifdef CONFIG_SCHED_SMT
++static const cpumask_t *thread_cpumask(int cpu)
++{
++ return topology_sibling_cpumask(cpu);
++}
++/* All this CPU's SMT siblings are idle */
++static bool siblings_cpu_idle(struct rq *rq)
++{
++ return cpumask_subset(&rq->thread_mask, &cpu_idle_map);
++}
++#endif
++#ifdef CONFIG_SCHED_MC
++static const cpumask_t *core_cpumask(int cpu)
++{
++ return topology_core_cpumask(cpu);
++}
++/* All this CPU's shared cache siblings are idle */
++static bool cache_cpu_idle(struct rq *rq)
++{
++ return cpumask_subset(&rq->core_mask, &cpu_idle_map);
++}
++#endif
++
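++/* Domain levels as used by the cache locality setup in sched_init_smp(). */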
++enum sched_domain_level {
++ SD_LV_NONE = 0,
++ SD_LV_SIBLING,
++ SD_LV_MC,
++ SD_LV_BOOK,
++ SD_LV_CPU,
++ SD_LV_NODE,
++ SD_LV_ALLNODES,
++ SD_LV_MAX
++};
++
++void __init sched_init_smp(void)
++{
++ struct sched_domain *sd;
++ int cpu, other_cpu;
++#ifdef CONFIG_SCHED_SMT
++ bool smt_threads = false;
++#endif
++ cpumask_var_t non_isolated_cpus;
++ struct rq *rq;
++
++ alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
++
++ sched_init_numa();
++
++ /*
++ * There's no userspace yet to cause hotplug operations; hence all the
++ * cpu masks are stable and all blatant races in the below code cannot
++ * happen.
++ */
++ mutex_lock(&sched_domains_mutex);
++ sched_init_domains(cpu_active_mask);
++ cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
++ if (cpumask_empty(non_isolated_cpus))
++ cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
++ mutex_unlock(&sched_domains_mutex);
++
++ /* Move init over to a non-isolated CPU */
++ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
++ BUG();
++ free_cpumask_var(non_isolated_cpus);
++
++ mutex_lock(&sched_domains_mutex);
++ local_irq_disable();
++ lock_all_rqs();
++ /*
++ * Set up the relative cache distance of each online cpu from each
++ * other in a simple array for quick lookup. Locality is determined
++ * by the closest sched_domain that CPUs are separated by. CPUs with
++ * shared cache in SMT and MC are treated as local. Separate CPUs
++ * (within the same package or physically) within the same node are
++ * treated as not local. CPUs not even in the same domain (different
++ * nodes) are treated as very distant.
++ */
++ for_each_online_cpu(cpu) {
++ rq = cpu_rq(cpu);
++
++ /* First check if this cpu is in the same node */
++ for_each_domain(cpu, sd) {
++ if (sd->level > SD_LV_MC)
++ continue;
++ /* Set locality to local node if not already found lower */
++ for_each_cpu(other_cpu, sched_domain_span(sd)) {
++ if (rq->cpu_locality[other_cpu] > 3)
++ rq->cpu_locality[other_cpu] = 3;
++ }
++ }
++
++ /*
++ * Each runqueue has its own function in case it doesn't have
++ * siblings of its own allowing mixed topologies.
++ */
++#ifdef CONFIG_SCHED_MC
++ for_each_cpu(other_cpu, core_cpumask(cpu)) {
++ if (rq->cpu_locality[other_cpu] > 2)
++ rq->cpu_locality[other_cpu] = 2;
++ }
++ if (cpumask_weight(core_cpumask(cpu)) > 1) {
++ cpumask_copy(&rq->core_mask, core_cpumask(cpu));
++ cpumask_clear_cpu(cpu, &rq->core_mask);
++ rq->cache_idle = cache_cpu_idle;
++ }
++#endif
++#ifdef CONFIG_SCHED_SMT
++ if (cpumask_weight(thread_cpumask(cpu)) > 1) {
++ cpumask_copy(&rq->thread_mask, thread_cpumask(cpu));
++ cpumask_clear_cpu(cpu, &rq->thread_mask);
++ for_each_cpu(other_cpu, thread_cpumask(cpu))
++ rq->cpu_locality[other_cpu] = 1;
++ rq->siblings_idle = siblings_cpu_idle;
++ smt_threads = true;
++ }
++#endif
++ }
++ for_each_possible_cpu(cpu) {
++ int total_cpus = 1, locality;
++
++ rq = cpu_rq(cpu);
++ for (locality = 1; locality <= 4; locality++) {
++ for_each_possible_cpu(other_cpu) {
++ if (rq->cpu_locality[other_cpu] == locality)
++ rq->rq_order[total_cpus++] = cpu_rq(other_cpu);
++ }
++ }
++ }
++#ifdef CONFIG_SMT_NICE
++ if (smt_threads) {
++ check_siblings = &check_smt_siblings;
++ wake_siblings = &wake_smt_siblings;
++ smt_schedule = &smt_should_schedule;
++ }
++#endif
++ unlock_all_rqs();
++ local_irq_enable();
++ mutex_unlock(&sched_domains_mutex);
++
++ for_each_online_cpu(cpu) {
++ rq = cpu_rq(cpu);
++
++ for_each_online_cpu(other_cpu) {
++ if (other_cpu <= cpu)
++ continue;
++ printk(KERN_DEBUG "MuQSS locality CPU %d to %d: %d\n", cpu, other_cpu, rq->cpu_locality[other_cpu]);
++ }
++ }
++
++ sched_smp_initialized = true;
++}
++#else
++void __init sched_init_smp(void)
++{
++ sched_smp_initialized = true;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++ return in_lock_functions(addr) ||
++ (addr >= (unsigned long)__sched_text_start
++ && addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++ struct cgroup_subsys_state css;
++
++ struct rcu_head rcu;
++ struct list_head list;
++
++ struct task_group *parent;
++ struct list_head siblings;
++ struct list_head children;
++};
++
++/*
++ * Default task group.
++ * Every task in the system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __read_mostly;
++#endif /* CONFIG_CGROUP_SCHED */
++
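++/*
++ * Early scheduler initialisation: set up the nice-level ratio table, the
++ * per-cpu runqueues with their skip lists, the CPU locality matrices and
++ * the boot CPU's idle task.
++ */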
++void __init sched_init(void)
++{
++#ifdef CONFIG_SMP
++ int cpu_ids;
++#endif
++ int i;
++ struct rq *rq;
++
++ sched_clock_init();
++
++ wait_bit_init();
++
++ prio_ratios[0] = 128;
++ for (i = 1 ; i < NICE_WIDTH ; i++)
++ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
++
++ skiplist_node_init(&init_task.node);
++
++#ifdef CONFIG_SMP
++ init_defrootdomain();
++ cpumask_clear(&cpu_idle_map);
++#else
++ uprq = &per_cpu(runqueues, 0);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++ task_group_cache = KMEM_CACHE(task_group, 0);
++
++ list_add(&root_task_group.list, &task_groups);
++ INIT_LIST_HEAD(&root_task_group.children);
++ INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++ for_each_possible_cpu(i) {
++ rq = cpu_rq(i);
++ skiplist_init(&rq->node);
++ rq->sl = new_skiplist(&rq->node);
++ raw_spin_lock_init(&rq->lock);
++ rq->nr_running = 0;
++ rq->nr_uninterruptible = 0;
++ rq->nr_switches = 0;
++ rq->clock = rq->old_clock = rq->last_niffy = rq->niffies = 0;
++ rq->last_jiffy = jiffies;
++ rq->user_ns = rq->nice_ns = rq->softirq_ns = rq->system_ns =
++ rq->iowait_ns = rq->idle_ns = 0;
++ rq->dither = 0;
++ set_rq_task(rq, &init_task);
++ rq->iso_ticks = 0;
++ rq->iso_refractory = false;
++#ifdef CONFIG_SMP
++ rq->sd = NULL;
++ rq->rd = NULL;
++ rq->online = false;
++ rq->cpu = i;
++ rq_attach_root(rq, &def_root_domain);
++#endif
++ init_rq_hrexpiry(rq);
++ atomic_set(&rq->nr_iowait, 0);
++ }
++
++#ifdef CONFIG_SMP
++ cpu_ids = i;
++ /*
++ * Set the base locality for cpu cache distance calculation to
++ * "distant" (3). Make sure the distance from a CPU to itself is 0.
++ */
++ for_each_possible_cpu(i) {
++ int j;
++
++ rq = cpu_rq(i);
++#ifdef CONFIG_SCHED_SMT
++ rq->siblings_idle = sole_cpu_idle;
++#endif
++#ifdef CONFIG_SCHED_MC
++ rq->cache_idle = sole_cpu_idle;
++#endif
++ rq->cpu_locality = kmalloc(cpu_ids * sizeof(int *), GFP_ATOMIC);
++ for_each_possible_cpu(j) {
++ if (i == j)
++ rq->cpu_locality[j] = 0;
++ else
++ rq->cpu_locality[j] = 4;
++ }
++ rq->rq_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC);
++ rq->rq_order[0] = rq;
++ for (j = 1; j < cpu_ids; j++)
++ rq->rq_order[j] = cpu_rq(j);
++ }
++#endif
++
++ /*
++ * The boot idle thread does lazy MMU switching as well:
++ */
++ mmgrab(&init_mm);
++ enter_lazy_tlb(&init_mm, current);
++
++ /*
++ * Make us the idle thread. Technically, schedule() should not be
++ * called from this thread, however somewhere below it might be,
++ * but because we are the idle thread, we just pick up running again
++ * when this runqueue becomes "idle".
++ */
++ init_idle(current, smp_processor_id());
++
++#ifdef CONFIG_SMP
++ /* May be allocated at isolcpus cmdline parse time */
++ if (cpu_isolated_map == NULL)
++ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
++ idle_thread_set_boot_cpu();
++#endif /* SMP */
++
++ init_schedstats();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++static inline int preempt_count_equals(int preempt_offset)
++{
++ int nested = preempt_count() + rcu_preempt_depth();
++
++ return (nested == preempt_offset);
++}
++
++void __might_sleep(const char *file, int line, int preempt_offset)
++{
++ /*
++ * Blocking primitives will set (and therefore destroy) current->state,
++ * since we will exit with TASK_RUNNING make sure we enter with it,
++ * otherwise we will destroy state.
++ */
++ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
++ "do not call blocking ops when !TASK_RUNNING; "
++ "state=%lx set at [<%p>] %pS\n",
++ current->state,
++ (void *)current->task_state_change,
++ (void *)current->task_state_change);
++
++ ___might_sleep(file, line, preempt_offset);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++void ___might_sleep(const char *file, int line, int preempt_offset)
++{
++ /* Ratelimiting timestamp: */
++ static unsigned long prev_jiffy;
++
++ unsigned long preempt_disable_ip;
++
++ /* WARN_ON_ONCE() by default, no rate limit required: */
++ rcu_sleep_check();
++
++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
++ !is_idle_task(current)) ||
++ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++ oops_in_progress)
++ return;
++
++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++ return;
++ prev_jiffy = jiffies;
++
++ /* Save this before calling printk(), since that will clobber it: */
++ preempt_disable_ip = get_preempt_disable_ip(current);
++
++ printk(KERN_ERR
++ "BUG: sleeping function called from invalid context at %s:%d\n",
++ file, line);
++ printk(KERN_ERR
++ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++ in_atomic(), irqs_disabled(),
++ current->pid, current->comm);
++
++ if (task_stack_end_corrupted(current))
++ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
++
++ debug_show_held_locks(current);
++ if (irqs_disabled())
++ print_irqtrace_events(current);
++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++ && !preempt_count_equals(preempt_offset)) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(preempt_disable_ip);
++ pr_cont("\n");
++ }
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(___might_sleep);
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static inline void normalise_rt_tasks(void)
++{
++ struct task_struct *g, *p;
++ unsigned long flags;
++ struct rq *rq;
++
++ read_lock(&tasklist_lock);
++ for_each_process_thread(g, p) {
++ /*
++ * Only normalize user tasks:
++ */
++ if (p->flags & PF_KTHREAD)
++ continue;
++
++ if (!rt_task(p) && !iso_task(p))
++ continue;
++
++ rq = task_rq_lock(p, &flags);
++ __setscheduler(p, rq, SCHED_NORMAL, 0, false);
++ task_rq_unlock(rq, p, &flags);
++ }
++ read_unlock(&tasklist_lock);
++}
++
++void normalize_rt_tasks(void)
++{
++ normalise_rt_tasks();
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++ return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner. This function
++ * must be called with all CPUs synchronised and interrupts disabled, and
++ * the caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before re-enabling interrupts
++ * and restarting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++ cpu_curr(cpu) = p;
++}
++
++#endif
++
++void init_idle_bootup_task(struct task_struct *idle)
++{}
++
++#ifdef CONFIG_SCHED_DEBUG
++__read_mostly bool sched_debug_enabled;
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++ struct seq_file *m)
++{}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
++#endif
++
++#ifdef CONFIG_SMP
++#define SCHED_LOAD_SHIFT (10)
++#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
++
++unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
++{
++ return SCHED_LOAD_SCALE;
++}
++
++unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
++{
++ unsigned long weight = cpumask_weight(sched_domain_span(sd));
++ unsigned long smt_gain = sd->smt_gain;
++
++ smt_gain /= weight;
++
++ return smt_gain;
++}
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++ kmem_cache_free(task_group_cache, tg);
++}
++
++/* Allocate a new task group; MuQSS keeps no per-group runqueues */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++ struct task_group *tg;
++
++ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++ if (!tg)
++ return ERR_PTR(-ENOMEM);
++
++ return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++ /* Now it should be safe to free the task group */
++ sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++ /* Wait for possible concurrent references to the task group to complete */
++ call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++void sched_offline_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++ return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++ struct task_group *parent = css_tg(parent_css);
++ struct task_group *tg;
++
++ if (!parent) {
++ /* This is early initialization for the top cgroup */
++ return &root_task_group.css;
++ }
++
++ tg = sched_create_group(parent);
++ if (IS_ERR(tg))
++ return ERR_PTR(-ENOMEM);
++ return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++ struct task_group *parent = css_tg(css->parent);
++
++ if (parent)
++ sched_online_group(tg, parent);
++ return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ sched_offline_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ /*
++ * Relies on the RCU grace period between css_released() and this.
++ */
++ sched_free_group(tg);
++}
++
++static void cpu_cgroup_fork(struct task_struct *task)
++{
++}
++
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++ return 0;
++}
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++static struct cftype cpu_files[] = {
++ { } /* Terminate */
++};
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++ .css_alloc = cpu_cgroup_css_alloc,
++ .css_online = cpu_cgroup_css_online,
++ .css_released = cpu_cgroup_css_released,
++ .css_free = cpu_cgroup_css_free,
++ .fork = cpu_cgroup_fork,
++ .can_attach = cpu_cgroup_can_attach,
++ .attach = cpu_cgroup_attach,
++ .legacy_cftypes = cpu_files,
++ .early_init = true,
++};
++#endif /* CONFIG_CGROUP_SCHED */
+diff -Nur a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
+--- a/kernel/sched/MuQSS.h 1970-01-01 01:00:00.000000000 +0100
++++ b/kernel/sched/MuQSS.h 2018-11-03 16:06:32.715529032 +0000
+@@ -0,0 +1,725 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#include <linux/sched.h>
++#include <linux/cpuidle.h>
++#include <linux/freezer.h>
++#include <linux/interrupt.h>
++#include <linux/skip_list.h>
++#include <linux/stop_machine.h>
++#include <linux/sched/topology.h>
++#include <linux/u64_stats_sync.h>
++#include <linux/tsacct_kern.h>
++#include <linux/sched/clock.h>
++#include <linux/sched/wake_q.h>
++#include <linux/sched/signal.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/cpufreq.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/hotplug.h>
++#include <linux/sched/task.h>
++#include <linux/sched/task_stack.h>
++#include <linux/sched/cputime.h>
++#include <linux/sched/init.h>
++
++#include <linux/kernel_stat.h>
++#include <linux/tick.h>
++#include <linux/slab.h>
++
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#endif
++
++#include "cpuacct.h"
++
++#ifndef MUQSS_SCHED_H
++#define MUQSS_SCHED_H
++
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
++#else
++# define SCHED_WARN_ON(x) ((void)(x))
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED 1
++#define TASK_ON_RQ_MIGRATING 2
++
++struct rq;
++
++#ifdef CONFIG_SMP
++
++static inline bool sched_asym_prefer(int a, int b)
++{
++ return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
++}
++
++/*
++ * We add the notion of a root-domain which will be used to define per-domain
++ * variables. Each exclusive cpuset essentially defines an island domain by
++ * fully partitioning the member cpus from any other cpuset. Whenever a new
++ * exclusive cpuset is created, we also create and attach a new root-domain
++ * object.
++ *
++ */
++struct root_domain {
++ atomic_t refcount;
++ atomic_t rto_count;
++ struct rcu_head rcu;
++ cpumask_var_t span;
++ cpumask_var_t online;
++
++ /* Indicate more than one runnable task for any CPU */
++ bool overload;
++
++ /*
++ * The bit corresponding to a CPU gets set here if such CPU has more
++ * than one runnable -deadline task (as it is below for RT tasks).
++ */
++ cpumask_var_t dlo_mask;
++ atomic_t dlo_count;
++ /* Replace unused CFS structures with void */
++ //struct dl_bw dl_bw;
++ //struct cpudl cpudl;
++ void *dl_bw;
++ void *cpudl;
++
++ /*
++ * The "RT overload" flag: it gets set if a CPU has more than
++ * one runnable RT task.
++ */
++ cpumask_var_t rto_mask;
++ //struct cpupri cpupri;
++ void *cpupri;
++
++ unsigned long max_cpu_capacity;
++};
++
++extern struct root_domain def_root_domain;
++extern struct mutex sched_domains_mutex;
++
++extern void init_defrootdomain(void);
++extern int sched_init_domains(const struct cpumask *cpu_map);
++extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
++
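++/*
++ * MuQSS does not use the cpupri/cpudl/deadline-bandwidth machinery, so the
++ * hooks the shared root-domain code expects are reduced to no-op stubs.
++ */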
++static inline void cpupri_cleanup(void __maybe_unused *cpupri)
++{
++}
++
++static inline void cpudl_cleanup(void __maybe_unused *cpudl)
++{
++}
++
++static inline void init_dl_bw(void __maybe_unused *dl_bw)
++{
++}
++
++static inline int cpudl_init(void __maybe_unused *dl_bw)
++{
++ return 0;
++}
++
++static inline int cpupri_init(void __maybe_unused *cpupri)
++{
++ return 0;
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++ raw_spinlock_t lock;
++
++ struct task_struct *curr, *idle, *stop;
++ struct mm_struct *prev_mm;
++
++ unsigned int nr_running;
++ /*
++ * This is part of a global counter where only the total sum
++ * over all CPUs matters. A task can increase this counter on
++ * one CPU and if it got migrated afterwards it may decrease
++ * it on another CPU. Always updated under the runqueue lock:
++ */
++ unsigned long nr_uninterruptible;
++ u64 nr_switches;
++
++ /* Stored data about rq->curr to work outside rq lock */
++ u64 rq_deadline;
++ int rq_prio;
++
++ /* Best queued id for use outside lock */
++ u64 best_key;
++
++ unsigned long last_scheduler_tick; /* Last jiffy this RQ ticked */
++ unsigned long last_jiffy; /* Last jiffy this RQ updated rq clock */
++ u64 niffies; /* Last time this RQ updated rq clock */
++ u64 last_niffy; /* Last niffies as updated by local clock */
++ u64 last_jiffy_niffies; /* Niffies @ last_jiffy */
++
++ u64 load_update; /* When we last updated load */
++ unsigned long load_avg; /* Rolling load average */
++#ifdef CONFIG_SMT_NICE
++ struct mm_struct *rq_mm;
++ int rq_smt_bias; /* Policy/nice level bias across smt siblings */
++#endif
++ /* Accurate timekeeping data */
++ unsigned long user_ns, nice_ns, irq_ns, softirq_ns, system_ns,
++ iowait_ns, idle_ns;
++ atomic_t nr_iowait;
++
++ skiplist_node node;
++ skiplist *sl;
++#ifdef CONFIG_SMP
++ struct task_struct *preempt; /* Preempt triggered on this task */
++ struct task_struct *preempting; /* Hint only, what task is preempting */
++
++ int cpu; /* cpu of this runqueue */
++ bool online;
++
++ struct root_domain *rd;
++ struct sched_domain *sd;
++
++ unsigned long cpu_capacity_orig;
++
++ int *cpu_locality; /* CPU relative cache distance */
++ struct rq **rq_order; /* RQs ordered by relative cache distance */
++
++#ifdef CONFIG_SCHED_SMT
++ cpumask_t thread_mask;
++ bool (*siblings_idle)(struct rq *rq);
++ /* See if all smt siblings are idle */
++#endif /* CONFIG_SCHED_SMT */
++#ifdef CONFIG_SCHED_MC
++ cpumask_t core_mask;
++ bool (*cache_idle)(struct rq *rq);
++ /* See if all cache siblings are idle */
++#endif /* CONFIG_SCHED_MC */
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++ u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++ u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++ u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++ u64 clock, old_clock, last_tick;
++ u64 clock_task;
++ int dither;
++
++ int iso_ticks;
++ bool iso_refractory;
++
++#ifdef CONFIG_HIGH_RES_TIMERS
++ struct hrtimer hrexpiry_timer;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++ /* latency stats */
++ struct sched_info rq_sched_info;
++ unsigned long long rq_cpu_time;
++ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++ /* sys_sched_yield() stats */
++ unsigned int yld_count;
++
++ /* schedule() stats */
++ unsigned int sched_switch;
++ unsigned int sched_count;
++ unsigned int sched_goidle;
++
++ /* try_to_wake_up() stats */
++ unsigned int ttwu_count;
++ unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_SMP
++ struct llist_head wake_list;
++#endif
++
++#ifdef CONFIG_CPU_IDLE
++ /* Must be inspected within a rcu lock section */
++ struct cpuidle_state *idle_state;
++#endif
++};
++
++#ifdef CONFIG_SMP
++struct rq *cpu_rq(int cpu);
++#endif
++
++#ifndef CONFIG_SMP
++extern struct rq *uprq;
++#define cpu_rq(cpu) (uprq)
++#define this_rq() (uprq)
++#define raw_rq() (uprq)
++#define task_rq(p) (uprq)
++#define cpu_curr(cpu) ((uprq)->curr)
++#else /* CONFIG_SMP */
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define this_rq() this_cpu_ptr(&runqueues)
++#define raw_rq() raw_cpu_ptr(&runqueues)
++#define task_rq(p) cpu_rq(task_cpu(p))
++#endif /* CONFIG_SMP */
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++ return rq->curr == p;
++}
++
++static inline int task_running(struct rq *rq, struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++ return p->on_cpu;
++#else
++ return task_current(rq, p);
++#endif
++}
++
++static inline void rq_lock(struct rq *rq)
++ __acquires(rq->lock)
++{
++ raw_spin_lock(&rq->lock);
++}
++
++static inline void rq_unlock(struct rq *rq)
++ __releases(rq->lock)
++{
++ raw_spin_unlock(&rq->lock);
++}
++
++static inline void rq_lock_irq(struct rq *rq)
++ __acquires(rq->lock)
++{
++ raw_spin_lock_irq(&rq->lock);
++}
++
++static inline void rq_unlock_irq(struct rq *rq)
++ __releases(rq->lock)
++{
++ raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline void rq_lock_irqsave(struct rq *rq, unsigned long *flags)
++ __acquires(rq->lock)
++{
++ raw_spin_lock_irqsave(&rq->lock, *flags);
++}
++
++static inline void rq_unlock_irqrestore(struct rq *rq, unsigned long *flags)
++ __releases(rq->lock)
++{
++ raw_spin_unlock_irqrestore(&rq->lock, *flags);
++}
++
++static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
++ __acquires(p->pi_lock)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ while (42) {
++ raw_spin_lock_irqsave(&p->pi_lock, *flags);
++ rq = task_rq(p);
++ raw_spin_lock(&rq->lock);
++ if (likely(rq == task_rq(p)))
++ break;
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++ }
++ return rq;
++}
++
++static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
++ __releases(rq->lock)
++ __releases(p->pi_lock)
++{
++ rq_unlock(rq);
++ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++}
++
++static inline struct rq *__task_rq_lock(struct task_struct *p)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ lockdep_assert_held(&p->pi_lock);
++
++ while (42) {
++ rq = task_rq(p);
++ raw_spin_lock(&rq->lock);
++ if (likely(rq == task_rq(p)))
++ break;
++ raw_spin_unlock(&rq->lock);
++ }
++ return rq;
++}
++
++static inline void __task_rq_unlock(struct rq *rq)
++{
++ rq_unlock(rq);
++}
++
++/*
++ * {de,en}queue flags: Most not used on MuQSS.
++ *
++ * DEQUEUE_SLEEP - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
++ * are in a known state which allows modification. Such pairs
++ * should preserve as much state as possible.
++ *
++ * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
++ * in the runqueue.
++ *
++ * ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
++ * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
++ * ENQUEUE_MIGRATED - the task was migrated during wakeup
++ *
++ */
++
++#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */
++
++#define ENQUEUE_RESTORE 0x02
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++ return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++ lockdep_assert_held(&rq->lock);
++
++ return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++ lockdep_assert_held(&rq->lock);
++
++ return rq->clock_task;
++}
++
++#ifdef CONFIG_NUMA
++enum numa_topology_type {
++ NUMA_DIRECT,
++ NUMA_GLUELESS_MESH,
++ NUMA_BACKPLANE,
++};
++extern enum numa_topology_type sched_numa_topology_type;
++extern int sched_max_numa_distance;
++extern bool find_numa_distance(int distance);
++
++extern void sched_init_numa(void);
++extern void sched_domains_numa_masks_set(unsigned int cpu);
++extern void sched_domains_numa_masks_clear(unsigned int cpu);
++#else
++static inline void sched_init_numa(void) { }
++static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
++static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
++#endif
++
++extern struct mutex sched_domains_mutex;
++extern struct static_key_false sched_schedstats;
++
++#define rcu_dereference_check_sched_domain(p) \
++ rcu_dereference_check((p), \
++ lockdep_is_held(&sched_domains_mutex))
++
++#ifdef CONFIG_SMP
++
++/*
++ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
++ * See detach_destroy_domains: synchronize_sched for details.
++ *
++ * The domain tree of any CPU may only be accessed from within
++ * preempt-disabled sections.
++ */
++#define for_each_domain(cpu, __sd) \
++ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
++ __sd; __sd = __sd->parent)
++
++#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
++
++/**
++ * highest_flag_domain - Return highest sched_domain containing flag.
++ * @cpu: The cpu whose highest level of sched domain is to
++ * be returned.
++ * @flag: The flag to check for the highest sched_domain
++ * for the given cpu.
++ *
++ * Returns the highest sched_domain of a cpu which contains the given flag.
++ */
++static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
++{
++ struct sched_domain *sd, *hsd = NULL;
++
++ for_each_domain(cpu, sd) {
++ if (!(sd->flags & flag))
++ break;
++ hsd = sd;
++ }
++
++ return hsd;
++}
++
++static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
++{
++ struct sched_domain *sd;
++
++ for_each_domain(cpu, sd) {
++ if (sd->flags & flag)
++ break;
++ }
++
++ return sd;
++}
++
++DECLARE_PER_CPU(struct sched_domain *, sd_llc);
++DECLARE_PER_CPU(int, sd_llc_size);
++DECLARE_PER_CPU(int, sd_llc_id);
++DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
++DECLARE_PER_CPU(struct sched_domain *, sd_numa);
++DECLARE_PER_CPU(struct sched_domain *, sd_asym);
++
++struct sched_group_capacity {
++ atomic_t ref;
++ /*
++ * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
++ * for a single CPU.
++ */
++ unsigned long capacity;
++ unsigned long min_capacity; /* Min per-CPU capacity in group */
++ unsigned long next_update;
++ int imbalance; /* XXX unrelated to capacity but shared group state */
++
++#ifdef CONFIG_SCHED_DEBUG
++ int id;
++#endif
++
++ unsigned long cpumask[0]; /* balance mask */
++};
++
++struct sched_group {
++ struct sched_group *next; /* Must be a circular list */
++ atomic_t ref;
++
++ unsigned int group_weight;
++ struct sched_group_capacity *sgc;
++ int asym_prefer_cpu; /* cpu of highest priority in group */
++
++ /*
++ * The CPUs this group covers.
++ *
++ * NOTE: this field is variable length. (Allocated dynamically
++ * by attaching extra space to the end of the structure,
++ * depending on how many CPUs the kernel has booted up with)
++ */
++ unsigned long cpumask[0];
++};
++
++static inline struct cpumask *sched_group_span(struct sched_group *sg)
++{
++ return to_cpumask(sg->cpumask);
++}
++
++/*
++ * See build_balance_mask().
++ */
++static inline struct cpumask *group_balance_mask(struct sched_group *sg)
++{
++ return to_cpumask(sg->sgc->cpumask);
++}
++
++/**
++ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
++ * @group: The group whose first cpu is to be returned.
++ */
++static inline unsigned int group_first_cpu(struct sched_group *group)
++{
++ return cpumask_first(sched_group_span(group));
++}
++
++
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void dirty_sched_domain_sysctl(int cpu);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void dirty_sched_domain_sysctl(int cpu)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern void sched_ttwu_pending(void);
++extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
++extern void set_rq_online (struct rq *rq);
++extern void set_rq_offline(struct rq *rq);
++extern bool sched_smp_initialized;
++
++static inline void update_group_capacity(struct sched_domain *sd, int cpu)
++{
++}
++
++static inline void trigger_load_balance(struct rq *rq)
++{
++}
++
++#define sched_feat(x) 0
++
++#else /* CONFIG_SMP */
++
++static inline void sched_ttwu_pending(void) { }
++
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++ struct cpuidle_state *idle_state)
++{
++ rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++ SCHED_WARN_ON(!rcu_read_lock_held());
++ return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++ struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++ return NULL;
++}
++#endif
++
++#ifdef CONFIG_SCHED_DEBUG
++extern bool sched_debug_enabled;
++#endif
++
++extern void schedule_idle(void);
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++ u64 total;
++ u64 tick_delta;
++ u64 irq_start_time;
++ struct u64_stats_sync sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted and would never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++ unsigned int seq;
++ u64 total;
++
++ do {
++ seq = __u64_stats_fetch_begin(&irqtime->sync);
++ total = irqtime->total;
++ } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++ return total;
++}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_SMP
++static inline int cpu_of(struct rq *rq)
++{
++ return rq->cpu;
++}
++#else /* CONFIG_SMP */
++static inline int cpu_of(struct rq *rq)
++{
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
++
++static inline void cpufreq_trigger(struct rq *rq, unsigned int flags)
++{
++ struct update_util_data *data;
++
++ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++ cpu_of(rq)));
++
++ if (data)
++ data->func(data, rq->niffies, flags);
++}
++#else
++static inline void cpufreq_trigger(struct rq *rq, unsigned int flag)
++{
++}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant() (true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant() (false)
++#endif
++
++/*
++ * This should only be called when current == rq->idle. Dodgy workaround for
++ * when softirqs are pending and we are in the idle loop. Setting current to
++ * resched will kick us out of the idle loop and the softirqs will be serviced
++ * on our next pass through schedule().
++ */
++static inline bool softirq_pending(int cpu)
++{
++ if (likely(!local_softirq_pending()))
++ return false;
++ set_tsk_need_resched(current);
++ return true;
++}
++
++#ifdef CONFIG_64BIT
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ return tsk_seruntime(t);
++}
++#else
++struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags);
++void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags);
++
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ unsigned long flags;
++ u64 ns;
++ struct rq *rq;
++
++ rq = task_rq_lock(t, &flags);
++ ns = tsk_seruntime(t);
++ task_rq_unlock(rq, t, &flags);
++
++ return ns;
++}
++#endif
++
++#endif /* MUQSS_SCHED_H */
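The task_rq_lock()/__task_rq_lock() helpers above use the usual double-check pattern: take p->pi_lock, look up the task's runqueue, lock it, and retry if the task migrated in between. A minimal sketch of a caller, hedged and not part of the patch (read_task_deadline() is a hypothetical helper; rq_deadline is the field declared in struct rq above):

/* Hedged sketch: read a per-rq field of a task under its runqueue lock. */
static u64 read_task_deadline(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 deadline;

	rq = task_rq_lock(p, &flags);	/* takes p->pi_lock, then rq->lock */
	deadline = rq->rq_deadline;	/* rq cannot change under us here */
	task_rq_unlock(rq, p, &flags);	/* drops both locks */

	return deadline;
}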
+diff -Nur a/kernel/sched/sched.h b/kernel/sched/sched.h
+--- a/kernel/sched/sched.h 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/sched/sched.h 2018-11-03 16:06:32.717529096 +0000
+@@ -1,5 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+
++#ifdef CONFIG_SCHED_MUQSS
++#include "MuQSS.h"
++#else /* CONFIG_SCHED_MUQSS */
+ #include <linux/sched.h>
+ #include <linux/sched/autogroup.h>
+ #include <linux/sched/sysctl.h>
+@@ -2103,3 +2106,29 @@
+ #else /* arch_scale_freq_capacity */
+ #define arch_scale_freq_invariant() (false)
+ #endif
++
++static inline bool softirq_pending(int cpu)
++{
++ return false;
++}
++
++#ifdef CONFIG_64BIT
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ return t->se.sum_exec_runtime;
++}
++#else
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ u64 ns;
++ struct rq_flags rf;
++ struct rq *rq;
++
++ rq = task_rq_lock(t, &rf);
++ ns = t->se.sum_exec_runtime;
++ task_rq_unlock(rq, t, &rf);
++
++ return ns;
++}
++#endif
++#endif /* CONFIG_SCHED_MUQSS */
+diff -Nur a/kernel/skip_list.c b/kernel/skip_list.c
+--- a/kernel/skip_list.c 1970-01-01 01:00:00.000000000 +0100
++++ b/kernel/skip_list.c 2018-11-03 16:06:32.717529096 +0000
+@@ -0,0 +1,148 @@
++/*
++ Copyright (C) 2011,2016 Con Kolivas.
++
++ Code based on example originally by William Pugh.
++
++Skip Lists are a probabilistic alternative to balanced trees, as
++described in the June 1990 issue of CACM and were invented by
++William Pugh in 1987.
++
++A couple of comments about this implementation:
++The routine randomLevel has been hard-coded to generate random
++levels using p=0.25. It can be easily changed.
++
++The insertion routine has been implemented so as to use the
++dirty hack described in the CACM paper: if a random level is
++generated that is more than the current maximum level, the
++current maximum level plus one is used instead.
++
++Levels start at zero and go up to MaxLevel (which is equal to
++MaxNumberOfLevels-1).
++
++The routines defined in this file are:
++
++init: defines slnode
++
++new_skiplist: returns a new, empty list
++
++randomLevel: Returns a random level based on a u64 random seed passed to it.
++In MuQSS, the "niffy" time is used for this purpose.
++
++insert(l,key, value): inserts the binding (key, value) into l. This operation
++occurs in O(log n) time.
++
++delnode(slnode, l, node): deletes any binding of key from the l based on the
++actual node value. This operation occurs in O(k) time where k is the
++number of levels of the node in question (max 8). The original delete
++function occurred in O(log n) time and involved a search.
++
++MuQSS Notes: In this implementation of skiplists, there are bidirectional
++next/prev pointers and the insert function returns a pointer to the actual
++node in which the value is stored. The key here is chosen by the scheduler so as to
++sort tasks according to the priority list requirements and is no longer used
++by the scheduler after insertion. The scheduler lookup, however, occurs in
++O(1) time because it is always the first item in the level 0 linked list.
++Since the task struct stores a copy of the node pointer upon skiplist_insert,
++it can also remove it much faster than the original implementation with the
++aid of prev<->next pointer manipulation and no searching.
++
++*/
++
++#include <linux/slab.h>
++#include <linux/skip_list.h>
++
++#define MaxNumberOfLevels 8
++#define MaxLevel (MaxNumberOfLevels - 1)
++
++void skiplist_init(skiplist_node *slnode)
++{
++ int i;
++
++ slnode->key = 0xFFFFFFFFFFFFFFFF;
++ slnode->level = 0;
++ slnode->value = NULL;
++ for (i = 0; i < MaxNumberOfLevels; i++)
++ slnode->next[i] = slnode->prev[i] = slnode;
++}
++
++skiplist *new_skiplist(skiplist_node *slnode)
++{
++ skiplist *l = kzalloc(sizeof(skiplist), GFP_ATOMIC);
++
++ BUG_ON(!l);
++ l->header = slnode;
++ return l;
++}
++
++void free_skiplist(skiplist *l)
++{
++ skiplist_node *p, *q;
++
++ p = l->header;
++ do {
++ q = p->next[0];
++ p->next[0]->prev[0] = q->prev[0];
++ skiplist_node_init(p);
++ p = q;
++ } while (p != l->header);
++ kfree(l);
++}
++
++void skiplist_node_init(skiplist_node *node)
++{
++ memset(node, 0, sizeof(skiplist_node));
++}
++
++static inline unsigned int randomLevel(const long unsigned int randseed)
++{
++ return find_first_bit(&randseed, MaxLevel) / 2;
++}
++
++void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed)
++{
++ skiplist_node *update[MaxNumberOfLevels];
++ skiplist_node *p, *q;
++ int k = l->level;
++
++ p = l->header;
++ do {
++ while (q = p->next[k], q->key <= key)
++ p = q;
++ update[k] = p;
++ } while (--k >= 0);
++
++ ++l->entries;
++ k = randomLevel(randseed);
++ if (k > l->level) {
++ k = ++l->level;
++ update[k] = l->header;
++ }
++
++ node->level = k;
++ node->key = key;
++ node->value = value;
++ do {
++ p = update[k];
++ node->next[k] = p->next[k];
++ p->next[k] = node;
++ node->prev[k] = p;
++ node->next[k]->prev[k] = node;
++ } while (--k >= 0);
++}
++
++void skiplist_delete(skiplist *l, skiplist_node *node)
++{
++ int k, m = node->level;
++
++ for (k = 0; k <= m; k++) {
++ node->prev[k]->next[k] = node->next[k];
++ node->next[k]->prev[k] = node->prev[k];
++ }
++ skiplist_node_init(node);
++ if (m == l->level) {
++ while (l->header->next[m] == l->header && l->header->prev[m] == l->header && m > 0)
++ m--;
++ l->level = m;
++ }
++ l->entries--;
++}
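The comment block at the top of this file promises O(log n) insertion, O(k) deletion with no search, and O(1) lookup of the first level-0 entry. A minimal usage sketch of the API added above, hedged: it assumes keyType and valueType in <linux/skip_list.h> (not shown in this patch) resolve to u64 and void *, and all example_* names are illustrative.

/* Hedged sketch only; mirrors how a scheduler-side caller might use the API. */
static skiplist_node example_header;
static skiplist *example_list;

static void example_setup(void)
{
	skiplist_init(&example_header);
	example_list = new_skiplist(&example_header);	/* GFP_ATOMIC allocation */
}

static void example_insert(skiplist_node *n, u64 key, void *value, u64 niffies)
{
	/* key orders the entries; the clock value seeds the random level */
	skiplist_insert(example_list, n, key, value, (unsigned int)niffies);
}

static void *example_pop(void)
{
	skiplist_node *first = example_list->header->next[0];
	void *value;

	if (first == example_list->header)	/* empty: header points to itself */
		return NULL;
	value = first->value;	/* save it; skiplist_delete() re-inits the node */
	skiplist_delete(example_list, first);
	return value;
}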
+diff -Nur a/kernel/sysctl.c b/kernel/sysctl.c
+--- a/kernel/sysctl.c 2018-11-03 16:00:51.933620936 +0000
++++ b/kernel/sysctl.c 2018-11-03 16:12:48.444570622 +0000
+@@ -133,8 +133,14 @@
+ static int __maybe_unused two __read_only = 2;
+ static int __maybe_unused four __read_only = 4;
+ static unsigned long one_ul __read_only = 1;
+-static int one_hundred __read_only = 100;
+-static int one_thousand __read_only = 1000;
++static int one_hundred __read_only = 100;
++static int one_thousand __read_only = 1000;
++#ifdef CONFIG_SCHED_MUQSS
++extern int rr_interval;
++extern int sched_interactive;
++extern int sched_iso_cpu;
++extern int sched_yield_type;
++#endif
+ #ifdef CONFIG_PRINTK
+ static int ten_thousand __read_only = 10000;
+ #endif
+@@ -296,7 +302,7 @@
+ { }
+ };
+
+-#ifdef CONFIG_SCHED_DEBUG
++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_MUQSS)
+ static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
+ static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
+ static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
+@@ -313,6 +319,7 @@
+ #endif
+
+ static struct ctl_table kern_table[] = {
++#ifndef CONFIG_SCHED_MUQSS
+ {
+ .procname = "sched_child_runs_first",
+ .data = &sysctl_sched_child_runs_first,
+@@ -475,6 +482,7 @@
+ .extra1 = &one,
+ },
+ #endif
++#endif /* !CONFIG_SCHED_MUQSS */
+ #ifdef CONFIG_PROVE_LOCKING
+ {
+ .procname = "prove_locking",
+@@ -1073,6 +1081,44 @@
+ .proc_handler = proc_dointvec,
+ },
+ #endif
++#ifdef CONFIG_SCHED_MUQSS
++ {
++ .procname = "rr_interval",
++ .data = &rr_interval,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &one_thousand,
++ },
++ {
++ .procname = "interactive",
++ .data = &sched_interactive,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++ {
++ .procname = "iso_cpu",
++ .data = &sched_iso_cpu,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one_hundred,
++ },
++ {
++ .procname = "yield_type",
++ .data = &sched_yield_type,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &two,
++ },
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ {
+ .procname = "spin_retry",
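The four entries added above land under /proc/sys/kernel, with proc_dointvec_minmax clamping rr_interval to 1-1000, interactive to 0-1, iso_cpu to 0-100 and yield_type to 0-2. A hedged userspace sketch that reads one of them (plain C, nothing MuQSS-specific beyond the path):

/* Hedged sketch: read the MuQSS round-robin interval, if the sysctl exists. */
#include <stdio.h>

int main(void)
{
	int rr = -1;
	FILE *f = fopen("/proc/sys/kernel/rr_interval", "r");

	if (!f)
		return 1;	/* kernel likely built without CONFIG_SCHED_MUQSS */
	if (fscanf(f, "%d", &rr) == 1)
		printf("rr_interval = %d\n", rr);
	fclose(f);
	return 0;
}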
+diff -Nur a/kernel/time/clockevents.c b/kernel/time/clockevents.c
+--- a/kernel/time/clockevents.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/time/clockevents.c 2018-11-03 16:06:32.719529160 +0000
+@@ -198,8 +198,13 @@
+
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
+
++#ifdef CONFIG_SCHED_MUQSS
++/* Limit min_delta to 100us */
++#define MIN_DELTA_LIMIT (NSEC_PER_SEC / 10000)
++#else
+ /* Limit min_delta to a jiffie */
+ #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
++#endif
+
+ /**
+ * clockevents_increase_min_delta - raise minimum delta of a clock event device
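With CONFIG_SCHED_MUQSS the minimum clockevent delta is capped at NSEC_PER_SEC / 10000 = 100,000 ns (100 us), whereas the stock limit of NSEC_PER_SEC / HZ works out to 10 ms at the HZ=100 this kernel uses. A hedged compile-time check of that arithmetic (the EX_* macros are illustrative stand-ins, not kernel symbols):

/* Hedged arithmetic check of the two MIN_DELTA_LIMIT variants above. */
#define EX_NSEC_PER_SEC	1000000000ULL
#define EX_HZ		100

_Static_assert(EX_NSEC_PER_SEC / 10000 == 100000, "MuQSS limit: 100 us");
_Static_assert(EX_NSEC_PER_SEC / EX_HZ == 10000000, "stock limit at HZ=100: 10 ms");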
+diff -Nur a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+--- a/kernel/time/posix-cpu-timers.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/time/posix-cpu-timers.c 2018-11-03 16:06:32.719529160 +0000
+@@ -818,7 +818,7 @@
+ tsk_expires->virt_exp = expires;
+
+ tsk_expires->sched_exp = check_timers_list(++timers, firing,
+- tsk->se.sum_exec_runtime);
++ tsk_seruntime(tsk));
+
+ /*
+ * Check for the special case thread timers.
+@@ -828,7 +828,7 @@
+ unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+
+ if (hard != RLIM_INFINITY &&
+- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
++ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+ /*
+ * At the hard limit, we just die.
+ * No need to calculate anything else now.
+@@ -840,7 +840,7 @@
+ __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+ return;
+ }
+- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
++ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
+ /*
+ * At the soft limit, send a SIGXCPU every second.
+ */
+@@ -1081,7 +1081,7 @@
+ struct task_cputime task_sample;
+
+ task_cputime(tsk, &task_sample.utime, &task_sample.stime);
+- task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
++ task_sample.sum_exec_runtime = tsk_seruntime(tsk);
+ if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+ return 1;
+ }
+diff -Nur a/kernel/time/timer.c b/kernel/time/timer.c
+--- a/kernel/time/timer.c 2018-11-03 16:00:51.934620967 +0000
++++ b/kernel/time/timer.c 2018-11-03 16:06:32.720529192 +0000
+@@ -1434,7 +1434,7 @@
+ * Check, if the next hrtimer event is before the next timer wheel
+ * event:
+ */
+-static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
++static u64 cmp_next_hrtimer_event(struct timer_base *base, u64 basem, u64 expires)
+ {
+ u64 nextevt = hrtimer_get_next_event();
+
+@@ -1452,6 +1452,9 @@
+ if (nextevt <= basem)
+ return basem;
+
++ if (nextevt < expires && nextevt - basem <= TICK_NSEC)
++ base->is_idle = false;
++
+ /*
+ * Round up to the next jiffie. High resolution timers are
+ * off, so the hrtimers are expired in the tick and we need to
+@@ -1521,7 +1524,7 @@
+ }
+ raw_spin_unlock(&base->lock);
+
+- return cmp_next_hrtimer_event(basem, expires);
++ return cmp_next_hrtimer_event(base, basem, expires);
+ }
+
+ /**
+diff -Nur a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+--- a/kernel/trace/trace_selftest.c 2018-10-10 07:54:28.000000000 +0100
++++ b/kernel/trace/trace_selftest.c 2018-11-03 16:06:32.720529192 +0000
+@@ -1041,10 +1041,15 @@
+ {
+ /* Make this a -deadline thread */
+ static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_MUQSS
++ /* No deadline on MuQSS, use RR */
++ .sched_policy = SCHED_RR,
++#else
+ .sched_policy = SCHED_DEADLINE,
+ .sched_runtime = 100000ULL,
+ .sched_deadline = 10000000ULL,
+ .sched_period = 10000000ULL
++#endif
+ };
+ struct wakeup_test_data *x = data;
+
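The hunk above swaps the selftest thread's attribute from SCHED_DEADLINE to SCHED_RR, since MuQSS provides no deadline class. For orientation, a hedged userspace analogue that applies SCHED_RR through the raw sched_setattr syscall (glibc has no wrapper; the struct below mirrors the kernel's UAPI struct sched_attr, and make_me_rr() is an illustrative name):

#define _GNU_SOURCE
#include <sched.h>		/* SCHED_RR */
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

struct example_sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime, sched_deadline, sched_period;
};

static int make_me_rr(void)
{
	struct example_sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_RR,
		.sched_priority	= 1,	/* any valid RT priority (1-99) */
	};

	return syscall(SYS_sched_setattr, 0, &attr, 0);	/* 0 = current thread */
}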
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0002-BFQ-v8r12-20180404.patch b/sys-kernel/linux-sources-redcore-lts/files/0002-BFQ-v8r12-20180404.patch
new file mode 100644
index 00000000..104325d6
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0002-BFQ-v8r12-20180404.patch
@@ -0,0 +1,4611 @@
+From 7bd365a925748767d7ed807e5498f90bae0ebc25 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 14 Nov 2017 08:28:45 +0100
+Subject: [PATCH 01/23] block, bfq-mq: turn BUG_ON on request-size into WARN_ON
+
+BFQ has many checks of internal and external consistency. One of them
+checks that an I/O request still has sectors to serve, if it happens
+to be retired without being served. If the request has no sector to
+serve, a BUG_ON signals the failure and causes the kernel to
+terminate. Yet, from a crash report by a user [1], this condition may
+happen to hold, in apparently correct functioning, for I/O with a
+CD/DVD.
+
+To address this issue, this commit turns the above BUG_ON into a
+WARN_ON. This commit also adds a companion WARN_ON on request
+insertion into the scheduler.
+
+[1] https://groups.google.com/d/msg/bfq-iosched/DDOTJBroBa4/VyU1zUFtCgAJ
+
+Reported-by: Alexandre Frade <admfrade@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 0c09609a6099..0fc757ae7a42 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1540,6 +1540,8 @@ static void bfq_add_request(struct request *rq)
+
+ BUG_ON(!RQ_BFQQ(rq));
+ BUG_ON(RQ_BFQQ(rq) != bfqq);
++ WARN_ON(blk_rq_sectors(rq) == 0);
++
+ elv_rb_add(&bfqq->sort_list, rq);
+
+ /*
+@@ -4962,7 +4964,7 @@ static void bfq_finish_request(struct request *rq)
+ rq_io_start_time_ns(rq),
+ rq->cmd_flags);
+
+- BUG_ON(blk_rq_sectors(rq) == 0 && !(rq->rq_flags & RQF_STARTED));
++ WARN_ON(blk_rq_sectors(rq) == 0 && !(rq->rq_flags & RQF_STARTED));
+
+ if (likely(rq->rq_flags & RQF_STARTED)) {
+ unsigned long flags;
+
+From 1097d368a20456c88acd75b3184c68df38e8f7b8 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Sun, 12 Nov 2017 22:43:46 +0100
+Subject: [PATCH 02/23] block, bfq-sq, bfq-mq: consider also past I/O in soft
+ real-time detection
+
+BFQ privileges the I/O of soft real-time applications, such as video
+players, to guarantee these applications a high bandwidth and a low
+latency. In this respect, it is not easy to correctly detect when an
+application is soft real-time. A particularly nasty false positive is
+that of an I/O-bound application that occasionally happens to meet all
+requirements to be deemed as soft real-time. After being detected as
+soft real-time, such an application monopolizes the device. Fortunately,
+BFQ will realize soon that the application is actually not soft
+real-time and suspend every privilege. Yet, the application may happen
+again to be wrongly detected as soft real-time, and so on.
+
+As highlighted by our tests, this problem causes BFQ to occasionally
+fail to guarantee a high responsiveness, in the presence of heavy
+background I/O workloads. The reason is that the background workload
+happens to be detected as soft real-time, more or less frequently,
+during the execution of the interactive task under test. To give an
+idea, because of this problem, Libreoffice Writer occasionally takes 8
+seconds, instead of 3, to start up, if there are sequential reads and
+writes in the background, on a Kingston SSDNow V300.
+
+This commit addresses this issue by leveraging the following facts.
+
+The reason why some applications are detected as soft real-time despite
+all BFQ checks to avoid false positives, is simply that, during high
+CPU or storage-device load, I/O-bound applications may happen to do
+I/O slowly enough to meet all soft real-time requirements, and pass
+all BFQ extra checks. Yet, this happens only for limited time periods:
+slow-speed time intervals are usually interspersed between other time
+intervals during which these applications do I/O at a very high speed.
+To exploit these facts, this commit introduces a little change, in the
+detection of soft real-time behavior, to systematically consider also
+the recent past: the higher the speed was in the recent past, the
+later next I/O should arrive for the application to be considered as
+soft real-time. At the beginning of a slow-speed interval, the minimum
+arrival time allowed for the next I/O usually happens to still be so
+high as to fall *after* the end of the slow-speed period itself. As a
+consequence, the application does not risk being deemed as soft
+real-time during the slow-speed interval. Then, during the next
+high-speed interval, the application cannot, evidently, be deemed as
+soft real-time (exactly because of its speed), and so on.
+
+This extra filtering proved to be rather effective: in the above test,
+the frequency of false positives became so low that the start-up time
+was 3 seconds in all iterations (apart from occasional outliers,
+caused by page-cache-management issues, which are out of the scope of
+this commit, and cannot be solved by an I/O scheduler).
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+---
+ block/bfq-mq-iosched.c | 115 ++++++++++++++++++++++++++++++++++---------------
+ block/bfq-sq-iosched.c | 115 ++++++++++++++++++++++++++++++++++---------------
+ 2 files changed, 162 insertions(+), 68 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 0fc757ae7a42..4d06d900f45e 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -3201,37 +3201,78 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * whereas soft_rt_next_start is set to infinity for applications that do
+ * not.
+ *
+- * Unfortunately, even a greedy application may happen to behave in an
+- * isochronous way if the CPU load is high. In fact, the application may
+- * stop issuing requests while the CPUs are busy serving other processes,
+- * then restart, then stop again for a while, and so on. In addition, if
+- * the disk achieves a low enough throughput with the request pattern
+- * issued by the application (e.g., because the request pattern is random
+- * and/or the device is slow), then the application may meet the above
+- * bandwidth requirement too. To prevent such a greedy application to be
+- * deemed as soft real-time, a further rule is used in the computation of
+- * soft_rt_next_start: soft_rt_next_start must be higher than the current
+- * time plus the maximum time for which the arrival of a request is waited
+- * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
+- * This filters out greedy applications, as the latter issue instead their
+- * next request as soon as possible after the last one has been completed
+- * (in contrast, when a batch of requests is completed, a soft real-time
+- * application spends some time processing data).
++ * Unfortunately, even a greedy (i.e., I/O-bound) application may
++ * happen to meet, occasionally or systematically, both the above
++ * bandwidth and isochrony requirements. This may happen at least in
++ * the following circumstances. First, if the CPU load is high. The
++ * application may stop issuing requests while the CPUs are busy
++ * serving other processes, then restart, then stop again for a while,
++ * and so on. The other circumstances are related to the storage
++ * device: the storage device is highly loaded or reaches a low-enough
++ * throughput with the I/O of the application (e.g., because the I/O
++ * is random and/or the device is slow). In all these cases, the
++ * I/O of the application may be simply slowed down enough to meet
++ * the bandwidth and isochrony requirements. To reduce the probability
++ * that greedy applications are deemed as soft real-time in these
++ * corner cases, a further rule is used in the computation of
++ * soft_rt_next_start: the return value of this function is forced to
++ * be higher than the maximum between the following two quantities.
+ *
+- * Unfortunately, the last filter may easily generate false positives if
+- * only bfqd->bfq_slice_idle is used as a reference time interval and one
+- * or both the following cases occur:
+- * 1) HZ is so low that the duration of a jiffy is comparable to or higher
+- * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
+- * HZ=100.
++ * (a) Current time plus: (1) the maximum time for which the arrival
++ * of a request is waited for when a sync queue becomes idle,
++ * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
++ * postpone for a moment the reason for adding a few extra
++ * jiffies; we get back to it after next item (b). Lower-bounding
++ * the return value of this function with the current time plus
++ * bfqd->bfq_slice_idle tends to filter out greedy applications,
++ * because the latter issue their next request as soon as possible
++ * after the last one has been completed. In contrast, a soft
++ * real-time application spends some time processing data, after a
++ * batch of its requests has been completed.
++ *
++ * (b) Current value of bfqq->soft_rt_next_start. As pointed out
++ * above, greedy applications may happen to meet both the
++ * bandwidth and isochrony requirements under heavy CPU or
++ * storage-device load. In more detail, in these scenarios, these
++ * applications happen, only for limited time periods, to do I/O
++ * slowly enough to meet all the requirements described so far,
++ * including the filtering in above item (a). These slow-speed
++ * time intervals are usually interspersed between other time
++ * intervals during which these applications do I/O at a very high
++ * speed. Fortunately, exactly because of the high speed of the
++ * I/O in the high-speed intervals, the values returned by this
++ * function happen to be so high, near the end of any such
++ * high-speed interval, to be likely to fall *after* the end of
++ * the low-speed time interval that follows. These high values are
++ * stored in bfqq->soft_rt_next_start after each invocation of
++ * this function. As a consequence, if the last value of
++ * bfqq->soft_rt_next_start is constantly used to lower-bound the
++ * next value that this function may return, then, from the very
++ * beginning of a low-speed interval, bfqq->soft_rt_next_start is
++ * likely to be constantly kept so high that any I/O request
++ * issued during the low-speed interval is considered as arriving
++ * to soon for the application to be deemed as soft
++ * real-time. Then, in the high-speed interval that follows, the
++ * application will not be deemed as soft real-time, just because
++ * it will do I/O at a high speed. And so on.
++ *
++ * Getting back to the filtering in item (a), in the following two
++ * cases this filtering might be easily passed by a greedy
++ * application, if the reference quantity was just
++ * bfqd->bfq_slice_idle:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or
++ * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
++ * devices with HZ=100. The time granularity may be so coarse
++ * that the approximation, in jiffies, of bfqd->bfq_slice_idle
++ * is rather lower than the exact value.
+ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
+ * for a while, then suddenly 'jump' by several units to recover the lost
+ * increments. This seems to happen, e.g., inside virtual machines.
+- * To address this issue, we do not use as a reference time interval just
+- * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
+- * particular we add the minimum number of jiffies for which the filter
+- * seems to be quite precise also in embedded systems and KVM/QEMU virtual
+- * machines.
++ * To address this issue, in the filtering in (a) we do not use as a
++ * reference time interval just bfqd->bfq_slice_idle, but
++ * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
++ * minimum number of jiffies for which the filter seems to be quite
++ * precise also in embedded systems and KVM/QEMU virtual machines.
+ */
+ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+@@ -3243,10 +3284,11 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
+ bfqd->bfq_wr_max_softrt_rate));
+
+- return max(bfqq->last_idle_bklogged +
+- HZ * bfqq->service_from_backlogged /
+- bfqd->bfq_wr_max_softrt_rate,
+- jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
++ return max3(bfqq->soft_rt_next_start,
++ bfqq->last_idle_bklogged +
++ HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
+ }
+
+ /**
+@@ -4395,10 +4437,15 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfqq->split_time = bfq_smallest_from_now();
+
+ /*
+- * Set to the value for which bfqq will not be deemed as
+- * soft rt when it becomes backlogged.
++ * To not forget the possibly high bandwidth consumed by a
++ * process/queue in the recent past,
++ * bfq_bfqq_softrt_next_start() returns a value at least equal
++ * to the current value of bfqq->soft_rt_next_start (see
++ * comments on bfq_bfqq_softrt_next_start). Set
++ * soft_rt_next_start to now, to mean that bfqq has consumed
++ * no bandwidth so far.
+ */
+- bfqq->soft_rt_next_start = bfq_greatest_from_now();
++ bfqq->soft_rt_next_start = jiffies;
+
+ /* first request is almost certainly seeky */
+ bfqq->seek_history = 1;
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 4bbd7f4c0154..987dc255c82c 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -3089,37 +3089,78 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * whereas soft_rt_next_start is set to infinity for applications that do
+ * not.
+ *
+- * Unfortunately, even a greedy application may happen to behave in an
+- * isochronous way if the CPU load is high. In fact, the application may
+- * stop issuing requests while the CPUs are busy serving other processes,
+- * then restart, then stop again for a while, and so on. In addition, if
+- * the disk achieves a low enough throughput with the request pattern
+- * issued by the application (e.g., because the request pattern is random
+- * and/or the device is slow), then the application may meet the above
+- * bandwidth requirement too. To prevent such a greedy application to be
+- * deemed as soft real-time, a further rule is used in the computation of
+- * soft_rt_next_start: soft_rt_next_start must be higher than the current
+- * time plus the maximum time for which the arrival of a request is waited
+- * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
+- * This filters out greedy applications, as the latter issue instead their
+- * next request as soon as possible after the last one has been completed
+- * (in contrast, when a batch of requests is completed, a soft real-time
+- * application spends some time processing data).
++ * Unfortunately, even a greedy (i.e., I/O-bound) application may
++ * happen to meet, occasionally or systematically, both the above
++ * bandwidth and isochrony requirements. This may happen at least in
++ * the following circumstances. First, if the CPU load is high. The
++ * application may stop issuing requests while the CPUs are busy
++ * serving other processes, then restart, then stop again for a while,
++ * and so on. The other circumstances are related to the storage
++ * device: the storage device is highly loaded or reaches a low-enough
++ * throughput with the I/O of the application (e.g., because the I/O
++ * is random and/or the device is slow). In all these cases, the
++ * I/O of the application may be simply slowed down enough to meet
++ * the bandwidth and isochrony requirements. To reduce the probability
++ * that greedy applications are deemed as soft real-time in these
++ * corner cases, a further rule is used in the computation of
++ * soft_rt_next_start: the return value of this function is forced to
++ * be higher than the maximum between the following two quantities.
+ *
+- * Unfortunately, the last filter may easily generate false positives if
+- * only bfqd->bfq_slice_idle is used as a reference time interval and one
+- * or both the following cases occur:
+- * 1) HZ is so low that the duration of a jiffy is comparable to or higher
+- * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
+- * HZ=100.
++ * (a) Current time plus: (1) the maximum time for which the arrival
++ * of a request is waited for when a sync queue becomes idle,
++ * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
++ * postpone for a moment the reason for adding a few extra
++ * jiffies; we get back to it after next item (b). Lower-bounding
++ * the return value of this function with the current time plus
++ * bfqd->bfq_slice_idle tends to filter out greedy applications,
++ * because the latter issue their next request as soon as possible
++ * after the last one has been completed. In contrast, a soft
++ * real-time application spends some time processing data, after a
++ * batch of its requests has been completed.
++ *
++ * (b) Current value of bfqq->soft_rt_next_start. As pointed out
++ * above, greedy applications may happen to meet both the
++ * bandwidth and isochrony requirements under heavy CPU or
++ * storage-device load. In more detail, in these scenarios, these
++ * applications happen, only for limited time periods, to do I/O
++ * slowly enough to meet all the requirements described so far,
++ * including the filtering in above item (a). These slow-speed
++ * time intervals are usually interspersed between other time
++ * intervals during which these applications do I/O at a very high
++ * speed. Fortunately, exactly because of the high speed of the
++ * I/O in the high-speed intervals, the values returned by this
++ * function happen to be so high, near the end of any such
++ * high-speed interval, to be likely to fall *after* the end of
++ * the low-speed time interval that follows. These high values are
++ * stored in bfqq->soft_rt_next_start after each invocation of
++ * this function. As a consequence, if the last value of
++ * bfqq->soft_rt_next_start is constantly used to lower-bound the
++ * next value that this function may return, then, from the very
++ * beginning of a low-speed interval, bfqq->soft_rt_next_start is
++ * likely to be constantly kept so high that any I/O request
++ * issued during the low-speed interval is considered as arriving
++ * too soon for the application to be deemed as soft
++ * real-time. Then, in the high-speed interval that follows, the
++ * application will not be deemed as soft real-time, just because
++ * it will do I/O at a high speed. And so on.
++ *
++ * Getting back to the filtering in item (a), in the following two
++ * cases this filtering might be easily passed by a greedy
++ * application, if the reference quantity was just
++ * bfqd->bfq_slice_idle:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or
++ * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
++ * devices with HZ=100. The time granularity may be so coarse
++ * that the approximation, in jiffies, of bfqd->bfq_slice_idle
++ * is rather lower than the exact value.
+ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
+ * for a while, then suddenly 'jump' by several units to recover the lost
+ * increments. This seems to happen, e.g., inside virtual machines.
+- * To address this issue, we do not use as a reference time interval just
+- * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
+- * particular we add the minimum number of jiffies for which the filter
+- * seems to be quite precise also in embedded systems and KVM/QEMU virtual
+- * machines.
++ * To address this issue, in the filtering in (a) we do not use as a
++ * reference time interval just bfqd->bfq_slice_idle, but
++ * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
++ * minimum number of jiffies for which the filter seems to be quite
++ * precise also in embedded systems and KVM/QEMU virtual machines.
+ */
+ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+@@ -3131,10 +3172,11 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
+ bfqd->bfq_wr_max_softrt_rate));
+
+- return max(bfqq->last_idle_bklogged +
+- HZ * bfqq->service_from_backlogged /
+- bfqd->bfq_wr_max_softrt_rate,
+- jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
++ return max3(bfqq->soft_rt_next_start,
++ bfqq->last_idle_bklogged +
++ HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
+ }
+
+ /**
+@@ -4167,10 +4209,15 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfqq->split_time = bfq_smallest_from_now();
+
+ /*
+- * Set to the value for which bfqq will not be deemed as
+- * soft rt when it becomes backlogged.
++ * To not forget the possibly high bandwidth consumed by a
++ * process/queue in the recent past,
++ * bfq_bfqq_softrt_next_start() returns a value at least equal
++ * to the current value of bfqq->soft_rt_next_start (see
++ * comments on bfq_bfqq_softrt_next_start). Set
++ * soft_rt_next_start to now, to mean that bfqq has consumed
++ * no bandwidth so far.
+ */
+- bfqq->soft_rt_next_start = bfq_greatest_from_now();
++ bfqq->soft_rt_next_start = jiffies;
+
+ /* first request is almost certainly seeky */
+ bfqq->seek_history = 1;
+
+From 2a09b505660c81dbb80a5d68c9bc558c326d041f Mon Sep 17 00:00:00 2001
+From: Chiara Bruschi <bruschi.chiara@outlook.it>
+Date: Thu, 7 Dec 2017 09:57:19 +0100
+Subject: [PATCH 03/23] block, bfq-mq: fix occurrences of request
+ prepare/finish methods' old names
+
+Commits 'b01f1fa3bb19' (Port of "blk-mq-sched: unify request prepare
+methods") and 'cc10d2d7d2c1' (Port of "blk-mq-sched: unify request
+finished methods") changed the old names of current bfq_prepare_request
+and bfq_finish_request methods, but left them unchanged elsewhere in
+the code (related comments, part of function name bfq_put_rq_priv_body).
+
+This commit fixes every occurrence of the old names of these methods
+by changing them into the current names.
+
+Fixes: b01f1fa3bb19 (Port of "blk-mq-sched: unify request prepare methods")
+Fixes: cc10d2d7d2c1 (Port of "blk-mq-sched: unify request finished methods")
+Reviewed-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Federico Motta <federico@willer.it>
+Signed-off-by: Chiara Bruschi <bruschi.chiara@outlook.it>
+---
+ block/bfq-mq-iosched.c | 38 +++++++++++++++++++-------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 4d06d900f45e..8f8d5eccb016 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4018,20 +4018,20 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ /*
+ * TESTING: reset DISP_LIST flag, because: 1)
+ * this rq this request has passed through
+- * get_rq_private, 2) then it will have
+- * put_rq_private invoked on it, and 3) in
+- * put_rq_private we use this flag to check
+- * that put_rq_private is not invoked on
+- * requests for which get_rq_private has been
+- * invoked.
++ * bfq_prepare_request, 2) then it will have
++ * bfq_finish_request invoked on it, and 3) in
++ * bfq_finish_request we use this flag to check
++ * that bfq_finish_request is not invoked on
++ * requests for which bfq_prepare_request has
++ * been invoked.
+ */
+ rq->rq_flags &= ~RQF_DISP_LIST;
+ goto inc_in_driver_start_rq;
+ }
+
+ /*
+- * We exploit the put_rq_private hook to decrement
+- * rq_in_driver, but put_rq_private will not be
++ * We exploit the bfq_finish_request hook to decrement
++ * rq_in_driver, but bfq_finish_request will not be
+ * invoked on this request. So, to avoid unbalance,
+ * just start this request, without incrementing
+ * rq_in_driver. As a negative consequence,
+@@ -4040,14 +4040,14 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ * bfq_schedule_dispatch to be invoked uselessly.
+ *
+ * As for implementing an exact solution, the
+- * put_request hook, if defined, is probably invoked
+- * also on this request. So, by exploiting this hook,
+- * we could 1) increment rq_in_driver here, and 2)
+- * decrement it in put_request. Such a solution would
+- * let the value of the counter be always accurate,
+- * but it would entail using an extra interface
+- * function. This cost seems higher than the benefit,
+- * being the frequency of non-elevator-private
++ * bfq_finish_request hook, if defined, is probably
++ * invoked also on this request. So, by exploiting
++ * this hook, we could 1) increment rq_in_driver here,
++ * and 2) decrement it in bfq_finish_request. Such a
++ * solution would let the value of the counter be
++ * always accurate, but it would entail using an extra
++ * interface function. This cost seems higher than the
++ * benefit, being the frequency of non-elevator-private
+ * requests very low.
+ */
+ goto start_rq;
+@@ -4963,7 +4963,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ }
+ }
+
+-static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
++static void bfq_finish_request_body(struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "put_request_body: allocated %d", bfqq->allocated);
+@@ -5019,7 +5019,7 @@ static void bfq_finish_request(struct request *rq)
+ spin_lock_irqsave(&bfqd->lock, flags);
+
+ bfq_completed_request(bfqq, bfqd);
+- bfq_put_rq_priv_body(bfqq);
++ bfq_finish_request_body(bfqq);
+
+ spin_unlock_irqrestore(&bfqd->lock, flags);
+ } else {
+@@ -5042,7 +5042,7 @@ static void bfq_finish_request(struct request *rq)
+ bfqg_stats_update_io_remove(bfqq_group(bfqq),
+ rq->cmd_flags);
+ }
+- bfq_put_rq_priv_body(bfqq);
++ bfq_finish_request_body(bfqq);
+ }
+
+ rq->elv.priv[0] = NULL;
+
+From 4df19943c3a767df453abea3d2ac3433c3326ce0 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 16 Nov 2017 18:38:13 +0100
+Subject: [PATCH 04/23] block, bfq-sq, bfq-mq: add missing rq_pos_tree update
+ on rq removal
+
+If two processes do I/O close to each other, then BFQ merges the
+bfq_queues associated with these processes, to get a more sequential
+I/O, and thus a higher throughput. In this respect, to detect whether
+two processes are doing I/O close to each other, BFQ keeps a list of
+the head-of-line I/O requests of all active bfq_queues. The list is
+ordered by initial sectors, and implemented through a red-black tree
+(rq_pos_tree).
+
+Unfortunately, the update of the rq_pos_tree was incomplete, because
+the tree was not updated on the removal of the head-of-line I/O
+request of a bfq_queue, in case the queue did not remain empty. This
+commit adds the missing update.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+---
+ block/bfq-mq-iosched.c | 3 +++
+ block/bfq-sq-iosched.c | 3 +++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 8f8d5eccb016..603191c9008f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -1729,6 +1729,9 @@ static void bfq_remove_request(struct request_queue *q,
+ rb_erase(&bfqq->pos_node, bfqq->pos_root);
+ bfqq->pos_root = NULL;
+ }
++ } else {
++ BUG_ON(!bfqq->next_rq);
++ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
+
+ if (rq->cmd_flags & REQ_META) {
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 987dc255c82c..ea90ace79e49 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -1669,6 +1669,9 @@ static void bfq_remove_request(struct request *rq)
+ rb_erase(&bfqq->pos_node, bfqq->pos_root);
+ bfqq->pos_root = NULL;
+ }
++ } else {
++ BUG_ON(!bfqq->next_rq);
++ bfq_pos_tree_add_move(bfqd, bfqq);
+ }
+
+ if (rq->cmd_flags & REQ_META) {
+
+From b844e345140aaea957d84a21d2aa67588b020cd5 Mon Sep 17 00:00:00 2001
+From: Angelo Ruocco <angeloruocco90@gmail.com>
+Date: Mon, 18 Dec 2017 08:28:08 +0100
+Subject: [PATCH 05/23] block, bfq-sq, bfq-mq: check low_latency flag in
+ bfq_bfqq_save_state()
+
+A just-created bfq_queue will certainly be deemed as interactive on
+the arrival of its first I/O request, if the low_latency flag is
+set. Yet, if the queue is merged with another queue on the arrival of
+its first I/O request, it will not have the chance to be flagged as
+interactive. Nevertheless, if the queue is then split soon enough, it
+has to be flagged as interactive after the split.
+
+To handle this early-merge scenario correctly, BFQ saves the state of
+the queue, on the merge, as if the latter had already been deemed
+interactive. So, if the queue is split soon, it will get
+weight-raised, because the previous state of the queue is resumed on
+the split.
+
+Unfortunately, in the act of saving the state of the newly-created
+queue, BFQ doesn't check whether the low_latency flag is set, and this
+causes early-merged queues to be then weight-raised, on queue splits,
+even if low_latency is off. This commit addresses this problem by
+adding the missing check.
+
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 3 ++-
+ block/bfq-sq-iosched.c | 3 ++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 603191c9008f..ff9776c8836a 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -2231,7 +2231,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
+ if (unlikely(bfq_bfqq_just_created(bfqq) &&
+- !bfq_bfqq_in_large_burst(bfqq))) {
++ !bfq_bfqq_in_large_burst(bfqq) &&
++ bfqq->bfqd->low_latency)) {
+ /*
+ * bfqq being merged ritgh after being created: bfqq
+ * would have deserved interactive weight raising, but
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index ea90ace79e49..3a2d764e760c 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -2109,7 +2109,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
+ if (unlikely(bfq_bfqq_just_created(bfqq) &&
+- !bfq_bfqq_in_large_burst(bfqq))) {
++ !bfq_bfqq_in_large_burst(bfqq) &&
++ bfqq->bfqd->low_latency)) {
+ /*
+ * bfqq being merged ritgh after being created: bfqq
+ * would have deserved interactive weight raising, but
+
+From 4cc6896fe1de2e0b4de151a6e70658f10b9ec2fa Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Fri, 27 Oct 2017 11:12:14 +0200
+Subject: [PATCH 06/23] block, bfq-sq, bfq-mq: let a queue be merged only
+ shortly after starting I/O
+
+In BFQ and CFQ, two processes are said to be cooperating if they do
+I/O in such a way that the union of their I/O requests yields a
+sequential I/O pattern. To get such a sequential I/O pattern out of
+the non-sequential pattern of each cooperating process, BFQ and CFQ
+merge the queues associated with these processes. In more detail,
+cooperating processes, and thus their associated queues, usually
+start, or restart, to do I/O shortly after each other. This is the
+case, e.g., for the I/O threads of KVM/QEMU and of the dump
+utility. Based on this assumption, this commit allows a bfq_queue to
+be merged only during a short time interval (100ms) after it starts,
+or re-starts, to do I/O. This filtering provides two important
+benefits.
+
+First, it greatly reduces the probability that two non-cooperating
+processes have their queues merged by mistake, if they just happen to
+do I/O close to each other for a short time interval. These spurious
+merges cause loss of service guarantees. A low-weight bfq_queue may
+unjustly get more than its expected share of the throughput: if such a
+low-weight queue is merged with a high-weight queue, then the I/O for
+the low-weight queue is served as if the queue had a high weight. This
+may damage other high-weight queues unexpectedly. For instance,
+because of this issue, lxterminal occasionally took 7.5 seconds to
+start, instead of 6.5 seconds, when some sequential readers and
+writers did I/O in the background on a FUJITSU MHX2300BT HDD. The
+reason is that the bfq_queues associated with some of the readers or
+the writers were merged with the high-weight queues of some processes
+that had to do some urgent but little I/O. The readers then exploited
+the inherited high weight for all or most of their I/O, during the
+start-up of the terminal. The filtering introduced by this commit
+eliminated any outlier caused by spurious queue merges in our start-up
+time tests.
+
+This filtering also provides a small boost to the throughput
+sustainable by BFQ: 3-4%, depending on the CPU. The reason is that,
+once a bfq_queue cannot be merged any longer, this commit makes BFQ
+stop updating the data needed to handle merging for the queue.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+---
+ block/bfq-mq-iosched.c | 64 +++++++++++++++++++++++++++++++++++++++++---------
+ block/bfq-mq.h | 1 +
+ block/bfq-sched.c | 4 ++++
+ block/bfq-sq-iosched.c | 64 +++++++++++++++++++++++++++++++++++++++++---------
+ block/bfq.h | 2 ++
+ 5 files changed, 113 insertions(+), 22 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index ff9776c8836a..8b17b25a3c30 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -119,6 +119,20 @@ static const int bfq_async_charge_factor = 10;
+ /* Default timeout values, in jiffies, approximating CFQ defaults. */
+ static const int bfq_timeout = (HZ / 8);
+
++/*
++ * Time limit for merging (see comments in bfq_setup_cooperator). Set
++ * to the slowest value that, in our tests, proved to be effective in
++ * removing false positives, while not causing true positives to miss
++ * queue merging.
++ *
++ * As can be deduced from the low time limit below, queue merging, if
++ * successful, happens at the very beginning of the I/O of the involved
++ * cooperating processes, as a consequence of the arrival of the very
++ * first requests from each cooperator. After that, there is very
++ * little chance to find cooperators.
++ */
++static const unsigned long bfq_merge_time_limit = HZ/10;
++
+ static struct kmem_cache *bfq_pool;
+
+ /* Below this threshold (in ns), we consider thinktime immediate. */
+@@ -389,6 +403,13 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
+ return bfqq;
+ }
+
++static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
++{
++ return bfqq->service_from_backlogged > 0 &&
++ time_is_before_jiffies(bfqq->first_IO_time +
++ bfq_merge_time_limit);
++}
++
+ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ {
+ struct rb_node **p, *parent;
+@@ -399,6 +420,14 @@ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfqq->pos_root = NULL;
+ }
+
++ /*
++ * bfqq cannot be merged any longer (see comments in
++ * bfq_setup_cooperator): no point in adding bfqq into the
++ * position tree.
++ */
++ if (bfq_too_late_for_merging(bfqq))
++ return;
++
+ if (bfq_class_idle(bfqq))
+ return;
+ if (!bfqq->next_rq)
+@@ -2081,6 +2110,13 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ struct bfq_queue *new_bfqq)
+ {
++ if (bfq_too_late_for_merging(new_bfqq)) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "[%s] too late for bfq%d to be merged",
++ __func__, new_bfqq->pid);
++ return false;
++ }
++
+ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
+ (bfqq->ioprio_class != new_bfqq->ioprio_class))
+ return false;
+@@ -2149,6 +2185,23 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ {
+ struct bfq_queue *in_service_bfqq, *new_bfqq;
+
++ /*
++ * Prevent bfqq from being merged if it has been created too
++ * long ago. The idea is that true cooperating processes, and
++ * thus their associated bfq_queues, are supposed to be
++ * created shortly after each other. This is the case, e.g.,
++ * for KVM/QEMU and dump I/O threads. Basing on this
++ * assumption, the following filtering greatly reduces the
++ * probability that two non-cooperating processes, which just
++ * happen to do close I/O for some short time interval, have
++ * their queues merged by mistake.
++ */
++ if (bfq_too_late_for_merging(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have looked for coop, but too late");
++ return NULL;
++ }
++
+ if (bfqq->new_bfqq)
+ return bfqq->new_bfqq;
+
+@@ -3338,17 +3391,6 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ */
+ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
+
+- /*
+- * Increase service_from_backlogged before next statement,
+- * because the possible next invocation of
+- * bfq_bfqq_charge_time would likely inflate
+- * entity->service. In contrast, service_from_backlogged must
+- * contain real service, to enable the soft real-time
+- * heuristic to correctly compute the bandwidth consumed by
+- * bfqq.
+- */
+- bfqq->service_from_backlogged += entity->service;
+-
+ /*
+ * As above explained, charge slow (typically seeky) and
+ * timed-out queues with the time and not the service
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 1cb05bb853d2..a5947b203ef2 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -337,6 +337,7 @@ struct bfq_queue {
+ unsigned long wr_start_at_switch_to_srt;
+
+ unsigned long split_time; /* time of last split */
++ unsigned long first_IO_time; /* time of first I/O for this queue */
+ };
+
+ /**
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 616c0692335a..9d261dd428e4 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -939,6 +939,10 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
+ struct bfq_entity *entity = &bfqq->entity;
+ struct bfq_service_tree *st;
+
++ if (!bfqq->service_from_backlogged)
++ bfqq->first_IO_time = jiffies;
++
++ bfqq->service_from_backlogged += served;
+ for_each_entity(entity) {
+ st = bfq_entity_service_tree(entity);
+
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 3a2d764e760c..cd00a41ca35d 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -113,6 +113,20 @@ static const int bfq_async_charge_factor = 10;
+ /* Default timeout values, in jiffies, approximating CFQ defaults. */
+ static const int bfq_timeout = (HZ / 8);
+
++/*
++ * Time limit for merging (see comments in bfq_setup_cooperator). Set
++ * to the slowest value that, in our tests, proved to be effective in
++ * removing false positives, while not causing true positives to miss
++ * queue merging.
++ *
++ * As can be deduced from the low time limit below, queue merging, if
++ * successful, happens at the very beginning of the I/O of the involved
++ * cooperating processes, as a consequence of the arrival of the very
++ * first requests from each cooperator. After that, there is very
++ * little chance to find cooperators.
++ */
++static const unsigned long bfq_merge_time_limit = HZ/10;
++
+ static struct kmem_cache *bfq_pool;
+
+ /* Below this threshold (in ns), we consider thinktime immediate. */
+@@ -351,6 +365,13 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
+ return bfqq;
+ }
+
++static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
++{
++ return bfqq->service_from_backlogged > 0 &&
++ time_is_before_jiffies(bfqq->first_IO_time +
++ bfq_merge_time_limit);
++}
++
+ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ {
+ struct rb_node **p, *parent;
+@@ -361,6 +382,14 @@ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfqq->pos_root = NULL;
+ }
+
++ /*
++ * bfqq cannot be merged any longer (see comments in
++ * bfq_setup_cooperator): no point in adding bfqq into the
++ * position tree.
++ */
++ if (bfq_too_late_for_merging(bfqq))
++ return;
++
+ if (bfq_class_idle(bfqq))
+ return;
+ if (!bfqq->next_rq)
+@@ -1960,6 +1989,13 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ struct bfq_queue *new_bfqq)
+ {
++ if (bfq_too_late_for_merging(new_bfqq)) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "[%s] too late for bfq%d to be merged",
++ __func__, new_bfqq->pid);
++ return false;
++ }
++
+ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
+ (bfqq->ioprio_class != new_bfqq->ioprio_class))
+ return false;
+@@ -2028,6 +2064,23 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ {
+ struct bfq_queue *in_service_bfqq, *new_bfqq;
+
++ /*
++ * Prevent bfqq from being merged if it has been created too
++ * long ago. The idea is that true cooperating processes, and
++ * thus their associated bfq_queues, are supposed to be
++ * created shortly after each other. This is the case, e.g.,
++ * for KVM/QEMU and dump I/O threads. Basing on this
++ * assumption, the following filtering greatly reduces the
++ * probability that two non-cooperating processes, which just
++ * happen to do close I/O for some short time interval, have
++ * their queues merged by mistake.
++ */
++ if (bfq_too_late_for_merging(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq,
++ "would have looked for coop, but too late");
++ return NULL;
++ }
++
+ if (bfqq->new_bfqq)
+ return bfqq->new_bfqq;
+
+@@ -3226,17 +3279,6 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ */
+ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
+
+- /*
+- * Increase service_from_backlogged before next statement,
+- * because the possible next invocation of
+- * bfq_bfqq_charge_time would likely inflate
+- * entity->service. In contrast, service_from_backlogged must
+- * contain real service, to enable the soft real-time
+- * heuristic to correctly compute the bandwidth consumed by
+- * bfqq.
+- */
+- bfqq->service_from_backlogged += entity->service;
+-
+ /*
+ * As above explained, charge slow (typically seeky) and
+ * timed-out queues with the time and not the service
+diff --git a/block/bfq.h b/block/bfq.h
+index 47cd4d5a8c32..59539adc00a5 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -329,6 +329,8 @@ struct bfq_queue {
+ unsigned long wr_start_at_switch_to_srt;
+
+ unsigned long split_time; /* time of last split */
++
++ unsigned long first_IO_time; /* time of first I/O for this queue */
+ };
+
+ /**
+
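+/*
+ * Self-contained user-space sketch (names and values here are illustrative,
+ * not taken from the patch above) of the merge-window arithmetic that the
+ * patch above introduces: bfq_merge_time_limit = HZ/10 always corresponds
+ * to 100ms, e.g. 10 ticks at HZ=100 or 25 ticks at HZ=250.
+ */
+#include <stdbool.h>
+#include <stdio.h>
+
+static bool too_late_for_merging(unsigned long service_from_backlogged,
+                                 unsigned long first_io_tick,
+                                 unsigned long now_tick,
+                                 unsigned long hz)
+{
+        unsigned long merge_time_limit = hz / 10;       /* 100ms in ticks */
+
+        /* merge only queues that started doing I/O less than 100ms ago */
+        return service_from_backlogged > 0 &&
+               now_tick > first_io_tick + merge_time_limit;
+}
+
+int main(void)
+{
+        /* at HZ=100: a queue that started 5 ticks (50ms) ago may still merge */
+        printf("%d\n", too_late_for_merging(4096, 1000, 1005, 100));    /* 0 */
+        /* the same queue 20 ticks (200ms) after its first I/O is past the window */
+        printf("%d\n", too_late_for_merging(4096, 1000, 1020, 100));    /* 1 */
+        return 0;
+}
+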
+From 157f39c43ab182280634cd4f6335d0187b3741a0 Mon Sep 17 00:00:00 2001
+From: Angelo Ruocco <angeloruocco90@gmail.com>
+Date: Mon, 11 Dec 2017 14:19:54 +0100
+Subject: [PATCH 07/23] block, bfq-sq, bfq-mq: remove superfluous check in
+ queue-merging setup
+
+When two or more processes do I/O in such a way that their requests are
+sequential with respect to one another, BFQ merges the bfq_queues associated
+with the processes. This way the overall I/O pattern becomes sequential,
+and thus there is a boost in throughput.
+These cooperating processes usually start or restart to do I/O shortly
+after each other. So, in order to avoid merging non-cooperating processes,
+BFQ ensures that none of these queues has been in weight raising for too
+long.
+
+In this respect, from commit "block, bfq-sq, bfq-mq: let a queue be merged
+only shortly after starting I/O", BFQ checks whether any queue (and not
+only weight-raised ones) has been doing I/O continuously for too long to be
+merged.
+
+This new additional check makes the first one useless: a queue doing
+I/O for long enough, if weight-raised, is also a queue that has been in
+weight raising for too long to be merged. Accordingly, this commit
+removes the first check.
+
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.com>
+---
+ block/bfq-mq-iosched.c | 53 ++++----------------------------------------------
+ block/bfq-sq-iosched.c | 53 ++++----------------------------------------------
+ 2 files changed, 8 insertions(+), 98 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 8b17b25a3c30..f5db8613a70f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -2140,20 +2140,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ return true;
+ }
+
+-/*
+- * If this function returns true, then bfqq cannot be merged. The idea
+- * is that true cooperation happens very early after processes start
+- * to do I/O. Usually, late cooperations are just accidental false
+- * positives. In case bfqq is weight-raised, such false positives
+- * would evidently degrade latency guarantees for bfqq.
+- */
+-static bool wr_from_too_long(struct bfq_queue *bfqq)
+-{
+- return bfqq->wr_coeff > 1 &&
+- time_is_before_jiffies(bfqq->last_wr_start_finish +
+- msecs_to_jiffies(100));
+-}
+-
+ /*
+ * Attempt to schedule a merge of bfqq with the currently in-service
+ * queue or with a close queue among the scheduled queues. Return
+@@ -2167,11 +2153,6 @@ static bool wr_from_too_long(struct bfq_queue *bfqq)
+ * to maintain. Besides, in such a critical condition as an out of memory,
+ * the benefits of queue merging may be little relevant, or even negligible.
+ *
+- * Weight-raised queues can be merged only if their weight-raising
+- * period has just started. In fact cooperating processes are usually
+- * started together. Thus, with this filter we avoid false positives
+- * that would jeopardize low-latency guarantees.
+- *
+ * WARNING: queue merging may impair fairness among non-weight raised
+ * queues, for at least two reasons: 1) the original weight of a
+ * merged queue may change during the merged state, 2) even being the
+@@ -2205,15 +2186,7 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (bfqq->new_bfqq)
+ return bfqq->new_bfqq;
+
+- if (io_struct && wr_from_too_long(bfqq) &&
+- likely(bfqq != &bfqd->oom_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have looked for coop, but bfq%d wr",
+- bfqq->pid);
+-
+- if (!io_struct ||
+- wr_from_too_long(bfqq) ||
+- unlikely(bfqq == &bfqd->oom_bfqq))
++ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
+ return NULL;
+
+ /* If there is only one backlogged queue, don't search. */
+@@ -2223,17 +2196,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ in_service_bfqq = bfqd->in_service_queue;
+
+ if (in_service_bfqq && in_service_bfqq != bfqq &&
+- wr_from_too_long(in_service_bfqq)
+- && likely(in_service_bfqq == &bfqd->oom_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have tried merge with in-service-queue, but wr");
+-
+- if (!in_service_bfqq || in_service_bfqq == bfqq
+- || wr_from_too_long(in_service_bfqq) ||
+- unlikely(in_service_bfqq == &bfqd->oom_bfqq))
+- goto check_scheduled;
+-
+- if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ likely(in_service_bfqq != &bfqd->oom_bfqq) &&
++ bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
+ bfqq->entity.parent == in_service_bfqq->entity.parent &&
+ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
+ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
+@@ -2245,21 +2209,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * queues. The only thing we need is that the bio/request is not
+ * NULL, as we need it to establish whether a cooperator exists.
+ */
+-check_scheduled:
+ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
+ bfq_io_struct_pos(io_struct, request));
+
+ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent);
+
+- if (new_bfqq && wr_from_too_long(new_bfqq) &&
+- likely(new_bfqq != &bfqd->oom_bfqq) &&
+- bfq_may_be_close_cooperator(bfqq, new_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have merged with bfq%d, but wr",
+- new_bfqq->pid);
+-
+- if (new_bfqq && !wr_from_too_long(new_bfqq) &&
+- likely(new_bfqq != &bfqd->oom_bfqq) &&
++ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
+ bfq_may_be_close_cooperator(bfqq, new_bfqq))
+ return bfq_setup_merge(bfqq, new_bfqq);
+
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index cd00a41ca35d..d8a358e5e284 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -2019,20 +2019,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ return true;
+ }
+
+-/*
+- * If this function returns true, then bfqq cannot be merged. The idea
+- * is that true cooperation happens very early after processes start
+- * to do I/O. Usually, late cooperations are just accidental false
+- * positives. In case bfqq is weight-raised, such false positives
+- * would evidently degrade latency guarantees for bfqq.
+- */
+-static bool wr_from_too_long(struct bfq_queue *bfqq)
+-{
+- return bfqq->wr_coeff > 1 &&
+- time_is_before_jiffies(bfqq->last_wr_start_finish +
+- msecs_to_jiffies(100));
+-}
+-
+ /*
+ * Attempt to schedule a merge of bfqq with the currently in-service
+ * queue or with a close queue among the scheduled queues. Return
+@@ -2046,11 +2032,6 @@ static bool wr_from_too_long(struct bfq_queue *bfqq)
+ * to maintain. Besides, in such a critical condition as an out of memory,
+ * the benefits of queue merging may be little relevant, or even negligible.
+ *
+- * Weight-raised queues can be merged only if their weight-raising
+- * period has just started. In fact cooperating processes are usually
+- * started together. Thus, with this filter we avoid false positives
+- * that would jeopardize low-latency guarantees.
+- *
+ * WARNING: queue merging may impair fairness among non-weight raised
+ * queues, for at least two reasons: 1) the original weight of a
+ * merged queue may change during the merged state, 2) even being the
+@@ -2084,15 +2065,7 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (bfqq->new_bfqq)
+ return bfqq->new_bfqq;
+
+- if (io_struct && wr_from_too_long(bfqq) &&
+- likely(bfqq != &bfqd->oom_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have looked for coop, but bfq%d wr",
+- bfqq->pid);
+-
+- if (!io_struct ||
+- wr_from_too_long(bfqq) ||
+- unlikely(bfqq == &bfqd->oom_bfqq))
++ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
+ return NULL;
+
+ /* If there is only one backlogged queue, don't search. */
+@@ -2102,17 +2075,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ in_service_bfqq = bfqd->in_service_queue;
+
+ if (in_service_bfqq && in_service_bfqq != bfqq &&
+- bfqd->in_service_bic && wr_from_too_long(in_service_bfqq)
+- && likely(in_service_bfqq == &bfqd->oom_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have tried merge with in-service-queue, but wr");
+-
+- if (!in_service_bfqq || in_service_bfqq == bfqq ||
+- !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) ||
+- unlikely(in_service_bfqq == &bfqd->oom_bfqq))
+- goto check_scheduled;
+-
+- if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ likely(in_service_bfqq != &bfqd->oom_bfqq) &&
++ bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
+ bfqq->entity.parent == in_service_bfqq->entity.parent &&
+ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
+ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
+@@ -2124,21 +2088,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * queues. The only thing we need is that the bio/request is not
+ * NULL, as we need it to establish whether a cooperator exists.
+ */
+-check_scheduled:
+ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
+ bfq_io_struct_pos(io_struct, request));
+
+ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent);
+
+- if (new_bfqq && wr_from_too_long(new_bfqq) &&
+- likely(new_bfqq != &bfqd->oom_bfqq) &&
+- bfq_may_be_close_cooperator(bfqq, new_bfqq))
+- bfq_log_bfqq(bfqd, bfqq,
+- "would have merged with bfq%d, but wr",
+- new_bfqq->pid);
+-
+- if (new_bfqq && !wr_from_too_long(new_bfqq) &&
+- likely(new_bfqq != &bfqd->oom_bfqq) &&
++ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
+ bfq_may_be_close_cooperator(bfqq, new_bfqq))
+ return bfq_setup_merge(bfqq, new_bfqq);
+
+
+From b82eb91d87f172aba7eb5eb98e8d5e2a621adf51 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 30 Nov 2017 17:48:28 +0100
+Subject: [PATCH 08/23] block, bfq-sq, bfq-mq: increase threshold to deem I/O
+ as random
+
+If two processes do I/O close to each other, i.e., are cooperating
+processes in BFQ (and CFQ's) nomenclature, then BFQ merges their
+associated bfq_queues, so as to get sequential I/O from the union of
+the I/O requests of the processes, and thus reach a higher
+throughput. A merged queue is then split if its I/O stops being
+sequential. In this respect, BFQ deems the I/O of a bfq_queue as
+(mostly) sequential only if less than 4 I/O requests are random, out
+of the last 32 requests inserted into the queue.
+
+Unfortunately, extensive testing (with the interleaved_io benchmark of
+the S suite [1], and with real applications spawning cooperating
+processes) has clearly shown that, with such a low threshold, only a
+rather low I/O throughput may be reached when several cooperating
+processes do I/O. In particular, the outcome of each test run was
+bimodal: if queue merging occurred and was stable during the test,
+then the throughput was close to the peak rate of the storage device,
+otherwise the throughput was arbitrarily low (usually around 1/10 of
+the peak rate with a rotational device). The probability of getting the
+unlucky outcomes grew with the number of cooperating processes: it was
+already significant with 5 processes, and close to one with 7 or more
+processes.
+
+The cause of the low throughput in the unlucky runs was that the
+merged queues containing the I/O of these cooperating processes were
+soon split, because they contained more random I/O requests than those
+tolerated by the 4/32 threshold, but
+- that I/O would nevertheless have allowed the storage device to reach
+  peak throughput or almost peak throughput;
+- in contrast, the I/O of these processes, if served individually
+  (from separate queues), yielded a rather low throughput.
+
+So we repeated our tests with increasing values of the threshold,
+until we found the minimum value (19) for which we obtained maximum
+throughput, reliably, with up to at least 9 cooperating
+processes. Then we checked that the use of that higher threshold value
+did not cause any regression for any other benchmark in the suite [1].
+This commit raises the threshold to such a higher value.
+
+[1] https://github.com/Algodev-github/S
+
+Signed-off-by: Angelo Ruocco <angeloruocco90@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ block/bfq-sq-iosched.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index f5db8613a70f..cb5f49ddecb6 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -145,7 +145,7 @@ static struct kmem_cache *bfq_pool;
+ #define BFQQ_SEEK_THR (sector_t)(8 * 100)
+ #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
+ #define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
+-#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
++#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
+
+ /* Min number of samples required to perform peak-rate update */
+ #define BFQ_RATE_MIN_SAMPLES 32
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index d8a358e5e284..e1c6dc651be1 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -139,7 +139,7 @@ static struct kmem_cache *bfq_pool;
+ #define BFQQ_SEEK_THR (sector_t)(8 * 100)
+ #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
+ #define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
+-#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
++#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
+
+ /* Min number of samples required to perform peak-rate update */
+ #define BFQ_RATE_MIN_SAMPLES 32
+
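+/*
+ * Self-contained sketch (illustrative names, user-space C, not part of the
+ * patch above) of the seekiness test relaxed above: seek_history is a 32-bit
+ * window with one bit per recent request, set when that request was random;
+ * a queue is deemed seeky when more than a threshold of those bits are set,
+ * and the threshold moves from 32/8 = 4 to 19.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+/* __builtin_popcount plays the role of the kernel's hweight32() */
+static bool seeky(uint32_t seek_history, unsigned int threshold)
+{
+        return (unsigned int)__builtin_popcount(seek_history) > threshold;
+}
+
+int main(void)
+{
+        uint32_t history = 0x000003FF;  /* 10 of the last 32 requests random */
+
+        /* old threshold (32/8 = 4): seeky, so the merged queue gets split */
+        printf("old: %d\n", seeky(history, 32 / 8));    /* 1 */
+        /* new threshold (19): still treated as mostly sequential */
+        printf("new: %d\n", seeky(history, 19));        /* 0 */
+        return 0;
+}
+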
+From b739dda4e4b3a1cbbc905f86f9fbb0860b068ce7 Mon Sep 17 00:00:00 2001
+From: Chiara Bruschi <bruschi.chiara@outlook.it>
+Date: Mon, 11 Dec 2017 18:55:26 +0100
+Subject: [PATCH 09/23] block, bfq-sq, bfq-mq: specify usage condition of
+ delta_us in bfq_log_bfqq call
+
+Inside the function bfq_completed_request, the value of a variable
+called delta_us is computed as the current request's completion time.
+delta_us is used inside a call to the function bfq_log_bfqq as the divisor
+in a division operation to compute a rate value, but no check makes
+sure that delta_us has a non-zero value. A divisor with value 0 leads
+to a division error that could result in a kernel oops (and therefore an
+unstable/unreliable system state) and consequently cause a kernel panic
+if resources are unavailable after the system fault.
+
+This commit fixes this call to bfq_log_bfqq by specifying the condition
+that allows delta_us to be safely used as a divisor.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Chiara Bruschi <bruschi.chiara@outlook.it>
+---
+ block/bfq-mq-iosched.c | 5 ++++-
+ block/bfq-sq-iosched.c | 5 ++++-
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index cb5f49ddecb6..6ce2c0789046 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4904,9 +4904,12 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ bfq_log_bfqq(bfqd, bfqq,
+ "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
++ delta_us > 0 ?
+ (USEC_PER_SEC*
+ (u64)((bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us))
+- >>BFQ_RATE_SHIFT,
++ >>BFQ_RATE_SHIFT :
++ (USEC_PER_SEC*
++ (u64)(bfqd->last_rq_max_size<<BFQ_RATE_SHIFT))>>BFQ_RATE_SHIFT,
+ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT);
+
+ /*
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index e1c6dc651be1..eff4c4edf5a0 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -4565,9 +4565,12 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+
+ bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
++ delta_us > 0 ?
+ (USEC_PER_SEC*
+ (u64)((bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us))
+- >>BFQ_RATE_SHIFT,
++ >>BFQ_RATE_SHIFT :
++ (USEC_PER_SEC*
++ (u64)(bfqd->last_rq_max_size<<BFQ_RATE_SHIFT))>>BFQ_RATE_SHIFT,
+ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT);
+
+ /*
+
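+/*
+ * Self-contained sketch (RATE_SHIFT and all names below are made up for the
+ * example, not taken from the patch above) of the guard added above: the
+ * rate logged on request completion divides by delta_us, so a zero
+ * completion time must be special-cased instead of performing the division.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+#define RATE_SHIFT   16
+#define USEC_PER_SEC 1000000ULL
+
+static uint64_t completion_rate(uint32_t last_rq_max_size, uint32_t delta_us)
+{
+        /* same shape as the ternary in the patch: divide only when safe */
+        return delta_us > 0 ?
+                (USEC_PER_SEC *
+                 (((uint64_t)last_rq_max_size << RATE_SHIFT) / delta_us))
+                        >> RATE_SHIFT :
+                (USEC_PER_SEC *
+                 ((uint64_t)last_rq_max_size << RATE_SHIFT)) >> RATE_SHIFT;
+}
+
+int main(void)
+{
+        printf("%llu\n", (unsigned long long)completion_rate(4096, 100));
+        printf("%llu\n", (unsigned long long)completion_rate(4096, 0)); /* no div by 0 */
+        return 0;
+}
+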
+From ae4310c13eca762644734d53074d8456c85e2dec Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Tue, 19 Dec 2017 12:07:12 +0100
+Subject: [PATCH 10/23] block, bfq-mq: limit tags for writes and async I/O
+
+Asynchronous I/O can easily starve synchronous I/O (both sync reads
+and sync writes), by consuming all request tags. Similarly, storms of
+synchronous writes, such as those that sync(2) may trigger, can starve
+synchronous reads. In turn, these two problems may also cause
+BFQ to lose control of latency for interactive and soft real-time
+applications. For example, on a PLEXTOR PX-256M5S SSD, LibreOffice
+Writer takes 0.6 seconds to start if the device is idle, but it takes
+more than 45 seconds (!) if there are sequential writes in the
+background.
+
+This commit addresses this issue by limiting the maximum percentage of
+tags that asynchronous I/O requests and synchronous write requests can
+consume. In particular, this commit grants a higher threshold to
+synchronous writes, to prevent the latter from being starved by
+asynchronous I/O.
+
+According to the above test, LibreOffice Writer now starts in about
+1.2 seconds on average, regardless of the background workload and
+apart from some rare outliers. To check this improvement, run, e.g.,
+sudo ./comm_startup_lat.sh bfq-mq 5 5 seq 10 "lowriter --terminate_after_init"
+for the comm_startup_lat benchmark in the S suite [1].
+
+[1] https://github.com/Algodev-github/S
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ block/bfq-mq.h | 12 ++++++++
+ 2 files changed, 89 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 6ce2c0789046..f384f5566672 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -362,6 +362,82 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
+ }
+ }
+
++/*
++ * See the comments on bfq_limit_depth for the purpose of
++ * the depths set in the function.
++ */
++static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
++{
++ bfqd->sb_shift = bt->sb.shift;
++
++ /*
++ * In-word depths if no bfq_queue is being weight-raised:
++ * leaving 25% of tags only for sync reads.
++ *
++ * In next formulas, right-shift the value
++ * (1U<<bfqd->sb_shift), instead of computing directly
++ * (1U<<(bfqd->sb_shift - something)), to be robust against
++ * any possible value of bfqd->sb_shift, without having to
++ * limit 'something'.
++ */
++ /* no more than 50% of tags for async I/O */
++ bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
++ /*
++ * no more than 75% of tags for sync writes (25% extra tags
++ * w.r.t. async I/O, to prevent async I/O from starving sync
++ * writes)
++ */
++ bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
++
++ /*
++ * In-word depths in case some bfq_queue is being weight-
++ * raised: leaving ~63% of tags for sync reads. This is the
++ * highest percentage for which, in our tests, application
++ * start-up times didn't suffer from any regression due to tag
++ * shortage.
++ */
++ /* no more than ~18% of tags for async I/O */
++ bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
++ /* no more than ~37% of tags for sync writes (~20% extra tags) */
++ bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
++}
++
++/*
++ * Async I/O can easily starve sync I/O (both sync reads and sync
++ * writes), by consuming all tags. Similarly, storms of sync writes,
++ * such as those that sync(2) may trigger, can starve sync reads.
++ * Limit depths of async I/O and sync writes so as to counter both
++ * problems.
++ */
++static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
++{
++ struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
++ struct bfq_data *bfqd = data->q->elevator->elevator_data;
++ struct sbitmap_queue *bt;
++
++ if (op_is_sync(op) && !op_is_write(op))
++ return;
++
++ if (data->flags & BLK_MQ_REQ_RESERVED) {
++ if (unlikely(!tags->nr_reserved_tags)) {
++ WARN_ON_ONCE(1);
++ return;
++ }
++ bt = &tags->breserved_tags;
++ } else
++ bt = &tags->bitmap_tags;
++
++ if (unlikely(bfqd->sb_shift != bt->sb.shift))
++ bfq_update_depths(bfqd, bt);
++
++ data->shallow_depth =
++ bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
++
++ bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
++ __func__, bfqd->wr_busy_queues, op_is_sync(op),
++ data->shallow_depth);
++}
++
+ static struct bfq_queue *
+ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
+ sector_t sector, struct rb_node **ret_parent,
+@@ -5812,6 +5888,7 @@ static struct elv_fs_entry bfq_attrs[] = {
+
+ static struct elevator_type iosched_bfq_mq = {
+ .ops.mq = {
++ .limit_depth = bfq_limit_depth,
+ .prepare_request = bfq_prepare_request,
+ .finish_request = bfq_finish_request,
+ .exit_icq = bfq_exit_icq,
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index a5947b203ef2..458099ee0308 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -619,6 +619,18 @@ struct bfq_data {
+ struct bfq_queue *bio_bfqq;
+ /* Extra flag used only for TESTING */
+ bool bio_bfqq_set;
++
++ /*
++ * Cached sbitmap shift, used to compute depth limits in
++ * bfq_update_depths.
++ */
++ unsigned int sb_shift;
++
++ /*
++ * Depth limits used in bfq_limit_depth (see comments on the
++ * function)
++ */
++ unsigned int word_depths[2][2];
+ };
+
+ enum bfqq_state_flags {
+
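+/*
+ * Worked, stand-alone example (illustrative, not part of the patch above) of
+ * the depth limits that bfq_update_depths() computes, for a tag bitmap word
+ * shift of 6 (64 tags per word):
+ *   no queue weight-raised:   async <= 32 (50%),  sync writes <= 48 (75%)
+ *   some queue weight-raised: async <= 12 (~18%), sync writes <= 24 (~37%)
+ * Sync reads are never limited, so they always keep the remaining tags.
+ */
+#include <stdio.h>
+
+int main(void)
+{
+        unsigned int sb_shift = 6;      /* 64 tags per sbitmap word */
+        unsigned int depths[2][2];
+
+        depths[0][0] = (1U << sb_shift) >> 1;           /* 32 */
+        depths[0][1] = ((1U << sb_shift) * 3) >> 2;     /* 48 */
+        depths[1][0] = ((1U << sb_shift) * 3) >> 4;     /* 12 */
+        depths[1][1] = ((1U << sb_shift) * 6) >> 4;     /* 24 */
+
+        printf("%u %u %u %u\n",
+               depths[0][0], depths[0][1], depths[1][0], depths[1][1]);
+        return 0;
+}
+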
+From 402e5f6b59662d290ab2b3c10b0016207a63ad21 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 21 Dec 2017 15:51:39 +0100
+Subject: [PATCH 11/23] bfq-sq, bfq-mq: limit sectors served with interactive
+ weight raising
+
+To maximise responsiveness, BFQ raises the weight, and performs device
+idling, for bfq_queues associated with processes deemed as
+interactive. In particular, weight raising has a maximum duration,
+equal to the time needed to start a large application. If a
+weight-raised process goes on doing I/O beyond this maximum duration,
+it loses weight-raising.
+
+This mechanism is evidently vulnerable to the following false
+positives: I/O-bound applications that will go on doing I/O for much
+longer than the duration of weight-raising. These applications have
+basically no benefit from being weight-raised at the beginning of
+their I/O. On the opposite end, while being weight-raised, these
+applications
+a) unjustly steal throughput from applications that may truly need
+low latency;
+b) make BFQ uselessly perform device idling; device idling results
+in loss of device throughput with most flash-based storage, and may
+increase latencies when used purposelessly.
+
+This commit adds a countermeasure to reduce both the above
+problems. To introduce this countermeasure, we provide the following
+extra piece of information (full details in the comments added by this
+commit). During the start-up of the large application used as a
+reference to set the duration of weight-raising, involved processes
+transfer at most ~110K sectors each. Accordingly, a process initially
+deemed interactive has no right to be weight-raised any longer,
+once it has transferred 110K sectors or more.
+
+Based on this consideration, this commit ends weight-raising early
+for a bfq_queue if the latter happens to have received an amount of
+service at least equal to 110K sectors (actually, a little bit more,
+to keep a safety margin). I/O-bound applications that reach a high
+throughput, such as file copy, get to this threshold well before the
+allowed weight-raising period finishes. Thus this early ending of
+weight-raising reduces the amount of time during which these
+applications cause the problems described above.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 84 ++++++++++++++++++++++++++++++++++++++++++++------
+ block/bfq-mq.h | 5 +++
+ block/bfq-sched.c | 3 ++
+ block/bfq-sq-iosched.c | 84 ++++++++++++++++++++++++++++++++++++++++++++------
+ block/bfq.h | 5 +++
+ 5 files changed, 163 insertions(+), 18 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index f384f5566672..63fdd16dec3c 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -162,15 +162,17 @@ static struct kmem_cache *bfq_pool;
+ * interactive applications automatically, using the following formula:
+ * duration = (R / r) * T, where r is the peak rate of the device, and
+ * R and T are two reference parameters.
+- * In particular, R is the peak rate of the reference device (see below),
+- * and T is a reference time: given the systems that are likely to be
+- * installed on the reference device according to its speed class, T is
+- * about the maximum time needed, under BFQ and while reading two files in
+- * parallel, to load typical large applications on these systems.
+- * In practice, the slower/faster the device at hand is, the more/less it
+- * takes to load applications with respect to the reference device.
+- * Accordingly, the longer/shorter BFQ grants weight raising to interactive
+- * applications.
++ * In particular, R is the peak rate of the reference device (see
++ * below), and T is a reference time: given the systems that are
++ * likely to be installed on the reference device according to its
++ * speed class, T is about the maximum time needed, under BFQ and
++ * while reading two files in parallel, to load typical large
++ * applications on these systems (see the comments on
++ * max_service_from_wr below, for more details on how T is obtained).
++ * In practice, the slower/faster the device at hand is, the more/less
++ * it takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to
++ * interactive applications.
+ *
+ * BFQ uses four different reference pairs (R, T), depending on:
+ * . whether the device is rotational or non-rotational;
+@@ -207,6 +209,60 @@ static int T_slow[2];
+ static int T_fast[2];
+ static int device_speed_thresh[2];
+
++/*
++ * BFQ uses the above-detailed, time-based weight-raising mechanism to
++ * privilege interactive tasks. This mechanism is vulnerable to the
++ * following false positives: I/O-bound applications that will go on
++ * doing I/O for much longer than the duration of weight
++ * raising. These applications have basically no benefit from being
++ * weight-raised at the beginning of their I/O. On the opposite end,
++ * while being weight-raised, these applications
++ * a) unjustly steal throughput to applications that may actually need
++ * low latency;
++ * b) make BFQ uselessly perform device idling; device idling results
++ * in loss of device throughput with most flash-based storage, and may
++ * increase latencies when used purposelessly.
++ *
++ * BFQ tries to reduce these problems, by adopting the following
++ * countermeasure. To introduce this countermeasure, we need first to
++ * finish explaining how the duration of weight-raising for
++ * interactive tasks is computed.
++ *
++ * For a bfq_queue deemed as interactive, the duration of weight
++ * raising is dynamically adjusted, as a function of the estimated
++ * peak rate of the device, so as to be equal to the time needed to
++ * execute the 'largest' interactive task we benchmarked so far. By
++ * largest task, we mean the task for which each involved process has
++ * to do more I/O than for any of the other tasks we benchmarked. This
++ * reference interactive task is the start-up of LibreOffice Writer,
++ * and in this task each process/bfq_queue needs to have at most ~110K
++ * sectors transferred.
++ *
++ * This last piece of information enables BFQ to reduce the actual
++ * duration of weight-raising for at least one class of I/O-bound
++ * applications: those doing sequential or quasi-sequential I/O. An
++ * example is file copy. In fact, once started, the main I/O-bound
++ * processes of these applications usually consume the above 110K
++ * sectors in much less time than the processes of an application that
++ * is starting, because these I/O-bound processes will greedily devote
++ * almost all their CPU cycles only to their target,
++ * throughput-friendly I/O operations. This is even more true if BFQ
++ * happens to be underestimating the device peak rate, and thus
++ * overestimating the duration of weight raising. But, according to
++ * our measurements, once transferred 110K sectors, these processes
++ * have no right to be weight-raised any longer.
++ *
++ * Basing on the last consideration, BFQ ends weight-raising for a
++ * bfq_queue if the latter happens to have received an amount of
++ * service at least equal to the following constant. The constant is
++ * set to slightly more than 110K, to have a minimum safety margin.
++ *
++ * This early ending of weight-raising reduces the amount of time
++ * during which interactive false positives cause the two problems
++ * described at the beginning of these comments.
++ */
++static const unsigned long max_service_from_wr = 120000;
++
+ #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
+
+@@ -1361,6 +1417,7 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ if (old_wr_coeff == 1 && wr_or_deserves_wr) {
+ /* start a weight-raising period */
+ if (interactive) {
++ bfqq->service_from_wr = 0;
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+ } else {
+@@ -3980,6 +4037,15 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ "back to interactive wr");
+ }
+ }
++ if (bfqq->wr_coeff > 1 &&
++ bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
++ bfqq->service_from_wr > max_service_from_wr) {
++ /* see comments on max_service_from_wr */
++ bfq_bfqq_end_wr(bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "[%s] too much service",
++ __func__);
++ }
+ }
+ /*
+ * To improve latency (for this or other queues), immediately
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 458099ee0308..9a5ce1168ff5 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -331,6 +331,11 @@ struct bfq_queue {
+ * last transition from idle to backlogged.
+ */
+ unsigned long service_from_backlogged;
++ /*
++ * Cumulative service received from the @bfq_queue since its
++ * last transition to weight-raised state.
++ */
++ unsigned long service_from_wr;
+ /*
+ * Value of wr start time when switching to soft rt
+ */
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 9d261dd428e4..4e6c5232e2fb 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -942,6 +942,9 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
+ if (!bfqq->service_from_backlogged)
+ bfqq->first_IO_time = jiffies;
+
++ if (bfqq->wr_coeff > 1)
++ bfqq->service_from_wr += served;
++
+ bfqq->service_from_backlogged += served;
+ for_each_entity(entity) {
+ st = bfq_entity_service_tree(entity);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index eff4c4edf5a0..486493aafaf8 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -156,15 +156,17 @@ static struct kmem_cache *bfq_pool;
+ * interactive applications automatically, using the following formula:
+ * duration = (R / r) * T, where r is the peak rate of the device, and
+ * R and T are two reference parameters.
+- * In particular, R is the peak rate of the reference device (see below),
+- * and T is a reference time: given the systems that are likely to be
+- * installed on the reference device according to its speed class, T is
+- * about the maximum time needed, under BFQ and while reading two files in
+- * parallel, to load typical large applications on these systems.
+- * In practice, the slower/faster the device at hand is, the more/less it
+- * takes to load applications with respect to the reference device.
+- * Accordingly, the longer/shorter BFQ grants weight raising to interactive
+- * applications.
++ * In particular, R is the peak rate of the reference device (see
++ * below), and T is a reference time: given the systems that are
++ * likely to be installed on the reference device according to its
++ * speed class, T is about the maximum time needed, under BFQ and
++ * while reading two files in parallel, to load typical large
++ * applications on these systems (see the comments on
++ * max_service_from_wr below, for more details on how T is obtained).
++ * In practice, the slower/faster the device at hand is, the more/less
++ * it takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to
++ * interactive applications.
+ *
+ * BFQ uses four different reference pairs (R, T), depending on:
+ * . whether the device is rotational or non-rotational;
+@@ -201,6 +203,60 @@ static int T_slow[2];
+ static int T_fast[2];
+ static int device_speed_thresh[2];
+
++/*
++ * BFQ uses the above-detailed, time-based weight-raising mechanism to
++ * privilege interactive tasks. This mechanism is vulnerable to the
++ * following false positives: I/O-bound applications that will go on
++ * doing I/O for much longer than the duration of weight
++ * raising. These applications have basically no benefit from being
++ * weight-raised at the beginning of their I/O. On the opposite end,
++ * while being weight-raised, these applications
++ * a) unjustly steal throughput to applications that may actually need
++ * low latency;
++ * b) make BFQ uselessly perform device idling; device idling results
++ * in loss of device throughput with most flash-based storage, and may
++ * increase latencies when used purposelessly.
++ *
++ * BFQ tries to reduce these problems, by adopting the following
++ * countermeasure. To introduce this countermeasure, we need first to
++ * finish explaining how the duration of weight-raising for
++ * interactive tasks is computed.
++ *
++ * For a bfq_queue deemed as interactive, the duration of weight
++ * raising is dynamically adjusted, as a function of the estimated
++ * peak rate of the device, so as to be equal to the time needed to
++ * execute the 'largest' interactive task we benchmarked so far. By
++ * largest task, we mean the task for which each involved process has
++ * to do more I/O than for any of the other tasks we benchmarked. This
++ * reference interactive task is the start-up of LibreOffice Writer,
++ * and in this task each process/bfq_queue needs to have at most ~110K
++ * sectors transferred.
++ *
++ * This last piece of information enables BFQ to reduce the actual
++ * duration of weight-raising for at least one class of I/O-bound
++ * applications: those doing sequential or quasi-sequential I/O. An
++ * example is file copy. In fact, once started, the main I/O-bound
++ * processes of these applications usually consume the above 110K
++ * sectors in much less time than the processes of an application that
++ * is starting, because these I/O-bound processes will greedily devote
++ * almost all their CPU cycles only to their target,
++ * throughput-friendly I/O operations. This is even more true if BFQ
++ * happens to be underestimating the device peak rate, and thus
++ * overestimating the duration of weight raising. But, according to
++ * our measurements, once transferred 110K sectors, these processes
++ * have no right to be weight-raised any longer.
++ *
++ * Basing on the last consideration, BFQ ends weight-raising for a
++ * bfq_queue if the latter happens to have received an amount of
++ * service at least equal to the following constant. The constant is
++ * set to slightly more than 110K, to have a minimum safety margin.
++ *
++ * This early ending of weight-raising reduces the amount of time
++ * during which interactive false positives cause the two problems
++ * described at the beginning of these comments.
++ */
++static const unsigned long max_service_from_wr = 120000;
++
+ #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
+
+@@ -1246,6 +1302,7 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ if (old_wr_coeff == 1 && wr_or_deserves_wr) {
+ /* start a weight-raising period */
+ if (interactive) {
++ bfqq->service_from_wr = 0;
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+ } else {
+@@ -3794,6 +3851,15 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ "back to interactive wr");
+ }
+ }
++ if (bfqq->wr_coeff > 1 &&
++ bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
++ bfqq->service_from_wr > max_service_from_wr) {
++ /* see comments on max_service_from_wr */
++ bfq_bfqq_end_wr(bfqq);
++ bfq_log_bfqq(bfqd, bfqq,
++ "[%s] too much service",
++ __func__);
++ }
+ }
+ /*
+ * To improve latency (for this or other queues), immediately
+diff --git a/block/bfq.h b/block/bfq.h
+index 59539adc00a5..0cd7a3f251a7 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -323,6 +323,11 @@ struct bfq_queue {
+ * last transition from idle to backlogged.
+ */
+ unsigned long service_from_backlogged;
++ /*
++ * Cumulative service received from the @bfq_queue since its
++ * last transition to weight-raised state.
++ */
++ unsigned long service_from_wr;
+ /*
+ * Value of wr start time when switching to soft rt
+ */
+
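+/*
+ * Back-of-the-envelope check (assuming 512-byte block-layer sectors; not
+ * part of the patch above) of the max_service_from_wr constant introduced
+ * above: 120000 sectors is 120000 * 512 = 61440000 bytes, i.e. roughly
+ * 58.6 MiB of service before interactive weight-raising is ended early.
+ */
+#include <stdio.h>
+
+int main(void)
+{
+        unsigned long max_service_from_wr = 120000;     /* sectors */
+        unsigned long bytes = max_service_from_wr * 512;
+
+        printf("%lu bytes (~%.1f MiB)\n", bytes, bytes / (1024.0 * 1024.0));
+        return 0;
+}
+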
+From 59efebb94b2f9bac653faf62dadb45b83bd27fa7 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Thu, 4 Jan 2018 16:29:58 +0100
+Subject: [PATCH 12/23] bfq-sq, bfq-mq: put async queues for root bfq groups
+ too
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+For each pair [device for which bfq is selected as I/O scheduler,
+group in blkio/io], bfq maintains a corresponding bfq group. Each such
+bfq group contains a set of async queues, with each async queue
+created on demand, i.e., when some I/O request arrives for it. On
+creation, an async queue gets an extra reference, to make sure that
+the queue is not freed as long as its bfq group exists. Accordingly,
+to allow the queue to be freed after the group exits, this extra
+reference must be released on group exit.
+
+The above holds also for a bfq root group, i.e., for the bfq group
+corresponding to the root blkio/io group for a given device. Yet, by
+mistake, the references to the existing async queues of a root group
+are not released when the latter exits. This causes a memory leak when
+the instance of bfq for a given device exits. In a similar vein,
+bfqg_stats_xfer_dead is not executed for a root group.
+
+This commit fixes bfq_pd_offline so that the latter executes the above
+missing operations for a root group too.
+
+Reported-by: Holger Hoffstätte <holger@applied-asynchrony.com>
+Reported-by: Guoqing Jiang <gqjiang@suse.com>
+Signed-off-by: Davide Ferrari <davideferrari8@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 562b0ce581a7..45fefb2e2d57 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -885,13 +885,13 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+
+ entity = bfqg->my_entity;
+
+- if (!entity) /* root group */
+- return;
+-
+ #ifdef BFQ_MQ
+ spin_lock_irqsave(&bfqd->lock, flags);
+ #endif
+
++ if (!entity) /* root group */
++ goto put_async_queues;
++
+ /*
+ * Empty all service_trees belonging to this group before
+ * deactivating the group itself.
+@@ -926,6 +926,8 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
+ BUG_ON(bfqg->sched_data.in_service_entity);
+
+ __bfq_deactivate_entity(entity, false);
++
++put_async_queues:
+ bfq_put_async_queues(bfqd, bfqg);
+
+ #ifdef BFQ_MQ
+
+From 2dfbaaaf95054e2da3ededc0deb1ba5a4f589e53 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 8 Jan 2018 19:38:45 +0100
+Subject: [PATCH 13/23] bfq-sq, bfq-mq: release oom-queue ref to root group on
+ exit
+
+On scheduler init, a reference to the root group and a reference to
+its corresponding blkg are taken for the oom queue. Yet these
+references are not released on scheduler exit, which prevents these
+objects from being freed. This commit adds the missing reference
+releases.
+
+Reported-by: Davide Ferrari <davideferrari8@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 3 +++
+ block/bfq-sq-iosched.c | 3 +++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 63fdd16dec3c..b82c52fabf91 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -5507,6 +5507,9 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
++ /* release oom-queue reference to root group */
++ bfqg_and_blkg_put(bfqd->root_group);
++
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
+ #else
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 486493aafaf8..851af055664d 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -5052,6 +5052,9 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
++ /* release oom-queue reference to root group */
++ bfqg_put(bfqd->root_group);
++
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
+ #else
+
+From 13efe00c8292d78d223e1090a7f36426e360eb38 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 8 Jan 2018 19:40:38 +0100
+Subject: [PATCH 14/23] block, bfq-sq, bfq-mq: trace get and put of bfq groups
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 15 +++++++++++++++
+ block/bfq-mq-iosched.c | 3 ++-
+ 2 files changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index 45fefb2e2d57..f94743fb2e7d 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -267,6 +267,8 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
+
+ static void bfqg_get(struct bfq_group *bfqg)
+ {
++ trace_printk("bfqg %p\n", bfqg);
++
+ #ifdef BFQ_MQ
+ bfqg->ref++;
+ #else
+@@ -280,6 +282,9 @@ static void bfqg_put(struct bfq_group *bfqg)
+ bfqg->ref--;
+
+ BUG_ON(bfqg->ref < 0);
++ trace_printk("putting bfqg %p %s\n", bfqg,
++ bfqg->ref == 0 ? "and freeing it" : "");
++
+ if (bfqg->ref == 0)
+ kfree(bfqg);
+ #else
+@@ -293,6 +298,7 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+ bfqg_get(bfqg);
+
++ trace_printk("getting blkg for bfqg %p\n", bfqg);
+ blkg_get(bfqg_to_blkg(bfqg));
+ }
+
+@@ -300,6 +306,7 @@ static void bfqg_and_blkg_put(struct bfq_group *bfqg)
+ {
+ bfqg_put(bfqg);
+
++ trace_printk("putting blkg for bfqg %p\n", bfqg);
+ blkg_put(bfqg_to_blkg(bfqg));
+ }
+ #endif
+@@ -382,6 +389,8 @@ static void bfq_init_entity(struct bfq_entity *entity,
+ * Make sure that bfqg and its associated blkg do not
+ * disappear before entity.
+ */
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting bfqg %p and blkg\n", __func__, bfqg);
++
+ bfqg_and_blkg_get(bfqg);
+ #else
+ bfqg_get(bfqg);
+@@ -475,6 +484,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
+ kfree(bfqg);
+ return NULL;
+ }
++ trace_printk("bfqg %p\n", bfqg);
+
+ #ifdef BFQ_MQ
+ /* see comments in bfq_bic_update_cgroup for why refcounting */
+@@ -513,6 +523,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
+ static void bfq_pd_free(struct blkg_policy_data *pd)
+ {
+ struct bfq_group *bfqg = pd_to_bfqg(pd);
++ trace_printk("bfqg %p\n", bfqg);
+
+ bfqg_stats_exit(&bfqg->stats);
+ #ifdef BFQ_MQ
+@@ -650,6 +661,8 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
+ }
+ #ifdef BFQ_MQ
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg);
++
+ bfqg_and_blkg_put(bfqq_group(bfqq));
+ #else
+ bfqg_put(bfqq_group(bfqq));
+@@ -658,6 +671,8 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ entity->parent = bfqg->my_entity;
+ entity->sched_data = &bfqg->sched_data;
+ #ifdef BFQ_MQ
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting blkg and bfqg %p\n", __func__, bfqg);
++
+ /* pin down bfqg and its associated blkg */
+ bfqg_and_blkg_get(bfqg);
+ #else
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index b82c52fabf91..d5b7a6b985d7 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4385,10 +4385,11 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ if (bfqq->bfqd)
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
+
+- kmem_cache_free(bfq_pool, bfqq);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg);
+ bfqg_and_blkg_put(bfqg);
+ #endif
++ kmem_cache_free(bfq_pool, bfqq);
+ }
+
+ static void bfq_put_cooperator(struct bfq_queue *bfqq)
+
+From 816b77fba966171974eb5ee25d81bc4e19eaf1b4 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 10 Jan 2018 09:08:22 +0100
+Subject: [PATCH 15/23] bfq-sq, bfq-mq: compile group put for oom queue only if
+ BFQ_GROUP_IOSCHED is set
+
+Commit ("bfq-sq, bfq-mq: release oom-queue ref to root group on exit")
+added a missing put of the root bfq group for the oom queue. That put
+has to be, and can be, performed only if CONFIG_BFQ_GROUP_IOSCHED is
+defined: the function doing the put is even not defined at all if
+CONFIG_BFQ_GROUP_IOSCHED is not defined. But that commit makes that
+put be invoked regardless of whether CONFIG_BFQ_GROUP_IOSCHED is
+defined. This commit fixes this mistake, by making that invocation be
+compiled only if CONFIG_BFQ_GROUP_IOSCHED is actually defined.
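+
+In condensed form, the fix moves the put inside the existing ifdef in
+bfq_exit_queue() (a sketch of the bfq-mq variant as applied by the hunks
+below; the bfq-sq variant uses bfqg_put() instead of
+bfqg_and_blkg_put(), and the elided parts are unchanged):
+
+	#ifdef BFQ_GROUP_IOSCHED_ENABLED
+		/* release oom-queue reference to root group */
+		bfqg_and_blkg_put(bfqd->root_group);
+
+		blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
+	#else
+		/* ... group scheduling disabled: no group reference to drop ... */
+	#endif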
+
+Fixes ("block, bfq: release oom-queue ref to root group on exit")
+Reported-by: Jan Alexander Steffens <jan.steffens@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ block/bfq-sq-iosched.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index d5b7a6b985d7..2581fe0f6f2f 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -5508,10 +5508,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ /* release oom-queue reference to root group */
+ bfqg_and_blkg_put(bfqd->root_group);
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
+ #else
+ spin_lock_irq(&bfqd->lock);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 851af055664d..c4df156b1fb4 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -5052,10 +5052,10 @@ static void bfq_exit_queue(struct elevator_queue *e)
+
+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer));
+
++#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ /* release oom-queue reference to root group */
+ bfqg_put(bfqd->root_group);
+
+-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_deactivate_policy(q, &blkcg_policy_bfq);
+ #else
+ bfq_put_async_queues(bfqd, bfqd->root_group);
+
+From 643a89c659172b2c9ae16adfe03af4e3e88e1326 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Sat, 13 Jan 2018 18:48:41 +0100
+Subject: [PATCH 16/23] block, bfq-sq, bfq-mq: remove trace_printks
+
+Commit ("block, bfq-sq, bfq-mq: trace get and put of bfq groups")
+unwisely added some invocations of the function trace_printk, which
+is inappropriate in production kernels. This commit removes those
+invocations.
+
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index f94743fb2e7d..a4f8a03edfc9 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -267,8 +267,6 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
+
+ static void bfqg_get(struct bfq_group *bfqg)
+ {
+- trace_printk("bfqg %p\n", bfqg);
+-
+ #ifdef BFQ_MQ
+ bfqg->ref++;
+ #else
+@@ -282,9 +280,6 @@ static void bfqg_put(struct bfq_group *bfqg)
+ bfqg->ref--;
+
+ BUG_ON(bfqg->ref < 0);
+- trace_printk("putting bfqg %p %s\n", bfqg,
+- bfqg->ref == 0 ? "and freeing it" : "");
+-
+ if (bfqg->ref == 0)
+ kfree(bfqg);
+ #else
+@@ -298,7 +293,6 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+ bfqg_get(bfqg);
+
+- trace_printk("getting blkg for bfqg %p\n", bfqg);
+ blkg_get(bfqg_to_blkg(bfqg));
+ }
+
+@@ -306,7 +300,6 @@ static void bfqg_and_blkg_put(struct bfq_group *bfqg)
+ {
+ bfqg_put(bfqg);
+
+- trace_printk("putting blkg for bfqg %p\n", bfqg);
+ blkg_put(bfqg_to_blkg(bfqg));
+ }
+ #endif
+@@ -484,8 +477,6 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
+ kfree(bfqg);
+ return NULL;
+ }
+- trace_printk("bfqg %p\n", bfqg);
+-
+ #ifdef BFQ_MQ
+ /* see comments in bfq_bic_update_cgroup for why refcounting */
+ bfqg_get(bfqg);
+@@ -523,7 +514,6 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
+ static void bfq_pd_free(struct blkg_policy_data *pd)
+ {
+ struct bfq_group *bfqg = pd_to_bfqg(pd);
+- trace_printk("bfqg %p\n", bfqg);
+
+ bfqg_stats_exit(&bfqg->stats);
+ #ifdef BFQ_MQ
+
+From ce050275e24fecec800f346c09d9494563e9fc8a Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Mon, 15 Jan 2018 15:07:05 +0100
+Subject: [PATCH 17/23] block, bfq-mq: add requeue-request hook
+
+Commit 'a6a252e64914 ("blk-mq-sched: decide how to handle flush rq via
+RQF_FLUSH_SEQ")' makes all non-flush re-prepared requests for a device
+be re-inserted into the active I/O scheduler for that device. As a
+consequence, I/O schedulers may get the same request inserted again,
+even several times, without a finish_request invoked on that request
+before each re-insertion.
+
+This fact is the cause of the failure reported in [1]. For an I/O
+scheduler, every re-insertion of the same re-prepared request is
+equivalent to the insertion of a new request. For schedulers like
+mq-deadline or kyber, this fact causes no harm. In contrast, it
+confuses a stateful scheduler like BFQ, which keeps state for an I/O
+request until the finish_request hook is invoked on it. In particular,
+BFQ may get stuck, waiting forever for the number of dispatches of the
+same request to be balanced by an equal number of completions (whereas
+there will be only one completion for that request). In this state, BFQ
+may refuse to serve I/O requests from other bfq_queues. The hang
+reported in [1] then follows.
+
+However, the above re-prepared requests do undergo a requeue, so the
+requeue_request hook of the active elevator, if set, is invoked for
+them. This commit therefore addresses the above issue by properly
+implementing the requeue_request hook in BFQ.
+
+[1] https://marc.info/?l=linux-block&m=151211117608676
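+
+In condensed form, the fix wires both the requeue and the finish hook to
+one handler and makes that handler tolerate spurious or repeated
+invocations (a sketch only, using names from the hunks below, which
+carry the full logic):
+
+	static void bfq_finish_requeue_request(struct request *rq)
+	{
+		struct bfq_queue *bfqq = RQ_BFQQ(rq);
+
+		/* exit on requests the elevator does not (or no longer) own */
+		if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
+			return;
+		if (!rq->elv.icq || !bfqq)
+			return;
+
+		/* ... drop all scheduler state and references held for rq ... */
+
+		/* let a later spurious invocation exit through the checks above */
+		rq->elv.priv[0] = NULL;
+		rq->elv.priv[1] = NULL;
+	}
+
+	.ops.mq = {
+		.requeue_request = bfq_finish_requeue_request,
+		.finish_request  = bfq_finish_requeue_request,
+		...
+	},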
+
+Reported-by: Ivan Kozik <ivan@ludios.org>
+Reported-by: Alban Browaeys <alban.browaeys@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+Signed-off-by: Serena Ziviani <ziviani.serena@gmail.com>
+---
+ block/bfq-mq-iosched.c | 90 ++++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 73 insertions(+), 17 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 2581fe0f6f2f..bb7ccc2f1165 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4162,9 +4162,9 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ * TESTING: reset DISP_LIST flag, because: 1)
+	 * this request has passed through
+ * bfq_prepare_request, 2) then it will have
+- * bfq_finish_request invoked on it, and 3) in
+- * bfq_finish_request we use this flag to check
+- * that bfq_finish_request is not invoked on
++ * bfq_finish_requeue_request invoked on it, and 3) in
++ * bfq_finish_requeue_request we use this flag to check
++ * that bfq_finish_requeue_request is not invoked on
+ * requests for which bfq_prepare_request has
+ * been invoked.
+ */
+@@ -4173,8 +4173,8 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ }
+
+ /*
+- * We exploit the bfq_finish_request hook to decrement
+- * rq_in_driver, but bfq_finish_request will not be
++ * We exploit the bfq_finish_requeue_request hook to decrement
++ * rq_in_driver, but bfq_finish_requeue_request will not be
+ * invoked on this request. So, to avoid unbalance,
+ * just start this request, without incrementing
+ * rq_in_driver. As a negative consequence,
+@@ -4183,10 +4183,10 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ * bfq_schedule_dispatch to be invoked uselessly.
+ *
+ * As for implementing an exact solution, the
+- * bfq_finish_request hook, if defined, is probably
++ * bfq_finish_requeue_request hook, if defined, is probably
+ * invoked also on this request. So, by exploiting
+ * this hook, we could 1) increment rq_in_driver here,
+- * and 2) decrement it in bfq_finish_request. Such a
++ * and 2) decrement it in bfq_finish_requeue_request. Such a
+ * solution would let the value of the counter be
+ * always accurate, but it would entail using an extra
+ * interface function. This cost seems higher than the
+@@ -4878,6 +4878,8 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ return idle_timer_disabled;
+ }
+
++static void bfq_prepare_request(struct request *rq, struct bio *bio);
++
+ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ bool at_head)
+ {
+@@ -4919,6 +4921,20 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ BUG_ON(!(rq->rq_flags & RQF_GOT));
+ rq->rq_flags &= ~RQF_GOT;
+
++ if (!bfqq) {
++ /*
++ * This should never happen. Most likely rq is
++ * a requeued regular request, being
++ * re-inserted without being first
++ * re-prepared. Do a prepare, to avoid
++ * failure.
++ */
++ pr_warn("Regular request associated with no queue");
++ WARN_ON(1);
++ bfq_prepare_request(rq, rq->bio);
++ bfqq = RQ_BFQQ(rq);
++ }
++
+ #if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ idle_timer_disabled = __bfq_insert_request(bfqd, rq);
+ /*
+@@ -5110,7 +5126,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ }
+ }
+
+-static void bfq_finish_request_body(struct bfq_queue *bfqq)
++static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "put_request_body: allocated %d", bfqq->allocated);
+@@ -5120,7 +5136,13 @@ static void bfq_finish_request_body(struct bfq_queue *bfqq)
+ bfq_put_queue(bfqq);
+ }
+
+-static void bfq_finish_request(struct request *rq)
++/*
++ * Handle either a requeue or a finish for rq. The things to do are
++ * the same in both cases: all references to rq are to be dropped. In
++ * particular, rq is considered completed from the point of view of
++ * the scheduler.
++ */
++static void bfq_finish_requeue_request(struct request *rq)
+ {
+ struct bfq_queue *bfqq;
+ struct bfq_data *bfqd;
+@@ -5128,11 +5150,27 @@ static void bfq_finish_request(struct request *rq)
+
+ BUG_ON(!rq);
+
+- if (!rq->elv.icq)
++ bfqq = RQ_BFQQ(rq);
++
++ /*
++ * Requeue and finish hooks are invoked in blk-mq without
++ * checking whether the involved request is actually still
++ * referenced in the scheduler. To handle this fact, the
++ * following two checks make this function exit in case of
++ * spurious invocations, for which there is nothing to do.
++ *
++ * First, check whether rq has nothing to do with an elevator.
++ */
++ if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
+ return;
+
+- bfqq = RQ_BFQQ(rq);
+- BUG_ON(!bfqq);
++ /*
++ * rq either is not associated with any icq, or is an already
++ * requeued request that has not (yet) been re-inserted into
++ * a bfq_queue.
++ */
++ if (!rq->elv.icq || !bfqq)
++ return;
+
+ bic = RQ_BIC(rq);
+ BUG_ON(!bic);
+@@ -5145,7 +5183,6 @@ static void bfq_finish_request(struct request *rq)
+ BUG();
+ }
+ BUG_ON(rq->rq_flags & RQF_QUEUED);
+- BUG_ON(!(rq->rq_flags & RQF_ELVPRIV));
+
+ bfq_log_bfqq(bfqd, bfqq,
+ "putting rq %p with %u sects left, STARTED %d",
+@@ -5166,13 +5203,14 @@ static void bfq_finish_request(struct request *rq)
+ spin_lock_irqsave(&bfqd->lock, flags);
+
+ bfq_completed_request(bfqq, bfqd);
+- bfq_finish_request_body(bfqq);
++ bfq_finish_requeue_request_body(bfqq);
+
+ spin_unlock_irqrestore(&bfqd->lock, flags);
+ } else {
+ /*
+ * Request rq may be still/already in the scheduler,
+- * in which case we need to remove it. And we cannot
++ * in which case we need to remove it (this should
++ * never happen in case of requeue). And we cannot
+ * defer such a check and removal, to avoid
+ * inconsistencies in the time interval from the end
+ * of this function to the start of the deferred work.
+@@ -5189,9 +5227,26 @@ static void bfq_finish_request(struct request *rq)
+ bfqg_stats_update_io_remove(bfqq_group(bfqq),
+ rq->cmd_flags);
+ }
+- bfq_finish_request_body(bfqq);
++ bfq_finish_requeue_request_body(bfqq);
+ }
+
++ /*
++ * Reset private fields. In case of a requeue, this allows
++ * this function to correctly do nothing if it is spuriously
++ * invoked again on this same request (see the check at the
++ * beginning of the function). Probably, a better general
++ * design would be to prevent blk-mq from invoking the requeue
++ * or finish hooks of an elevator, for a request that is not
++ * referred by that elevator.
++ *
++ * Resetting the following fields would break the
++ * request-insertion logic if rq is re-inserted into a bfq
++ * internal queue, without a re-preparation. Here we assume
++ * that re-insertions of requeued requests, without
++ * re-preparation, can happen only for pass_through or at_head
++ * requests (which are not re-inserted into bfq internal
++ * queues).
++ */
+ rq->elv.priv[0] = NULL;
+ rq->elv.priv[1] = NULL;
+ }
+@@ -5960,7 +6015,8 @@ static struct elevator_type iosched_bfq_mq = {
+ .ops.mq = {
+ .limit_depth = bfq_limit_depth,
+ .prepare_request = bfq_prepare_request,
+- .finish_request = bfq_finish_request,
++ .requeue_request = bfq_finish_requeue_request,
++ .finish_request = bfq_finish_requeue_request,
+ .exit_icq = bfq_exit_icq,
+ .insert_requests = bfq_insert_requests,
+ .dispatch_request = bfq_dispatch_request,
+
+From 3e4f292191cc62b3844316b9741534c3f1b36f0a Mon Sep 17 00:00:00 2001
+From: Davide Paganelli <paga.david@gmail.com>
+Date: Thu, 8 Feb 2018 12:19:24 +0100
+Subject: [PATCH 18/23] block, bfq-mq, bfq-sq: make log functions print names
+ of calling functions
+
+Pass __func__ to the invocations of pr_crit, blk_add_trace_msg and
+blk_add_cgroup_trace_msg inside the bfq_log* macros, so that the name of
+the calling function is automatically included in every log message and
+no longer has to be spelled out by hand at each call site.
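+
+In condensed form, the macro change looks as follows (bfq_log() shown,
+taken from the hunks below; the bfq_log_bfqq()/bfq_log_bfqg() variants
+additionally print the pid and the cgroup path):
+
+	#define bfq_log(bfqd, fmt, args...) \
+		pr_crit("%s bfq [%s] " fmt "\n", \
+			checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+			__func__, ##args)
+
+	/* call sites then drop the hand-written prefix, e.g.: */
+	bfq_log(bfqd, "schedule dispatch");	/* before */
+	bfq_log(bfqd, "");			/* after: name comes from __func__ */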
+
+Signed-off-by: Davide Paganelli <paga.david@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-cgroup-included.c | 9 +--
+ block/bfq-mq-iosched.c | 167 ++++++++++++++++++++++----------------------
+ block/bfq-mq.h | 33 ++++-----
+ block/bfq-sched.c | 54 +++++++-------
+ block/bfq-sq-iosched.c | 134 +++++++++++++++++------------------
+ block/bfq.h | 33 ++++-----
+ 6 files changed, 214 insertions(+), 216 deletions(-)
+
+diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c
+index a4f8a03edfc9..613f154e9da5 100644
+--- a/block/bfq-cgroup-included.c
++++ b/block/bfq-cgroup-included.c
+@@ -382,7 +382,8 @@ static void bfq_init_entity(struct bfq_entity *entity,
+ * Make sure that bfqg and its associated blkg do not
+ * disappear before entity.
+ */
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting bfqg %p and blkg\n", __func__, bfqg);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "getting bfqg %p and blkg\n",
++ bfqg);
+
+ bfqg_and_blkg_get(bfqg);
+ #else
+@@ -651,7 +652,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
+ }
+ #ifdef BFQ_MQ
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "putting blkg and bfqg %p\n", bfqg);
+
+ bfqg_and_blkg_put(bfqq_group(bfqq));
+ #else
+@@ -661,7 +662,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ entity->parent = bfqg->my_entity;
+ entity->sched_data = &bfqg->sched_data;
+ #ifdef BFQ_MQ
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting blkg and bfqg %p\n", __func__, bfqg);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "getting blkg and bfqg %p\n", bfqg);
+
+ /* pin down bfqg and its associated blkg */
+ bfqg_and_blkg_get(bfqg);
+@@ -721,7 +722,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
+ if (entity->sched_data != &bfqg->sched_data) {
+ bic_set_bfqq(bic, NULL, 0);
+ bfq_log_bfqq(bfqd, async_bfqq,
+- "bic_change_group: %p %d",
++ "%p %d",
+ async_bfqq,
+ async_bfqq->ref);
+ bfq_put_queue(async_bfqq);
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index bb7ccc2f1165..edc93b6af186 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -310,7 +310,7 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
+ static void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ {
+ if (bfqd->queued != 0) {
+- bfq_log(bfqd, "schedule dispatch");
++ bfq_log(bfqd, "");
+ blk_mq_run_hw_queues(bfqd->queue, true);
+ }
+ }
+@@ -489,8 +489,8 @@ static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+ data->shallow_depth =
+ bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
+
+- bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
+- __func__, bfqd->wr_busy_queues, op_is_sync(op),
++ bfq_log(bfqd, "wr_busy %d sync %d depth %u",
++ bfqd->wr_busy_queues, op_is_sync(op),
+ data->shallow_depth);
+ }
+
+@@ -528,7 +528,7 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
+ if (rb_link)
+ *rb_link = p;
+
+- bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ bfq_log(bfqd, "%llu: returning %d",
+ (unsigned long long) sector,
+ bfqq ? bfqq->pid : 0);
+
+@@ -749,7 +749,7 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
+ if (rq == last || ktime_get_ns() < rq->fifo_time)
+ return NULL;
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "returned %p", rq);
+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
+ return rq;
+ }
+@@ -842,7 +842,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
+ bfq_serv_to_charge(next_rq, bfqq));
+ if (entity->budget != new_budget) {
+ entity->budget = new_budget;
+- bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++ bfq_log_bfqq(bfqd, bfqq, "new budget %lu",
+ new_budget);
+ bfq_requeue_bfqq(bfqd, bfqq, false);
+ }
+@@ -915,8 +915,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
+- __func__,
++ "bic %p wr_coeff %d start_finish %lu max_time %lu",
+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
+ bfqq->wr_cur_max_time);
+
+@@ -929,11 +928,11 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ bfq_wr_duration(bfqd))) {
+ switch_back_to_interactive_wr(bfqq, bfqd);
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "resume state: switching back to interactive");
++ "switching back to interactive");
+ } else {
+ bfqq->wr_coeff = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "resume state: switching off wr (%lu + %lu < %lu)",
++ "switching off wr (%lu + %lu < %lu)",
+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
+ jiffies);
+ }
+@@ -985,7 +984,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ /* Increment burst size to take into account also bfqq */
+ bfqd->burst_size++;
+
+- bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size);
++ bfq_log_bfqq(bfqd, bfqq, "%d", bfqd->burst_size);
+
+ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh);
+
+@@ -998,7 +997,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ * other to consider this burst as large.
+ */
+ bfqd->large_burst = true;
+- bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started");
++ bfq_log_bfqq(bfqd, bfqq, "large burst started");
+
+ /*
+ * We can now mark all queues in the burst list as
+@@ -1170,7 +1169,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfqd->large_burst = false;
+ bfq_reset_burst_list(bfqd, bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "handle_burst: late activation or different group");
++ "late activation or different group");
+ goto end;
+ }
+
+@@ -1180,7 +1179,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ * bfqq as belonging to this large burst immediately.
+ */
+ if (bfqd->large_burst) {
+- bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst");
++ bfq_log_bfqq(bfqd, bfqq, "marked in burst");
+ bfq_mark_bfqq_in_large_burst(bfqq);
+ goto end;
+ }
+@@ -1686,7 +1685,7 @@ static void bfq_add_request(struct request *rq)
+ unsigned int old_wr_coeff = bfqq->wr_coeff;
+ bool interactive = false;
+
+- bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s",
++ bfq_log_bfqq(bfqd, bfqq, "size %u %s",
+ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A");
+
+ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */
+@@ -1952,7 +1951,7 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
+ __rq = bfq_find_rq_fmerge(bfqd, bio, q);
+ if (__rq && elv_bio_merge_ok(__rq, bio)) {
+ *req = __rq;
+- bfq_log(bfqd, "request_merge: req %p", __rq);
++ bfq_log(bfqd, "req %p", __rq);
+
+ return ELEVATOR_FRONT_MERGE;
+ }
+@@ -1989,7 +1988,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ bfqq->next_rq = next_rq;
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "request_merged: req %p prev %p next_rq %p bfqq %p",
++ "req %p prev %p next_rq %p bfqq %p",
+ req, prev, next_rq, bfqq);
+
+ /*
+@@ -2018,7 +2017,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+ goto end;
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "requests_merged: rq %p next %p bfqq %p next_bfqq %p",
++ "rq %p next %p bfqq %p next_bfqq %p",
+ rq, next, bfqq, next_bfqq);
+
+ spin_lock_irq(&bfqq->bfqd->lock);
+@@ -2069,10 +2068,10 @@ static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
+ */
+ bfqq->entity.prio_changed = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "end_wr: wrais ending at %lu, rais_max_time %u",
++ "wrais ending at %lu, rais_max_time %u",
+ bfqq->last_wr_start_finish,
+ jiffies_to_msecs(bfqq->wr_cur_max_time));
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d",
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "wr_busy %d",
+ bfqq->bfqd->wr_busy_queues);
+ }
+
+@@ -2245,8 +2244,8 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ {
+ if (bfq_too_late_for_merging(new_bfqq)) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "[%s] too late for bfq%d to be merged",
+- __func__, new_bfqq->pid);
++ "too late for bfq%d to be merged",
++ new_bfqq->pid);
+ return false;
+ }
+
+@@ -2395,8 +2394,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
+ }
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
+- __func__,
++ "bic %p wr_coeff %d start_finish %lu max_time %lu",
+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
+ bfqq->wr_cur_max_time);
+ }
+@@ -2453,7 +2451,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+
+ }
+
+- bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
++ bfq_log_bfqq(bfqd, new_bfqq, "wr_busy %d",
+ bfqd->wr_busy_queues);
+
+ /*
+@@ -2554,7 +2552,7 @@ static void bfq_set_budget_timeout(struct bfq_data *bfqd,
+ bfqq->budget_timeout = jiffies +
+ bfqd->bfq_timeout * timeout_coeff;
+
+- bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ bfq_log_bfqq(bfqd, bfqq, "%u",
+ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff));
+ }
+
+@@ -2620,10 +2618,10 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+
+ bfq_set_budget_timeout(bfqd, bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_in_service_queue, cur-budget = %d",
++ "cur-budget = %d",
+ bfqq->entity.budget);
+ } else
+- bfq_log(bfqd, "set_in_service_queue: NULL");
++ bfq_log(bfqd, "NULL");
+
+ bfqd->in_service_queue = bfqq;
+ }
+@@ -2746,7 +2744,7 @@ static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq
+ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
+
+ bfq_log(bfqd,
+- "reset_rate_computation at end, sample %u/%u tot_sects %llu",
++ "at end, sample %u/%u tot_sects %llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ bfqd->tot_sectors_dispatched);
+ }
+@@ -2766,7 +2764,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
+ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) {
+ bfq_log(bfqd,
+- "update_rate_reset: only resetting, delta_first %lluus samples %d",
++ "only resetting, delta_first %lluus samples %d",
+ bfqd->delta_from_first>>10, bfqd->peak_rate_samples);
+ goto reset_computation;
+ }
+@@ -2790,7 +2788,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
+
+ bfq_log(bfqd,
+-"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
++"tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
+ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ rate > 20<<BFQ_RATE_SHIFT);
+@@ -2805,14 +2803,14 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ rate <= bfqd->peak_rate) ||
+ rate > 20<<BFQ_RATE_SHIFT) {
+ bfq_log(bfqd,
+- "update_rate_reset: goto reset, samples %u/%u rate/peak %llu/%llu",
++ "goto reset, samples %u/%u rate/peak %llu/%llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+ goto reset_computation;
+ } else {
+ bfq_log(bfqd,
+- "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu",
++ "do update, samples %u/%u rate/peak %llu/%llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+@@ -2868,7 +2866,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ rate /= divisor; /* smoothing constant alpha = 1/divisor */
+
+ bfq_log(bfqd,
+- "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u",
++ "divisor %d tmp_peak_rate %llu tmp_rate %u",
+ divisor,
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT),
+ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT));
+@@ -2922,7 +2920,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+
+ if (bfqd->peak_rate_samples == 0) { /* first dispatch */
+ bfq_log(bfqd,
+- "update_peak_rate: goto reset, samples %d",
++ "goto reset, samples %d",
+ bfqd->peak_rate_samples) ;
+ bfq_reset_rate_computation(bfqd, rq);
+ goto update_last_values; /* will add one sample */
+@@ -2943,7 +2941,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
+ bfqd->rq_in_driver == 0) {
+ bfq_log(bfqd,
+-"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d",
++"jumping to updating&resetting delta_last %lluus samples %d",
+ (now_ns - bfqd->last_dispatch)>>10,
+ bfqd->peak_rate_samples) ;
+ goto update_rate_and_reset;
+@@ -2969,7 +2967,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
+
+ bfq_log(bfqd,
+- "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus",
++ "added samples %u/%u tot_sects %llu delta_first %lluus",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ bfqd->tot_sectors_dispatched,
+ bfqd->delta_from_first>>10);
+@@ -2985,12 +2983,12 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ bfqd->last_dispatch = now_ns;
+
+ bfq_log(bfqd,
+- "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu",
++ "delta_first %lluus last_pos %llu peak_rate %llu",
+ (now_ns - bfqd->first_dispatch)>>10,
+ (unsigned long long) bfqd->last_position,
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+ bfq_log(bfqd,
+- "update_peak_rate: samples at end %d", bfqd->peak_rate_samples);
++ "samples at end %d", bfqd->peak_rate_samples);
+ }
+
+ /*
+@@ -3088,11 +3086,11 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
+ */
+ budget = 2 * min_budget;
+
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
++ bfq_log_bfqq(bfqd, bfqq, "last budg %d, budg left %d",
+ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
++ bfq_log_bfqq(bfqd, bfqq, "last max_budg %d, min budg %d",
+ budget, bfq_min_budget(bfqd));
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_log_bfqq(bfqd, bfqq, "sync %d, seeky %d",
+ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
+
+ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
+@@ -3294,7 +3292,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ else /* charge at least one seek */
+ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
+
+- bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs);
++ bfq_log(bfqd, "too short %u", delta_usecs);
+
+ return slow;
+ }
+@@ -3317,11 +3315,11 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * peak rate.
+ */
+ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
+- bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d",
++ bfq_log(bfqd, "relative rate %d/%d",
+ bfqq->entity.service, bfqd->bfq_max_budget);
+ }
+
+- bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
++ bfq_log_bfqq(bfqd, bfqq, "slow %d", slow);
+
+ return slow;
+ }
+@@ -3423,7 +3421,7 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqd, bfqq,
+-"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u",
++"service_blkg %lu soft_rate %u sects/sec interval %u",
+ bfqq->service_from_backlogged,
+ bfqd->bfq_wr_max_softrt_rate,
+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
+@@ -3602,7 +3600,7 @@ static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
+ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "may_budget_timeout: wait_request %d left %d timeout %d",
++ "wait_request %d left %d timeout %d",
+ bfq_bfqq_wait_request(bfqq),
+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
+ bfq_bfqq_budget_timeout(bfqq));
+@@ -3863,11 +3861,11 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ * either boosts the throughput (without issues), or is
+ * necessary to preserve service guarantees.
+ */
+- bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d",
++ bfq_log_bfqq(bfqd, bfqq, "sync %d idling_boosts_thr %d",
+ bfq_bfqq_sync(bfqq), idling_boosts_thr);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "may_idle: wr_busy %d boosts %d IO-bound %d guar %d",
++ "wr_busy %d boosts %d IO-bound %d guar %d",
+ bfqd->wr_busy_queues,
+ idling_boosts_thr_without_issues,
+ bfq_bfqq_IO_bound(bfqq),
+@@ -3907,7 +3905,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ if (!bfqq)
+ goto new_queue;
+
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++ bfq_log_bfqq(bfqd, bfqq, "already in-service queue");
+
+ if (bfq_may_expire_for_budg_timeout(bfqq) &&
+ !bfq_bfqq_wait_request(bfqq) &&
+@@ -3983,14 +3981,14 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ new_queue:
+ bfqq = bfq_set_in_service_queue(bfqd);
+ if (bfqq) {
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
++ bfq_log_bfqq(bfqd, bfqq, "checking new queue");
+ goto check_queue;
+ }
+ keep_queue:
+ if (bfqq)
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
++ bfq_log_bfqq(bfqd, bfqq, "returned this queue");
+ else
+- bfq_log(bfqd, "select_queue: no queue returned");
++ bfq_log(bfqd, "no queue returned");
+
+ return bfqq;
+ }
+@@ -4043,8 +4041,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ /* see comments on max_service_from_wr */
+ bfq_bfqq_end_wr(bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "[%s] too much service",
+- __func__);
++ "too much service");
+ }
+ }
+ /*
+@@ -4122,7 +4119,7 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
+ {
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+
+- bfq_log(bfqd, "has_work, dispatch_non_empty %d busy_queues %d",
++ bfq_log(bfqd, "dispatch_non_empty %d busy_queues %d",
+ !list_empty_careful(&bfqd->dispatch), bfqd->busy_queues > 0);
+
+ /*
+@@ -4146,7 +4143,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ rq->rq_flags &= ~RQF_DISP_LIST;
+
+ bfq_log(bfqd,
+- "dispatch requests: picked %p from dispatch list", rq);
++ "picked %p from dispatch list", rq);
+ bfqq = RQ_BFQQ(rq);
+
+ if (bfqq) {
+@@ -4196,7 +4193,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ goto start_rq;
+ }
+
+- bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++ bfq_log(bfqd, "%d busy queues", bfqd->busy_queues);
+
+ if (bfqd->busy_queues == 0)
+ goto exit;
+@@ -4236,13 +4233,13 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ rq->rq_flags |= RQF_STARTED;
+ if (bfqq)
+ bfq_log_bfqq(bfqd, bfqq,
+- "dispatched %s request %p, rq_in_driver %d",
++ "%s request %p, rq_in_driver %d",
+ bfq_bfqq_sync(bfqq) ? "sync" : "async",
+ rq,
+ bfqd->rq_in_driver);
+ else
+ bfq_log(bfqd,
+- "dispatched request %p from dispatch list, rq_in_driver %d",
++ "request %p from dispatch list, rq_in_driver %d",
+ rq, bfqd->rq_in_driver);
+ } else
+ bfq_log(bfqd,
+@@ -4339,7 +4336,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ BUG_ON(bfqq->ref <= 0);
+
+ if (bfqq->bfqd)
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d", bfqq, bfqq->ref);
+
+ bfqq->ref--;
+ if (bfqq->ref)
+@@ -4383,10 +4380,10 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ }
+
+ if (bfqq->bfqd)
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p freed", bfqq);
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "putting blkg and bfqg %p\n", bfqg);
+ bfqg_and_blkg_put(bfqg);
+ #endif
+ kmem_cache_free(bfq_pool, bfqq);
+@@ -4418,7 +4415,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_schedule_dispatch(bfqd);
+ }
+
+- bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "%p, %d", bfqq, bfqq->ref);
+
+ bfq_put_cooperator(bfqq);
+
+@@ -4502,7 +4499,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
+ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
+ bfqq->entity.prio_changed = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "set_next_ioprio_data: bic_class %d prio %d class %d",
++ "bic_class %d prio %d class %d",
+ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class);
+ }
+
+@@ -4529,7 +4526,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
+ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
+ bic_set_bfqq(bic, bfqq, false);
+ bfq_log_bfqq(bfqd, bfqq,
+- "check_ioprio_change: bfqq %p %d",
++ "bfqq %p %d",
+ bfqq, bfqq->ref);
+ }
+
+@@ -4667,14 +4664,14 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
+ * guarantee that this queue is not freed
+ * until its group goes away.
+ */
+- bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfq_log_bfqq(bfqd, bfqq, "bfqq not in async: %p, %d",
+ bfqq, bfqq->ref);
+ *async_bfqq = bfqq;
+ }
+
+ out:
+ bfqq->ref++; /* get a process reference to this queue */
+- bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "at end: %p, %d", bfqq, bfqq->ref);
+ rcu_read_unlock();
+ return bfqq;
+ }
+@@ -4733,7 +4730,7 @@ static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
+ bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
+ has_short_ttime = false;
+
+- bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
++ bfq_log_bfqq(bfqd, bfqq, "has_short_ttime %d",
+ has_short_ttime);
+
+ if (has_short_ttime)
+@@ -4759,7 +4756,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_update_io_seektime(bfqd, bfqq, rq);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "rq_enqueued: has_short_ttime=%d (seeky %d)",
++ "has_short_ttime=%d (seeky %d)",
+ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
+
+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+@@ -4818,7 +4815,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+
+ assert_spin_locked(&bfqd->lock);
+
+- bfq_log_bfqq(bfqd, bfqq, "__insert_req: rq %p bfqq %p", rq, bfqq);
++ bfq_log_bfqq(bfqd, bfqq, "rq %p bfqq %p", rq, bfqq);
+
+ /*
+ * An unplug may trigger a requeue of a request from the device
+@@ -4837,9 +4834,9 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ new_bfqq->allocated++;
+ bfqq->allocated--;
+ bfq_log_bfqq(bfqd, bfqq,
+- "insert_request: new allocated %d", bfqq->allocated);
++ "new allocated %d", bfqq->allocated);
+ bfq_log_bfqq(bfqd, new_bfqq,
+- "insert_request: new_bfqq new allocated %d",
++ "new_bfqq new allocated %d",
+ bfqq->allocated);
+
+ new_bfqq->ref++;
+@@ -4911,11 +4908,11 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ rq->rq_flags |= RQF_DISP_LIST;
+ if (bfqq)
+ bfq_log_bfqq(bfqd, bfqq,
+- "insert_request %p in disp: at_head %d",
++ "%p in disp: at_head %d",
+ rq, at_head);
+ else
+ bfq_log(bfqd,
+- "insert_request %p in disp: at_head %d",
++ "%p in disp: at_head %d",
+ rq, at_head);
+ } else {
+ BUG_ON(!(rq->rq_flags & RQF_GOT));
+@@ -5033,7 +5030,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ bfqq->dispatched--;
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "completed_requests: new disp %d, new rq_in_driver %d",
++ "new disp %d, new rq_in_driver %d",
+ bfqq->dispatched, bfqd->rq_in_driver);
+
+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
+@@ -5061,7 +5058,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
++ "delta %uus/%luus max_size %u rate %llu/%llu",
+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
+ delta_us > 0 ?
+ (USEC_PER_SEC*
+@@ -5129,7 +5126,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
+ static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "put_request_body: allocated %d", bfqq->allocated);
++ "allocated %d", bfqq->allocated);
+ BUG_ON(!bfqq->allocated);
+ bfqq->allocated--;
+
+@@ -5406,10 +5403,10 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
+
+ bfqq->allocated++;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "get_request: new allocated %d", bfqq->allocated);
++ "new allocated %d", bfqq->allocated);
+
+ bfqq->ref++;
+- bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", rq, bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "%p: bfqq %p, %d", rq, bfqq, bfqq->ref);
+
+ rq->elv.priv[0] = bic;
+ rq->elv.priv[1] = bfqq;
+@@ -5493,7 +5490,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
+ idle_slice_timer);
+ struct bfq_queue *bfqq = bfqd->in_service_queue;
+
+- bfq_log(bfqd, "slice_timer expired");
++ bfq_log(bfqd, "expired");
+
+ /*
+ * Theoretical race here: the in-service queue can be NULL or
+@@ -5515,10 +5512,10 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
+ struct bfq_group *root_group = bfqd->root_group;
+ struct bfq_queue *bfqq = *bfqq_ptr;
+
+- bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ bfq_log(bfqd, "%p", bfqq);
+ if (bfqq) {
+ bfq_bfqq_move(bfqd, bfqq, root_group);
+- bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfq_log_bfqq(bfqd, bfqq, "putting %p, %d",
+ bfqq, bfqq->ref);
+ bfq_put_queue(bfqq);
+ *bfqq_ptr = NULL;
+@@ -5547,7 +5544,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ struct bfq_data *bfqd = e->elevator_data;
+ struct bfq_queue *bfqq, *n;
+
+- bfq_log(bfqd, "exit_queue: starting ...");
++ bfq_log(bfqd, "starting ...");
+
+ hrtimer_cancel(&bfqd->idle_slice_timer);
+
+@@ -5575,7 +5572,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ spin_unlock_irq(&bfqd->lock);
+ #endif
+
+- bfq_log(bfqd, "exit_queue: finished ...");
++ bfq_log(bfqd, "finished ...");
+ kfree(bfqd);
+ }
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index 9a5ce1168ff5..e2ae11bf8f76 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -712,34 +712,34 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+- pr_crit("%s bfq%d%c %s " fmt "\n", \
++ pr_crit("%s bfq%d%c %s [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- bfqq_group(bfqq)->blkg_path, ##args); \
++ bfqq_group(bfqq)->blkg_path, __func__, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+- pr_crit("%s %s " fmt "\n", \
++ pr_crit("%s %s [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+- bfqg->blkg_path, ##args); \
++ bfqg->blkg_path, __func__, ##args); \
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+- pr_crit("%s bfq%d%c " fmt "\n", \
++ pr_crit("%s bfq%d%c [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- ##args)
++ __func__, ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+ #endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+- pr_crit("%s bfq " fmt "\n", \
++ pr_crit("%s bfq [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+- ##args)
++ __func__, ##args)
+
+ #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+
+@@ -762,28 +762,29 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+- blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s [%s] " fmt, \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- bfqq_group(bfqq)->blkg_path, ##args); \
++ bfqq_group(bfqq)->blkg_path, __func__, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+- blk_add_trace_msg((bfqd)->queue, "%s " fmt, bfqg->blkg_path, ##args);\
++ blk_add_trace_msg((bfqd)->queue, "%s [%s] " fmt, bfqg->blkg_path, \
++ __func__, ##args);\
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+- blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c [%s] " fmt, (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- ##args)
++ __func__, ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+ #endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+- blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++ blk_add_trace_msg((bfqd)->queue, "bfq [%s] " fmt, __func__, ##args)
+
+ #endif /* CONFIG_BLK_DEV_IO_TRACE */
+ #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+@@ -938,7 +939,7 @@ bfq_entity_service_tree(struct bfq_entity *entity)
+
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "entity_service_tree %p %d",
++ "%p %d",
+ sched_data->service_tree + idx, idx);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+@@ -946,7 +947,7 @@ bfq_entity_service_tree(struct bfq_entity *entity)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "entity_service_tree %p %d",
++ "%p %d",
+ sched_data->service_tree + idx, idx);
+ }
+ #endif
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 4e6c5232e2fb..ead34c30a7c2 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -119,7 +119,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "update_next_in_service: chose without lookup");
++ "chose without lookup");
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+@@ -127,7 +127,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data*)bfqg->bfqd, bfqg,
+- "update_next_in_service: chose without lookup");
++ "chose without lookup");
+ }
+ #endif
+ }
+@@ -148,7 +148,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ bfqq = bfq_entity_to_bfqq(next_in_service);
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "update_next_in_service: chosen this queue");
++ "chosen this queue");
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+ struct bfq_group *bfqg =
+@@ -156,7 +156,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
+ struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "update_next_in_service: chosen this entity");
++ "chosen this entity");
+ }
+ #endif
+ return parent_sched_may_change;
+@@ -331,10 +331,10 @@ static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "calc_finish: serv %lu, w %d",
++ "serv %lu, w %d",
+ service, entity->weight);
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "calc_finish: start %llu, finish %llu, delta %llu",
++ "start %llu, finish %llu, delta %llu",
+ start, finish, delta);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -342,10 +342,10 @@ static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "calc_finish group: serv %lu, w %d",
++ "group: serv %lu, w %d",
+ service, entity->weight);
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "calc_finish group: start %llu, finish %llu, delta %llu",
++ "group: start %llu, finish %llu, delta %llu",
+ start, finish, delta);
+ #endif
+ }
+@@ -484,7 +484,7 @@ static void bfq_update_active_node(struct rb_node *node)
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "update_active_node: new min_start %llu",
++ "new min_start %llu",
+ ((entity->min_start>>10)*1000)>>12);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -492,7 +492,7 @@ static void bfq_update_active_node(struct rb_node *node)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "update_active_node: new min_start %llu",
++ "new min_start %llu",
+ ((entity->min_start>>10)*1000)>>12);
+ #endif
+ }
+@@ -620,7 +620,7 @@ static void bfq_get_entity(struct bfq_entity *entity)
+
+ if (bfqq) {
+ bfqq->ref++;
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d",
+ bfqq, bfqq->ref);
+ }
+ }
+@@ -748,7 +748,7 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
+ entity->on_st = false;
+ st->wsum -= entity->weight;
+ if (bfqq && !is_in_service) {
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity (before): %p %d",
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "(before): %p %d",
+ bfqq, bfqq->ref);
+ bfq_put_queue(bfqq);
+ }
+@@ -1008,7 +1008,7 @@ static void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ tot_serv_to_charge = entity->service;
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "charge_time: %lu/%u ms, %d/%d/%d sectors",
++ "%lu/%u ms, %d/%d/%d sectors",
+ time_ms, timeout_ms, entity->service,
+ tot_serv_to_charge, entity->budget);
+
+@@ -1080,7 +1080,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "update_fin_time_enqueue: new queue finish %llu",
++ "new queue finish %llu",
+ ((entity->finish>>10)*1000)>>12);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -1088,7 +1088,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "update_fin_time_enqueue: new group finish %llu",
++ "new group finish %llu",
+ ((entity->finish>>10)*1000)>>12);
+ #endif
+ }
+@@ -1098,7 +1098,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+
+ if (bfqq) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "update_fin_time_enqueue: queue %seligible in st %p",
++ "queue %seligible in st %p",
+ entity->start <= st->vtime ? "" : "non ", st);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ } else {
+@@ -1106,7 +1106,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "update_fin_time_enqueue: group %seligible in st %p",
++ "group %seligible in st %p",
+ entity->start <= st->vtime ? "" : "non ", st);
+ #endif
+ }
+@@ -1550,7 +1550,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
+
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "calc_vtime_jump: new value %llu",
++ "new value %llu",
+ ((root_entity->min_start>>10)*1000)>>12);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+@@ -1559,7 +1559,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
+ entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "calc_vtime_jump: new value %llu",
++ "new value %llu",
+ ((root_entity->min_start>>10)*1000)>>12);
+ }
+ #endif
+@@ -1677,7 +1677,7 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
+ bfqq = bfq_entity_to_bfqq(entity);
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "__lookup_next: start %llu vtime %llu st %p",
++ "start %llu vtime %llu st %p",
+ ((entity->start>>10)*1000)>>12,
+ ((new_vtime>>10)*1000)>>12, st);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+@@ -1686,7 +1686,7 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "__lookup_next: start %llu vtime %llu (%llu) st %p",
++ "start %llu vtime %llu (%llu) st %p",
+ ((entity->start>>10)*1000)>>12,
+ ((st->vtime>>10)*1000)>>12,
+ ((new_vtime>>10)*1000)>>12, st);
+@@ -1821,14 +1821,14 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg(bfqd, bfqg,
+- "get_next_queue: lookup in this group");
++ "lookup in this group");
+ if (!sd->next_in_service)
+- pr_crit("get_next_queue: lookup in this group");
++ pr_crit("lookup in this group");
+ } else {
+ bfq_log_bfqg(bfqd, bfqd->root_group,
+- "get_next_queue: lookup in root group");
++ "lookup in root group");
+ if (!sd->next_in_service)
+- pr_crit("get_next_queue: lookup in root group");
++ pr_crit("lookup in root group");
+ }
+ #endif
+
+@@ -1903,7 +1903,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ bfqq = bfq_entity_to_bfqq(entity);
+ if (bfqq)
+ bfq_log_bfqq(bfqd, bfqq,
+- "get_next_queue: this queue, finish %llu",
++ "this queue, finish %llu",
+ (((entity->finish>>10)*1000)>>10)>>2);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+@@ -1911,7 +1911,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg(bfqd, bfqg,
+- "get_next_queue: this entity, finish %llu",
++ "this entity, finish %llu",
+ (((entity->finish>>10)*1000)>>10)>>2);
+ }
+ #endif
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index c4df156b1fb4..e49e8ac882b3 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -281,7 +281,7 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd);
+ static void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ {
+ if (bfqd->queued != 0) {
+- bfq_log(bfqd, "schedule dispatch");
++ bfq_log(bfqd, "");
+ kblockd_schedule_work(&bfqd->unplug_work);
+ }
+ }
+@@ -414,7 +414,7 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
+ if (rb_link)
+ *rb_link = p;
+
+- bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ bfq_log(bfqd, "%llu: returning %d",
+ (unsigned long long) sector,
+ bfqq ? bfqq->pid : 0);
+
+@@ -635,7 +635,7 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
+ if (rq == last || ktime_get_ns() < rq->fifo_time)
+ return NULL;
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "returned %p", rq);
+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
+ return rq;
+ }
+@@ -728,7 +728,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
+ bfq_serv_to_charge(next_rq, bfqq));
+ if (entity->budget != new_budget) {
+ entity->budget = new_budget;
+- bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++ bfq_log_bfqq(bfqd, bfqq, "new budget %lu",
+ new_budget);
+ bfq_requeue_bfqq(bfqd, bfqq, false);
+ }
+@@ -800,8 +800,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish));
+
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu",
+- __func__,
++ "bic %p wr_coeff %d start_finish %lu max_time %lu",
+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish,
+ bfqq->wr_cur_max_time);
+
+@@ -814,11 +813,11 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ bfq_wr_duration(bfqd))) {
+ switch_back_to_interactive_wr(bfqq, bfqd);
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "resume state: switching back to interactive");
++ "switching back to interactive");
+ } else {
+ bfqq->wr_coeff = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "resume state: switching off wr (%lu + %lu < %lu)",
++ "switching off wr (%lu + %lu < %lu)",
+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time,
+ jiffies);
+ }
+@@ -870,7 +869,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ /* Increment burst size to take into account also bfqq */
+ bfqd->burst_size++;
+
+- bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size);
++ bfq_log_bfqq(bfqd, bfqq, "%d", bfqd->burst_size);
+
+ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh);
+
+@@ -883,7 +882,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ * other to consider this burst as large.
+ */
+ bfqd->large_burst = true;
+- bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started");
++ bfq_log_bfqq(bfqd, bfqq, "large burst started");
+
+ /*
+ * We can now mark all queues in the burst list as
+@@ -1055,7 +1054,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfqd->large_burst = false;
+ bfq_reset_burst_list(bfqd, bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "handle_burst: late activation or different group");
++ "late activation or different group");
+ goto end;
+ }
+
+@@ -1065,7 +1064,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ * bfqq as belonging to this large burst immediately.
+ */
+ if (bfqd->large_burst) {
+- bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst");
++ bfq_log_bfqq(bfqd, bfqq, "marked in burst");
+ bfq_mark_bfqq_in_large_burst(bfqq);
+ goto end;
+ }
+@@ -1572,7 +1571,7 @@ static void bfq_add_request(struct request *rq)
+ unsigned int old_wr_coeff = bfqq->wr_coeff;
+ bool interactive = false;
+
+- bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s",
++ bfq_log_bfqq(bfqd, bfqq, "size %u %s",
+ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A");
+
+ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */
+@@ -1870,10 +1869,10 @@ static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
+ */
+ bfqq->entity.prio_changed = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "end_wr: wrais ending at %lu, rais_max_time %u",
++ "wrais ending at %lu, rais_max_time %u",
+ bfqq->last_wr_start_finish,
+ jiffies_to_msecs(bfqq->wr_cur_max_time));
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d",
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "wr_busy %d",
+ bfqq->bfqd->wr_busy_queues);
+ }
+
+@@ -2048,8 +2047,8 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
+ {
+ if (bfq_too_late_for_merging(new_bfqq)) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "[%s] too late for bfq%d to be merged",
+- __func__, new_bfqq->pid);
++ "too late for bfq%d to be merged",
++ new_bfqq->pid);
+ return false;
+ }
+
+@@ -2258,7 +2257,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+
+ }
+
+- bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
++ bfq_log_bfqq(bfqd, new_bfqq, "wr_busy %d",
+ bfqd->wr_busy_queues);
+
+ /*
+@@ -2359,7 +2358,7 @@ static void bfq_set_budget_timeout(struct bfq_data *bfqd,
+ bfqq->budget_timeout = jiffies +
+ bfqd->bfq_timeout * timeout_coeff;
+
+- bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ bfq_log_bfqq(bfqd, bfqq, "%u",
+ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff));
+ }
+
+@@ -2427,10 +2426,10 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+
+ bfq_set_budget_timeout(bfqd, bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_in_service_queue, cur-budget = %d",
++ "cur-budget = %d",
+ bfqq->entity.budget);
+ } else
+- bfq_log(bfqd, "set_in_service_queue: NULL");
++ bfq_log(bfqd, "NULL");
+
+ bfqd->in_service_queue = bfqq;
+ }
+@@ -2559,7 +2558,7 @@ static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq
+ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
+
+ bfq_log(bfqd,
+- "reset_rate_computation at end, sample %u/%u tot_sects %llu",
++ "at end, sample %u/%u tot_sects %llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ bfqd->tot_sectors_dispatched);
+ }
+@@ -2579,7 +2578,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
+ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) {
+ bfq_log(bfqd,
+- "update_rate_reset: only resetting, delta_first %lluus samples %d",
++ "only resetting, delta_first %lluus samples %d",
+ bfqd->delta_from_first>>10, bfqd->peak_rate_samples);
+ goto reset_computation;
+ }
+@@ -2603,7 +2602,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
+
+ bfq_log(bfqd,
+-"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
++"tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
+ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ rate > 20<<BFQ_RATE_SHIFT);
+@@ -2618,14 +2617,14 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ rate <= bfqd->peak_rate) ||
+ rate > 20<<BFQ_RATE_SHIFT) {
+ bfq_log(bfqd,
+- "update_rate_reset: goto reset, samples %u/%u rate/peak %llu/%llu",
++ "goto reset, samples %u/%u rate/peak %llu/%llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+ goto reset_computation;
+ } else {
+ bfq_log(bfqd,
+- "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu",
++ "do update, samples %u/%u rate/peak %llu/%llu",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+@@ -2681,7 +2680,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ rate /= divisor; /* smoothing constant alpha = 1/divisor */
+
+ bfq_log(bfqd,
+- "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u",
++ "divisor %d tmp_peak_rate %llu tmp_rate %u",
+ divisor,
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT),
+ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT));
+@@ -2735,7 +2734,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+
+ if (bfqd->peak_rate_samples == 0) { /* first dispatch */
+ bfq_log(bfqd,
+- "update_peak_rate: goto reset, samples %d",
++ "goto reset, samples %d",
+ bfqd->peak_rate_samples) ;
+ bfq_reset_rate_computation(bfqd, rq);
+ goto update_last_values; /* will add one sample */
+@@ -2756,7 +2755,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
+ bfqd->rq_in_driver == 0) {
+ bfq_log(bfqd,
+-"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d",
++"jumping to updating&resetting delta_last %lluus samples %d",
+ (now_ns - bfqd->last_dispatch)>>10,
+ bfqd->peak_rate_samples) ;
+ goto update_rate_and_reset;
+@@ -2782,7 +2781,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
+
+ bfq_log(bfqd,
+- "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus",
++ "added samples %u/%u tot_sects %llu delta_first %lluus",
+ bfqd->peak_rate_samples, bfqd->sequential_samples,
+ bfqd->tot_sectors_dispatched,
+ bfqd->delta_from_first>>10);
+@@ -2798,12 +2797,12 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
+ bfqd->last_dispatch = now_ns;
+
+ bfq_log(bfqd,
+- "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu",
++ "delta_first %lluus last_pos %llu peak_rate %llu",
+ (now_ns - bfqd->first_dispatch)>>10,
+ (unsigned long long) bfqd->last_position,
+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
+ bfq_log(bfqd,
+- "update_peak_rate: samples at end %d", bfqd->peak_rate_samples);
++ "samples at end %d", bfqd->peak_rate_samples);
+ }
+
+ /*
+@@ -2900,11 +2899,11 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
+ */
+ budget = 2 * min_budget;
+
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
++ bfq_log_bfqq(bfqd, bfqq, "last budg %d, budg left %d",
+ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
++ bfq_log_bfqq(bfqd, bfqq, "last max_budg %d, min budg %d",
+ budget, bfq_min_budget(bfqd));
+- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_log_bfqq(bfqd, bfqq, "sync %d, seeky %d",
+ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
+
+ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
+@@ -3106,7 +3105,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ else /* charge at least one seek */
+ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
+
+- bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs);
++ bfq_log(bfqd, "too short %u", delta_usecs);
+
+ return slow;
+ }
+@@ -3129,11 +3128,11 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * peak rate.
+ */
+ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
+- bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d",
++ bfq_log(bfqd, "relative rate %d/%d",
+ bfqq->entity.service, bfqd->bfq_max_budget);
+ }
+
+- bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
++ bfq_log_bfqq(bfqd, bfqq, "slow %d", slow);
+
+ return slow;
+ }
+@@ -3235,7 +3234,7 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqd, bfqq,
+-"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u",
++"service_blkg %lu soft_rate %u sects/sec interval %u",
+ bfqq->service_from_backlogged,
+ bfqd->bfq_wr_max_softrt_rate,
+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged /
+@@ -3414,7 +3413,7 @@ static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
+ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "may_budget_timeout: wait_request %d left %d timeout %d",
++ "wait_request %d left %d timeout %d",
+ bfq_bfqq_wait_request(bfqq),
+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
+ bfq_bfqq_budget_timeout(bfqq));
+@@ -3675,11 +3674,11 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
+ * either boosts the throughput (without issues), or is
+ * necessary to preserve service guarantees.
+ */
+- bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d",
++ bfq_log_bfqq(bfqd, bfqq, "sync %d idling_boosts_thr %d",
+ bfq_bfqq_sync(bfqq), idling_boosts_thr);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "may_idle: wr_busy %d boosts %d IO-bound %d guar %d",
++ "wr_busy %d boosts %d IO-bound %d guar %d",
+ bfqd->wr_busy_queues,
+ idling_boosts_thr_without_issues,
+ bfq_bfqq_IO_bound(bfqq),
+@@ -3719,7 +3718,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ if (!bfqq)
+ goto new_queue;
+
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++ bfq_log_bfqq(bfqd, bfqq, "already in-service queue");
+
+ if (bfq_may_expire_for_budg_timeout(bfqq) &&
+ !hrtimer_active(&bfqd->idle_slice_timer) &&
+@@ -3797,14 +3796,14 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ new_queue:
+ bfqq = bfq_set_in_service_queue(bfqd);
+ if (bfqq) {
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
++ bfq_log_bfqq(bfqd, bfqq, "checking new queue");
+ goto check_queue;
+ }
+ keep_queue:
+ if (bfqq)
+- bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
++ bfq_log_bfqq(bfqd, bfqq, "returned this queue");
+ else
+- bfq_log(bfqd, "select_queue: no queue returned");
++ bfq_log(bfqd, "no queue returned");
+
+ return bfqq;
+ }
+@@ -3857,8 +3856,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ /* see comments on max_service_from_wr */
+ bfq_bfqq_end_wr(bfqq);
+ bfq_log_bfqq(bfqd, bfqq,
+- "[%s] too much service",
+- __func__);
++ "too much service");
+ }
+ }
+ /*
+@@ -3987,7 +3985,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_queue *bfqq;
+
+- bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++ bfq_log(bfqd, "%d busy queues", bfqd->busy_queues);
+
+ if (bfqd->busy_queues == 0)
+ return 0;
+@@ -4021,7 +4019,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force)
+ if (!bfq_dispatch_request(bfqd, bfqq))
+ return 0;
+
+- bfq_log_bfqq(bfqd, bfqq, "dispatched %s request",
++ bfq_log_bfqq(bfqd, bfqq, "%s request",
+ bfq_bfqq_sync(bfqq) ? "sync" : "async");
+
+ BUG_ON(bfqq->next_rq == NULL &&
+@@ -4044,7 +4042,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+
+ BUG_ON(bfqq->ref <= 0);
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d", bfqq, bfqq->ref);
+ bfqq->ref--;
+ if (bfqq->ref)
+ return;
+@@ -4086,7 +4084,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
+ bfqq->bfqd->burst_size--;
+ }
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p freed", bfqq);
+
+ kmem_cache_free(bfq_pool, bfqq);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+@@ -4120,7 +4118,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_schedule_dispatch(bfqd);
+ }
+
+- bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "%p, %d", bfqq, bfqq->ref);
+
+ bfq_put_cooperator(bfqq);
+
+@@ -4200,7 +4198,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq,
+ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
+ bfqq->entity.prio_changed = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "set_next_ioprio_data: bic_class %d prio %d class %d",
++ "bic_class %d prio %d class %d",
+ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class);
+ }
+
+@@ -4227,7 +4225,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
+ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
+ bic_set_bfqq(bic, bfqq, false);
+ bfq_log_bfqq(bfqd, bfqq,
+- "check_ioprio_change: bfqq %p %d",
++ "bfqq %p %d",
+ bfqq, bfqq->ref);
+ }
+
+@@ -4362,14 +4360,14 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
+ * guarantee that this queue is not freed
+ * until its group goes away.
+ */
+- bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfq_log_bfqq(bfqd, bfqq, "bfqq not in async: %p, %d",
+ bfqq, bfqq->ref);
+ *async_bfqq = bfqq;
+ }
+
+ out:
+ bfqq->ref++; /* get a process reference to this queue */
+- bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "at end: %p, %d", bfqq, bfqq->ref);
+ rcu_read_unlock();
+ return bfqq;
+ }
+@@ -4428,7 +4426,7 @@ static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
+ bic->ttime.ttime_mean > bfqd->bfq_slice_idle))
+ has_short_ttime = false;
+
+- bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
++ bfq_log_bfqq(bfqd, bfqq, "has_short_ttime %d",
+ has_short_ttime);
+
+ if (has_short_ttime)
+@@ -4454,7 +4452,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ bfq_update_io_seektime(bfqd, bfqq, rq);
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "rq_enqueued: has_short_ttime=%d (seeky %d)",
++ "has_short_ttime=%d (seeky %d)",
+ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
+
+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+@@ -4629,7 +4627,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq)
+ */
+ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
+
+- bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
++ bfq_log(bfqd, "delta %uus/%luus max_size %u rate %llu/%llu",
+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
+ delta_us > 0 ?
+ (USEC_PER_SEC*
+@@ -4750,7 +4748,7 @@ static void bfq_put_request(struct request *rq)
+ rq->elv.priv[0] = NULL;
+ rq->elv.priv[1] = NULL;
+
+- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p, %d",
+ bfqq, bfqq->ref);
+ bfq_put_queue(bfqq);
+ }
+@@ -4816,7 +4814,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ bic_set_bfqq(bic, bfqq, is_sync);
+ if (split && is_sync) {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: was_in_list %d "
++ "was_in_list %d "
+ "was_in_large_burst %d "
+ "large burst in progress %d",
+ bic->was_in_burst_list,
+@@ -4826,12 +4824,12 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ if ((bic->was_in_burst_list && bfqd->large_burst) ||
+ bic->saved_in_large_burst) {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: marking in "
++ "marking in "
+ "large burst");
+ bfq_mark_bfqq_in_large_burst(bfqq);
+ } else {
+ bfq_log_bfqq(bfqd, bfqq,
+- "set_request: clearing in "
++ "clearing in "
+ "large burst");
+ bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list)
+@@ -4888,7 +4886,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+
+ bfqq->allocated[rw]++;
+ bfqq->ref++;
+- bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "bfqq %p, %d", bfqq, bfqq->ref);
+
+ rq->elv.priv[0] = bic;
+ rq->elv.priv[1] = bfqq;
+@@ -4962,7 +4960,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
+ * case we just expire a queue too early.
+ */
+ if (bfqq) {
+- bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++ bfq_log_bfqq(bfqd, bfqq, "expired");
+ bfq_clear_bfqq_wait_request(bfqq);
+
+ if (bfq_bfqq_budget_timeout(bfqq))
+@@ -5005,10 +5003,10 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
+ struct bfq_group *root_group = bfqd->root_group;
+ struct bfq_queue *bfqq = *bfqq_ptr;
+
+- bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ bfq_log(bfqd, "%p", bfqq);
+ if (bfqq) {
+ bfq_bfqq_move(bfqd, bfqq, root_group);
+- bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfq_log_bfqq(bfqd, bfqq, "putting %p, %d",
+ bfqq, bfqq->ref);
+ bfq_put_queue(bfqq);
+ *bfqq_ptr = NULL;
+diff --git a/block/bfq.h b/block/bfq.h
+index 0cd7a3f251a7..4d2fe7f77af1 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -698,37 +698,37 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ \
+ assert_spin_locked((bfqd)->queue->queue_lock); \
+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+- pr_crit("%s bfq%d%c %s " fmt "\n", \
++ pr_crit("%s bfq%d%c %s [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- __pbuf, ##args); \
++ __pbuf, __func__, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
+- pr_crit("%s %s " fmt "\n", \
++ pr_crit("%s %s [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+- __pbuf, ##args); \
++ __pbuf, __func__, ##args); \
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+- pr_crit("%s bfq%d%c " fmt "\n", \
++ pr_crit("%s bfq%d%c [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- ##args)
++ __func__, ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+ #endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+- pr_crit("%s bfq " fmt "\n", \
++ pr_crit("%s bfq [%s] " fmt "\n", \
+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \
+- ##args)
++ __func__, ##args)
+
+ #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+
+@@ -755,31 +755,32 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ \
+ assert_spin_locked((bfqd)->queue->queue_lock); \
+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
+- blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s [%s] " fmt, \
+ (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- __pbuf, ##args); \
++ __pbuf, __func__, ##args); \
+ } while (0)
+
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
+- blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
++ blk_add_trace_msg((bfqd)->queue, "%s [%s] " fmt, __pbuf, \
++ __func__, ##args); \
+ } while (0)
+
+ #else /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
+- blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d%c [%s] " fmt, (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
+- ##args)
++ __func__, ##args)
+ #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
+
+ #endif /* BFQ_GROUP_IOSCHED_ENABLED */
+
+ #define bfq_log(bfqd, fmt, args...) \
+- blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++ blk_add_trace_msg((bfqd)->queue, "bfq [%s] " fmt, __func__, ##args)
+
+ #endif /* CONFIG_BLK_DEV_IO_TRACE */
+ #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */
+@@ -928,7 +929,7 @@ bfq_entity_service_tree(struct bfq_entity *entity)
+
+ if (bfqq)
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+- "entity_service_tree %p %d",
++ "%p %d",
+ sched_data->service_tree + idx, idx);
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ else {
+@@ -936,7 +937,7 @@ bfq_entity_service_tree(struct bfq_entity *entity)
+ container_of(entity, struct bfq_group, entity);
+
+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg,
+- "entity_service_tree %p %d",
++ "%p %d",
+ sched_data->service_tree + idx, idx);
+ }
+ #endif
+
+From 673a457e8a54d1c4b66e61b1a50956ba0b8c6a60 Mon Sep 17 00:00:00 2001
+From: Davide Paganelli <paga.david@gmail.com>
+Date: Thu, 8 Feb 2018 11:49:58 +0100
+Subject: [PATCH 19/23] block, bfq-mq, bfq-sq: make bfq_bfqq_expire print
+ expiration reason
+
+Improve the readability of the log messages related to the expiration
+reasons of the function bfq_bfqq_expire: instead of printing the number
+that represents the reason for expiration, print an actual textual
+description of the reason.
+
+Signed-off-by: Davide Paganelli <paga.david@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 10 ++++++++--
+ block/bfq-sq-iosched.c | 10 ++++++++--
+ 2 files changed, 16 insertions(+), 4 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index edc93b6af186..9268dd47a4e5 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -133,6 +133,12 @@ static const int bfq_timeout = (HZ / 8);
+ */
+ static const unsigned long bfq_merge_time_limit = HZ/10;
+
++#define MAX_LENGTH_REASON_NAME 25
++
++static const char reason_name[][MAX_LENGTH_REASON_NAME] = {"TOO_IDLE",
++"BUDGET_TIMEOUT", "BUDGET_EXHAUSTED", "NO_MORE_REQUESTS",
++"PREEMPTED"};
++
+ static struct kmem_cache *bfq_pool;
+
+ /* Below this threshold (in ns), we consider thinktime immediate. */
+@@ -3553,8 +3559,8 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ }
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)",
+- reason, slow, bfqq->dispatched,
++ "expire (%s, slow %d, num_disp %d, short_ttime %d, weight %d)",
++ reason_name[reason], slow, bfqq->dispatched,
+ bfq_bfqq_has_short_ttime(bfqq), entity->weight);
+
+ /*
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index e49e8ac882b3..f95deaab49a1 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -127,6 +127,12 @@ static const int bfq_timeout = (HZ / 8);
+ */
+ static const unsigned long bfq_merge_time_limit = HZ/10;
+
++#define MAX_LENGTH_REASON_NAME 25
++
++static const char reason_name[][MAX_LENGTH_REASON_NAME] = {"TOO_IDLE",
++"BUDGET_TIMEOUT", "BUDGET_EXHAUSTED", "NO_MORE_REQUESTS",
++"PREEMPTED"};
++
+ static struct kmem_cache *bfq_pool;
+
+ /* Below this threshold (in ns), we consider thinktime immediate. */
+@@ -3366,8 +3372,8 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd,
+ }
+
+ bfq_log_bfqq(bfqd, bfqq,
+- "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)",
+- reason, slow, bfqq->dispatched,
++ "expire (%s, slow %d, num_disp %d, short_ttime %d, weight %d)",
++ reason_name[reason], slow, bfqq->dispatched,
+ bfq_bfqq_has_short_ttime(bfqq), entity->weight);
+
+ /*
+
+From 62e80623fbb58367c3f667dab22fea0804001f3b Mon Sep 17 00:00:00 2001
+From: Melzani Alessandro <melzani.alessandro@gmail.com>
+Date: Mon, 26 Feb 2018 22:21:59 +0100
+Subject: [PATCH 20/23] bfq-mq: port of "block, bfq: remove batches of
+ confusing ifdefs"
+
+Commit a33801e8b473 ("block, bfq: move debug blkio stats behind
+CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs:
+one reported in [1], plus a similar one in another function. This
+commit removes both batches, in the way suggested in [1].
+
+[1] https://www.spinics.net/lists/linux-block/msg20043.html
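+
+The refactor follows the common kernel pattern of hoisting the
+conditionally-compiled statistics code into helpers, with empty inline
+stubs when the config option is off, so the callers need no ifdefs at
+all. A minimal sketch of that pattern, with hypothetical names rather
+than the actual hunks below:
+
+	#ifdef CONFIG_DEBUG_STATS		/* hypothetical option */
+	static void update_stats(int nr_requests)
+	{
+		/* real accounting, built only when the option is enabled */
+	}
+	#else
+	static inline void update_stats(int nr_requests) {}
+	#endif
+
+	static void dispatch_one(void)
+	{
+		update_stats(1);	/* no #ifdef needed at the call site */
+	}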
+
+Fixes: a33801e8b473 ("block, bfq: move debug blkio stats behind CONFIG_DEBUG_BLK_CGROUP")
+
+Signed-off-by: Alessandro Melzani <melzani.alessandro@gmail.com>
+---
+ block/bfq-mq-iosched.c | 128 ++++++++++++++++++++++++++++---------------------
+ 1 file changed, 73 insertions(+), 55 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 9268dd47a4e5..5a211620f316 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -4256,35 +4256,17 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ return rq;
+ }
+
+-static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+-{
+- struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+- struct request *rq;
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+- struct bfq_queue *in_serv_queue, *bfqq;
+- bool waiting_rq, idle_timer_disabled;
+-#endif
+
+- spin_lock_irq(&bfqd->lock);
+-
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+- in_serv_queue = bfqd->in_service_queue;
+- waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
+-
+- rq = __bfq_dispatch_request(hctx);
+-
+- idle_timer_disabled =
+- waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
+-
+-#else
+- rq = __bfq_dispatch_request(hctx);
+-#endif
+- spin_unlock_irq(&bfqd->lock);
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
++static void bfq_update_dispatch_stats(struct request_queue *q,
++ struct request *rq,
++ struct bfq_queue *in_serv_queue,
++ bool idle_timer_disabled)
++{
++ struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
+
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+- bfqq = rq ? RQ_BFQQ(rq) : NULL;
+ if (!idle_timer_disabled && !bfqq)
+- return rq;
++ return;
+
+ /*
+ * rq and bfqq are guaranteed to exist until this function
+@@ -4299,7 +4281,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ * In addition, the following queue lock guarantees that
+ * bfqq_group(bfqq) exists as well.
+ */
+- spin_lock_irq(hctx->queue->queue_lock);
++ spin_lock_irq(q->queue_lock);
+ if (idle_timer_disabled)
+ /*
+ * Since the idle timer has been disabled,
+@@ -4318,8 +4300,35 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ bfqg_stats_set_start_empty_time(bfqg);
+ bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
+ }
+- spin_unlock_irq(hctx->queue->queue_lock);
++ spin_unlock_irq(q->queue_lock);
++}
++#else
++static inline void bfq_update_dispatch_stats(struct request_queue *q,
++ struct request *rq,
++ struct bfq_queue *in_serv_queue,
++ bool idle_timer_disabled) {}
+ #endif
++static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
++{
++ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
++ struct request *rq;
++ struct bfq_queue *in_serv_queue;
++ bool waiting_rq, idle_timer_disabled;
++
++ spin_lock_irq(&bfqd->lock);
++
++ in_serv_queue = bfqd->in_service_queue;
++ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
++
++ rq = __bfq_dispatch_request(hctx);
++
++ idle_timer_disabled =
++ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
++
++ spin_unlock_irq(&bfqd->lock);
++
++ bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
++ idle_timer_disabled);
+
+ return rq;
+ }
+@@ -4881,6 +4890,38 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ return idle_timer_disabled;
+ }
+
++#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
++static void bfq_update_insert_stats(struct request_queue *q,
++ struct bfq_queue *bfqq,
++ bool idle_timer_disabled,
++ unsigned int cmd_flags)
++{
++ if (!bfqq)
++ return;
++
++ /*
++ * bfqq still exists, because it can disappear only after
++ * either it is merged with another queue, or the process it
++ * is associated with exits. But both actions must be taken by
++ * the same process currently executing this flow of
++ * instructions.
++ *
++ * In addition, the following queue lock guarantees that
++ * bfqq_group(bfqq) exists as well.
++ */
++ spin_lock_irq(q->queue_lock);
++ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
++ if (idle_timer_disabled)
++ bfqg_stats_update_idle_time(bfqq_group(bfqq));
++ spin_unlock_irq(q->queue_lock);
++}
++#else
++static inline void bfq_update_insert_stats(struct request_queue *q,
++ struct bfq_queue *bfqq,
++ bool idle_timer_disabled,
++ unsigned int cmd_flags) {}
++#endif
++
+ static void bfq_prepare_request(struct request *rq, struct bio *bio);
+
+ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+@@ -4889,10 +4930,8 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ struct request_queue *q = hctx->queue;
+ struct bfq_data *bfqd = q->elevator->elevator_data;
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ bool idle_timer_disabled = false;
+ unsigned int cmd_flags;
+-#endif
+
+ spin_lock_irq(&bfqd->lock);
+ if (blk_mq_sched_try_insert_merge(q, rq)) {
+@@ -4938,7 +4977,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ bfqq = RQ_BFQQ(rq);
+ }
+
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ idle_timer_disabled = __bfq_insert_request(bfqd, rq);
+ /*
+ * Update bfqq, because, if a queue merge has occurred
+@@ -4946,9 +4984,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ * redirected into a new queue.
+ */
+ bfqq = RQ_BFQQ(rq);
+-#else
+- __bfq_insert_request(bfqd, rq);
+-#endif
+
+ if (rq_mergeable(rq)) {
+ elv_rqhash_add(q, rq);
+@@ -4956,34 +4991,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ q->last_merge = rq;
+ }
+ }
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
++
+ /*
+ * Cache cmd_flags before releasing scheduler lock, because rq
+ * may disappear afterwards (for example, because of a request
+ * merge).
+ */
+ cmd_flags = rq->cmd_flags;
+-#endif
++
+ spin_unlock_irq(&bfqd->lock);
+-#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+- if (!bfqq)
+- return;
+- /*
+- * bfqq still exists, because it can disappear only after
+- * either it is merged with another queue, or the process it
+- * is associated with exits. But both actions must be taken by
+- * the same process currently executing this flow of
+- * instruction.
+- *
+- * In addition, the following queue lock guarantees that
+- * bfqq_group(bfqq) exists as well.
+- */
+- spin_lock_irq(q->queue_lock);
+- bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
+- if (idle_timer_disabled)
+- bfqg_stats_update_idle_time(bfqq_group(bfqq));
+- spin_unlock_irq(q->queue_lock);
+-#endif
++ bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
++ cmd_flags);
+ }
+
+ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
+
+From 0d0d05632872b226f4fae5e56af8736a4c24bf57 Mon Sep 17 00:00:00 2001
+From: Melzani Alessandro <melzani.alessandro@gmail.com>
+Date: Mon, 26 Feb 2018 22:43:30 +0100
+Subject: [PATCH 21/23] bfq-sq, bfq-mq: port of "bfq: Use icq_to_bic()
+ consistently"
+
+Some code uses icq_to_bic() to convert an io_cq pointer to a
+bfq_io_cq pointer while other code uses a direct cast. Convert
+the code that uses a direct cast so that it uses icq_to_bic().
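+
+For reference, icq_to_bic() is essentially a container_of() wrapper
+over the io_cq member embedded in struct bfq_io_cq; a sketch, assuming
+the usual container_of() from <linux/kernel.h> and not necessarily the
+exact definition in these files:
+
+	static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
+	{
+		/* struct bfq_io_cq embeds a struct io_cq named icq */
+		return container_of(icq, struct bfq_io_cq, icq);
+	}
+
+Unlike a direct cast, the helper stays correct even if the embedded
+member ever stops being the first field of struct bfq_io_cq.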
+
+Signed-off-by: Alessandro Melzani <melzani.alessandro@gmail.com>
+---
+ block/bfq-mq-iosched.c | 2 +-
+ block/bfq-sq-iosched.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 5a211620f316..7b1269558c47 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -272,7 +272,7 @@ static const unsigned long max_service_from_wr = 120000;
+ #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
+
+-#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0])
+ #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
+
+ /**
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index f95deaab49a1..c4aff8d55fc4 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -266,7 +266,7 @@ static const unsigned long max_service_from_wr = 120000;
+ #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
+
+-#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0])
+ #define RQ_BFQQ(rq) ((rq)->elv.priv[1])
+
+ static void bfq_schedule_dispatch(struct bfq_data *bfqd);
+
+From 4cb5de6add7d6ad0d25d73cb95dc871305db1522 Mon Sep 17 00:00:00 2001
+From: Melzani Alessandro <melzani.alessandro@gmail.com>
+Date: Mon, 26 Feb 2018 22:59:30 +0100
+Subject: [PATCH 22/23] bfq-sq, bfq-mq: port of "block, bfq: fix error handle
+ in bfq_init"
+
+If elv_register() fails, bfq_pool should be freed.
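+
+The fix is the usual goto-based unwinding: on failure, jump to a label
+that releases what has been set up so far, in reverse order. A toy,
+self-contained sketch of the shape, with hypothetical names rather than
+the actual bfq_init():
+
+	static int create_pool(void)	{ return 0; }	/* pretend it succeeds */
+	static void destroy_pool(void)	{ }
+	static int register_sched(void)	{ return -1; }	/* pretend it fails */
+
+	static int init_sketch(void)
+	{
+		int ret;
+
+		ret = create_pool();
+		if (ret)
+			return ret;
+
+		ret = register_sched();
+		if (ret)
+			goto kill_pool;	/* undo the pool before bailing out */
+
+		return 0;
+
+	kill_pool:
+		destroy_pool();
+		return ret;
+	}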
+
+Signed-off-by: Alessandro Melzani <melzani.alessandro@gmail.com>
+---
+ block/bfq-mq-iosched.c | 4 +++-
+ block/bfq-sq-iosched.c | 4 +++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 7b1269558c47..964e88c2ce59 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -6129,7 +6129,7 @@ static int __init bfq_init(void)
+
+ ret = elv_register(&iosched_bfq_mq);
+ if (ret)
+- goto err_pol_unreg;
++ goto slab_kill;
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ strcat(msg, " (with cgroups support)");
+@@ -6138,6 +6138,8 @@ static int __init bfq_init(void)
+
+ return 0;
+
++slab_kill:
++ bfq_slab_kill();
+ err_pol_unreg:
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index c4aff8d55fc4..7f0cf1f01ffc 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -5590,7 +5590,7 @@ static int __init bfq_init(void)
+
+ ret = elv_register(&iosched_bfq);
+ if (ret)
+- goto err_pol_unreg;
++ goto slab_kill;
+
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ strcat(msg, " (with cgroups support)");
+@@ -5599,6 +5599,8 @@ static int __init bfq_init(void)
+
+ return 0;
+
++slab_kill:
++ bfq_slab_kill();
+ err_pol_unreg:
+ #ifdef BFQ_GROUP_IOSCHED_ENABLED
+ blkcg_policy_unregister(&blkcg_policy_bfq);
+
+From 1f77c173aaa87ffb22c9f062a6449245d14311e4 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@linaro.org>
+Date: Wed, 4 Apr 2018 11:28:16 +0200
+Subject: [PATCH 23/23] block, bfq-sq, bfq-mq: lower-bound the estimated peak
+ rate to 1
+
+If a storage device handled by BFQ happens to be slower than 7.5 KB/s
+for a certain amount of time (on the order of a second), then the
+estimated peak rate of the device, maintained in BFQ, becomes equal to
+0. The reason is the limited precision with which the rate is
+represented (details on the range of representable values are in the
+comments introduced by this commit). This leads to a division-by-zero
+error where the estimated peak rate is used as a divisor. This type of
+failure has been reported in [1].
+
+This commit addresses this issue by:
+1. Lower-bounding the estimated peak rate to 1
+2. Adding and improving comments on the range of representable rates
+
+[1] https://www.spinics.net/lists/kernel/msg2739205.html
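+
+As a toy, user-space illustration of the truncation described above
+(the shift and the numbers are only for the demo, and max_t() is
+replaced by a plain comparison):
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	#define RATE_SHIFT 16	/* plays the role of BFQ_RATE_SHIFT */
+
+	int main(void)
+	{
+		/* ~10 sectors/s expressed in (sectors/usec) << RATE_SHIFT */
+		uint32_t rate = (uint32_t)((10ull << RATE_SHIFT) / 1000000);
+
+		printf("stored rate = %u\n", rate);	/* 0: truncated away */
+
+		if (rate == 0)	/* the lower bound this patch introduces */
+			rate = 1;
+
+		printf("clamped rate = %u\n", rate);	/* now safe as a divisor */
+		return 0;
+	}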
+
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
+---
+ block/bfq-mq-iosched.c | 25 ++++++++++++++++++++++++-
+ block/bfq-mq.h | 7 ++++++-
+ block/bfq-sq-iosched.c | 25 ++++++++++++++++++++++++-
+ block/bfq.h | 7 ++++++-
+ 4 files changed, 60 insertions(+), 4 deletions(-)
+
+diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c
+index 964e88c2ce59..03efd90c5d20 100644
+--- a/block/bfq-mq-iosched.c
++++ b/block/bfq-mq-iosched.c
+@@ -160,7 +160,20 @@ static struct kmem_cache *bfq_pool;
+ /* Target observation time interval for a peak-rate update (ns) */
+ #define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
+
+-/* Shift used for peak rate fixed precision calculations. */
++/*
++ * Shift used for peak-rate fixed precision calculations.
++ * With
++ * - the current shift: 16 positions
++ * - the current type used to store rate: u32
++ * - the current unit of measure for rate: [sectors/usec], or, more precisely,
++ * [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
++ * the range of rates that can be stored is
++ * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
++ * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
++ * [15, 65G] sectors/sec
++ * Which, assuming a sector size of 512B, corresponds to a range of
++ * [7.5K, 33T] B/sec
++ */
+ #define BFQ_RATE_SHIFT 16
+
+ /*
+@@ -2881,6 +2894,16 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
+
+ bfqd->peak_rate += rate;
++
++ /*
++ * For a very slow device, bfqd->peak_rate can reach 0 (see
++ * the minimum representable values reported in the comments
++ * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid
++ * divisions by zero where bfqd->peak_rate is used as a
++ * divisor.
++ */
++ bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
++
+ update_thr_responsiveness_params(bfqd);
+ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
+
+diff --git a/block/bfq-mq.h b/block/bfq-mq.h
+index e2ae11bf8f76..4a54e5076863 100644
+--- a/block/bfq-mq.h
++++ b/block/bfq-mq.h
+@@ -490,7 +490,12 @@ struct bfq_data {
+ u32 last_rq_max_size;
+ /* time elapsed from first dispatch in current observ. interval (us) */
+ u64 delta_from_first;
+- /* current estimate of device peak rate */
++ /*
++ * Current estimate of the device peak rate, measured in
++ * [(sectors/usec) / 2^BFQ_RATE_SHIFT]. The left-shift by
++ * BFQ_RATE_SHIFT is performed to increase precision in
++ * fixed-point calculations.
++ */
+ u32 peak_rate;
+
+ /* maximum budget allotted to a bfq_queue before rescheduling */
+diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c
+index 7f0cf1f01ffc..e96213865fc2 100644
+--- a/block/bfq-sq-iosched.c
++++ b/block/bfq-sq-iosched.c
+@@ -154,7 +154,20 @@ static struct kmem_cache *bfq_pool;
+ /* Target observation time interval for a peak-rate update (ns) */
+ #define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
+
+-/* Shift used for peak rate fixed precision calculations. */
++/*
++ * Shift used for peak-rate fixed precision calculations.
++ * With
++ * - the current shift: 16 positions
++ * - the current type used to store rate: u32
++ * - the current unit of measure for rate: [sectors/usec], or, more precisely,
++ * [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
++ * the range of rates that can be stored is
++ * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
++ * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
++ * [15, 65G] sectors/sec
++ * Which, assuming a sector size of 512B, corresponds to a range of
++ * [7.5K, 33T] B/sec
++ */
+ #define BFQ_RATE_SHIFT 16
+
+ /*
+@@ -2695,6 +2708,16 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
+ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
+
+ bfqd->peak_rate += rate;
++
++ /*
++ * For a very slow device, bfqd->peak_rate can reach 0 (see
++ * the minimum representable values reported in the comments
++ * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid
++ * divisions by zero where bfqd->peak_rate is used as a
++ * divisor.
++ */
++ bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
++
+ update_thr_responsiveness_params(bfqd);
+ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
+
+diff --git a/block/bfq.h b/block/bfq.h
+index 4d2fe7f77af1..a25e76c906d9 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -498,7 +498,12 @@ struct bfq_data {
+ u32 last_rq_max_size;
+ /* time elapsed from first dispatch in current observ. interval (us) */
+ u64 delta_from_first;
+- /* current estimate of device peak rate */
++ /*
++ * Current estimate of the device peak rate, measured in
++ * [(sectors/usec) / 2^BFQ_RATE_SHIFT]. The left-shift by
++ * BFQ_RATE_SHIFT is performed to increase precision in
++ * fixed-point calculations.
++ */
+ u32 peak_rate;
+
+ /* maximum budget allotted to a bfq_queue before rescheduling */
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0002-Make-preemptible-kernel-default.patch b/sys-kernel/linux-sources-redcore-lts/files/0002-Make-preemptible-kernel-default.patch
new file mode 100644
index 00000000..69abb373
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0002-Make-preemptible-kernel-default.patch
@@ -0,0 +1,733 @@
+From e8e37da685f7988182d7920a711e00dd2457af65 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Sat, 29 Oct 2016 11:20:37 +1100
+Subject: [PATCH 02/16] Make preemptible kernel default.
+
+Make full preempt default on all arches.
+---
+ arch/arc/configs/tb10x_defconfig | 2 +-
+ arch/arm/configs/bcm2835_defconfig | 2 +-
+ arch/arm/configs/imx_v6_v7_defconfig | 2 +-
+ arch/arm/configs/mps2_defconfig | 2 +-
+ arch/arm/configs/mxs_defconfig | 2 +-
+ arch/blackfin/configs/BF518F-EZBRD_defconfig | 2 +-
+ arch/blackfin/configs/BF526-EZBRD_defconfig | 2 +-
+ arch/blackfin/configs/BF527-EZKIT-V2_defconfig | 2 +-
+ arch/blackfin/configs/BF527-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BF527-TLL6527M_defconfig | 2 +-
+ arch/blackfin/configs/BF533-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BF533-STAMP_defconfig | 2 +-
+ arch/blackfin/configs/BF537-STAMP_defconfig | 2 +-
+ arch/blackfin/configs/BF538-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BF548-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BF561-ACVILON_defconfig | 2 +-
+ arch/blackfin/configs/BF561-EZKIT-SMP_defconfig | 2 +-
+ arch/blackfin/configs/BF561-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BF609-EZKIT_defconfig | 2 +-
+ arch/blackfin/configs/BlackStamp_defconfig | 2 +-
+ arch/blackfin/configs/CM-BF527_defconfig | 2 +-
+ arch/blackfin/configs/PNAV-10_defconfig | 2 +-
+ arch/blackfin/configs/SRV1_defconfig | 2 +-
+ arch/blackfin/configs/TCM-BF518_defconfig | 2 +-
+ arch/mips/configs/fuloong2e_defconfig | 3 ++-
+ arch/mips/configs/gpr_defconfig | 3 ++-
+ arch/mips/configs/ip22_defconfig | 3 ++-
+ arch/mips/configs/ip28_defconfig | 3 ++-
+ arch/mips/configs/jazz_defconfig | 3 ++-
+ arch/mips/configs/mtx1_defconfig | 3 ++-
+ arch/mips/configs/nlm_xlr_defconfig | 2 +-
+ arch/mips/configs/pic32mzda_defconfig | 2 +-
+ arch/mips/configs/pistachio_defconfig | 2 +-
+ arch/mips/configs/pnx8335_stb225_defconfig | 2 +-
+ arch/mips/configs/rm200_defconfig | 3 ++-
+ arch/parisc/configs/712_defconfig | 2 +-
+ arch/parisc/configs/c3000_defconfig | 2 +-
+ arch/parisc/configs/default_defconfig | 2 +-
+ arch/powerpc/configs/c2k_defconfig | 2 +-
+ arch/powerpc/configs/ppc6xx_defconfig | 2 +-
+ arch/score/configs/spct6600_defconfig | 2 +-
+ arch/sh/configs/se7712_defconfig | 2 +-
+ arch/sh/configs/se7721_defconfig | 2 +-
+ arch/sh/configs/titan_defconfig | 2 +-
+ arch/sparc/configs/sparc64_defconfig | 2 +-
+ arch/tile/configs/tilegx_defconfig | 2 +-
+ arch/tile/configs/tilepro_defconfig | 2 +-
+ arch/x86/configs/i386_defconfig | 2 +-
+ arch/x86/configs/x86_64_defconfig | 2 +-
+ kernel/Kconfig.preempt | 7 ++++---
+ 50 files changed, 60 insertions(+), 52 deletions(-)
+
+diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
+index f30182549395..42910f628869 100644
+--- a/arch/arc/configs/tb10x_defconfig
++++ b/arch/arc/configs/tb10x_defconfig
+@@ -28,7 +28,7 @@ CONFIG_ARC_PLAT_TB10X=y
+ CONFIG_ARC_CACHE_LINE_SHIFT=5
+ CONFIG_HZ=250
+ CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_COMPACTION is not set
+ CONFIG_NET=y
+ CONFIG_PACKET=y
+diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
+index 43dab4890ad3..44a52166ca5e 100644
+--- a/arch/arm/configs/bcm2835_defconfig
++++ b/arch/arm/configs/bcm2835_defconfig
+@@ -29,7 +29,7 @@ CONFIG_MODULE_UNLOAD=y
+ CONFIG_ARCH_MULTI_V6=y
+ CONFIG_ARCH_BCM=y
+ CONFIG_ARCH_BCM2835=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_AEABI=y
+ CONFIG_KSM=y
+ CONFIG_CLEANCACHE=y
+diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
+index 32acac9ab81a..1482bb312987 100644
+--- a/arch/arm/configs/imx_v6_v7_defconfig
++++ b/arch/arm/configs/imx_v6_v7_defconfig
+@@ -47,7 +47,7 @@ CONFIG_PCI_MSI=y
+ CONFIG_PCI_IMX6=y
+ CONFIG_SMP=y
+ CONFIG_ARM_PSCI=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_AEABI=y
+ CONFIG_HIGHMEM=y
+ CONFIG_CMA=y
+diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig
+index 0bcdec7cc169..10ceaefa51e0 100644
+--- a/arch/arm/configs/mps2_defconfig
++++ b/arch/arm/configs/mps2_defconfig
+@@ -18,7 +18,7 @@ CONFIG_ARCH_MPS2=y
+ CONFIG_SET_MEM_PARAM=y
+ CONFIG_DRAM_BASE=0x21000000
+ CONFIG_DRAM_SIZE=0x1000000
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_ATAGS is not set
+ CONFIG_ZBOOT_ROM_TEXT=0x0
+ CONFIG_ZBOOT_ROM_BSS=0x0
+diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
+index e5822ab01b7d..3e77e02f678f 100644
+--- a/arch/arm/configs/mxs_defconfig
++++ b/arch/arm/configs/mxs_defconfig
+@@ -27,7 +27,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
+ # CONFIG_ARCH_MULTI_V7 is not set
+ CONFIG_ARCH_MXS=y
+ # CONFIG_ARM_THUMB is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_AEABI=y
+ CONFIG_NET=y
+ CONFIG_PACKET=y
+diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
+index 99c00d835f47..39b91dfa55b5 100644
+--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
++++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF518=y
+ CONFIG_IRQ_TIMER0=12
+ # CONFIG_CYCLES_CLOCKSOURCE is not set
+diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
+index e66ba31ef84d..675cadb3a0c4 100644
+--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
++++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF526=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_BFIN526_EZBRD=y
+diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+index 0207c588c19f..4c517c443af5 100644
+--- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
++++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_2=y
+ CONFIG_BFIN527_EZKIT_V2=y
+diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
+index 99c131ba7d90..bf8df3e6cf02 100644
+--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_1=y
+ CONFIG_IRQ_USB_INT0=11
+diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig
+index cdeb51856f26..0220b3b15c53 100644
+--- a/arch/blackfin/configs/BF527-TLL6527M_defconfig
++++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig
+@@ -21,7 +21,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_LBDAF is not set
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_2=y
+ CONFIG_BFIN527_TLL6527M=y
+diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
+index ed7d2c096739..6023e3fd2c48 100644
+--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BFIN533_EZKIT=y
+ CONFIG_TIMER0=11
+ CONFIG_CLKIN_HZ=27000000
+diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
+index 0c241f4d28d7..f5cd0f18b711 100644
+--- a/arch/blackfin/configs/BF533-STAMP_defconfig
++++ b/arch/blackfin/configs/BF533-STAMP_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_TIMER0=11
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
+index e5360b30e39a..48085fde7f9e 100644
+--- a/arch/blackfin/configs/BF537-STAMP_defconfig
++++ b/arch/blackfin/configs/BF537-STAMP_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF537=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
+index 60f6fb86125c..12deeaaef3cb 100644
+--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
+@@ -21,7 +21,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF538=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_IRQ_TIMER1=12
+diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
+index 38cb17d218d4..6a68ffc55b5a 100644
+--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF548_std=y
+ CONFIG_IRQ_TIMER0=11
+ # CONFIG_CYCLES_CLOCKSOURCE is not set
+diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
+index 78f6bc79f910..e9f3ba783a4e 100644
+--- a/arch/blackfin/configs/BF561-ACVILON_defconfig
++++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
+@@ -20,7 +20,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_LBDAF is not set
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF561=y
+ CONFIG_BF_REV_0_5=y
+ CONFIG_IRQ_TIMER0=10
+diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+index fac8bb578249..89b75a6c3fab 100644
+--- a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
++++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF561=y
+ CONFIG_SMP=y
+ CONFIG_IRQ_TIMER0=10
+diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
+index 2a2e4d0cebc1..67b3d2f419ba 100644
+--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF561=y
+ CONFIG_IRQ_TIMER0=10
+ CONFIG_CLKIN_HZ=30000000
+diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
+index 3ce77f07208a..8cc75d4218fb 100644
+--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
++++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
+@@ -20,7 +20,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF609=y
+ CONFIG_PINT1_ASSIGN=0x01010000
+ CONFIG_PINT2_ASSIGN=0x07000101
+diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
+index f4a9200e1ab1..9faf0ec7007f 100644
+--- a/arch/blackfin/configs/BlackStamp_defconfig
++++ b/arch/blackfin/configs/BlackStamp_defconfig
+@@ -17,7 +17,7 @@ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF532=y
+ CONFIG_BF_REV_0_5=y
+ CONFIG_BLACKSTAMP=y
+diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
+index 1902bb05d086..4a1ad4fd7bb2 100644
+--- a/arch/blackfin/configs/CM-BF527_defconfig
++++ b/arch/blackfin/configs/CM-BF527_defconfig
+@@ -19,7 +19,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF527=y
+ CONFIG_BF_REV_0_1=y
+ CONFIG_IRQ_TIMER0=12
+diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
+index c7926812971c..9d787e28bbe8 100644
+--- a/arch/blackfin/configs/PNAV-10_defconfig
++++ b/arch/blackfin/configs/PNAV-10_defconfig
+@@ -15,7 +15,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF537=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_PNAV10=y
+diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
+index 23fdc57d657a..225df32dc9a8 100644
+--- a/arch/blackfin/configs/SRV1_defconfig
++++ b/arch/blackfin/configs/SRV1_defconfig
+@@ -13,7 +13,7 @@ CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_IOSCHED_DEADLINE is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF537=y
+ CONFIG_IRQ_TIMER0=12
+ CONFIG_BOOT_LOAD=0x400000
+diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
+index e28959479fe0..425c24e43c34 100644
+--- a/arch/blackfin/configs/TCM-BF518_defconfig
++++ b/arch/blackfin/configs/TCM-BF518_defconfig
+@@ -23,7 +23,7 @@ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ # CONFIG_IOSCHED_DEADLINE is not set
+ # CONFIG_IOSCHED_CFQ is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BF518=y
+ CONFIG_BF_REV_0_1=y
+ CONFIG_BFIN518F_TCM=y
+diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
+index 499f51498ecb..f7cb39b0662c 100644
+--- a/arch/mips/configs/fuloong2e_defconfig
++++ b/arch/mips/configs/fuloong2e_defconfig
+@@ -2,7 +2,8 @@ CONFIG_MACH_LOONGSON64=y
+ CONFIG_64BIT=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_LOCALVERSION="-fuloong2e"
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
+index 55438fc9991e..db03ef4f737d 100644
+--- a/arch/mips/configs/gpr_defconfig
++++ b/arch/mips/configs/gpr_defconfig
+@@ -1,7 +1,8 @@
+ CONFIG_MIPS_ALCHEMY=y
+ CONFIG_MIPS_GPR=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
+index 83e8fe2064aa..93e7b167433b 100644
+--- a/arch/mips/configs/ip22_defconfig
++++ b/arch/mips/configs/ip22_defconfig
+@@ -3,7 +3,8 @@ CONFIG_CPU_R5000=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_HZ_1000=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
+index d0a4c2cfacf8..6f0600e99c25 100644
+--- a/arch/mips/configs/ip28_defconfig
++++ b/arch/mips/configs/ip28_defconfig
+@@ -1,6 +1,7 @@
+ CONFIG_SGI_IP28=y
+ CONFIG_ARC_CONSOLE=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
+index 9ad1c94376c8..1d62ce7ff5dc 100644
+--- a/arch/mips/configs/jazz_defconfig
++++ b/arch/mips/configs/jazz_defconfig
+@@ -1,6 +1,7 @@
+ CONFIG_MACH_JAZZ=y
+ CONFIG_OLIVETTI_M700=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_BSD_PROCESS_ACCT=y
+diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
+index c3d0d0a6e044..aa3426d5f7d7 100644
+--- a/arch/mips/configs/mtx1_defconfig
++++ b/arch/mips/configs/mtx1_defconfig
+@@ -1,6 +1,7 @@
+ CONFIG_MIPS_ALCHEMY=y
+ CONFIG_MIPS_MTX1=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
+index 1e18fd7de209..b514e91e5426 100644
+--- a/arch/mips/configs/nlm_xlr_defconfig
++++ b/arch/mips/configs/nlm_xlr_defconfig
+@@ -5,7 +5,7 @@ CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+ CONFIG_SMP=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_KEXEC=y
+ CONFIG_CROSS_COMPILE=""
+ # CONFIG_LOCALVERSION_AUTO is not set
+diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
+index 52192c632ae8..96b087498dab 100644
+--- a/arch/mips/configs/pic32mzda_defconfig
++++ b/arch/mips/configs/pic32mzda_defconfig
+@@ -1,7 +1,7 @@
+ CONFIG_MACH_PIC32=y
+ CONFIG_DTB_PIC32_MZDA_SK=y
+ CONFIG_HZ_100=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_SECCOMP is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_NO_HZ=y
+diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
+index b22a3cf149b6..cfffca3d37f4 100644
+--- a/arch/mips/configs/pistachio_defconfig
++++ b/arch/mips/configs/pistachio_defconfig
+@@ -5,7 +5,7 @@ CONFIG_MIPS_CPS=y
+ CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+ CONFIG_ZSMALLOC=y
+ CONFIG_NR_CPUS=4
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_DEFAULT_HOSTNAME="localhost"
+ CONFIG_SYSVIPC=y
+diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig
+index 81b5eb89446c..19f8cea849a1 100644
+--- a/arch/mips/configs/pnx8335_stb225_defconfig
++++ b/arch/mips/configs/pnx8335_stb225_defconfig
+@@ -3,7 +3,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_HZ_128=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_SECCOMP is not set
+ # CONFIG_LOCALVERSION_AUTO is not set
+ # CONFIG_SWAP is not set
+diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
+index 99679e514042..2ced507a8ba7 100644
+--- a/arch/mips/configs/rm200_defconfig
++++ b/arch/mips/configs/rm200_defconfig
+@@ -2,7 +2,8 @@ CONFIG_SNI_RM=y
+ CONFIG_CPU_LITTLE_ENDIAN=y
+ CONFIG_ARC_CONSOLE=y
+ CONFIG_HZ_1000=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
++CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_BSD_PROCESS_ACCT=y
+diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
+index ccc109761f44..a6a5b0b7a9c9 100644
+--- a/arch/parisc/configs/712_defconfig
++++ b/arch/parisc/configs/712_defconfig
+@@ -13,7 +13,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ CONFIG_PA7100LC=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_GSC_LASI=y
+ # CONFIG_PDC_CHASSIS is not set
+ CONFIG_BINFMT_MISC=m
+diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
+index 8d41a73bd71b..b8e0a6662ff9 100644
+--- a/arch/parisc/configs/c3000_defconfig
++++ b/arch/parisc/configs/c3000_defconfig
+@@ -13,7 +13,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ CONFIG_PA8X00=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_GSC is not set
+ CONFIG_PCI=y
+ CONFIG_PCI_LBA=y
+diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
+index 52c9050a7c5c..8d86d2e989f4 100644
+--- a/arch/parisc/configs/default_defconfig
++++ b/arch/parisc/configs/default_defconfig
+@@ -14,7 +14,7 @@ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ CONFIG_PA7100LC=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_IOMMU_CCIO=y
+ CONFIG_GSC_LASI=y
+ CONFIG_GSC_WAX=y
+diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
+index f1552af9eecc..f8505e6ec7b3 100644
+--- a/arch/powerpc/configs/c2k_defconfig
++++ b/arch/powerpc/configs/c2k_defconfig
+@@ -29,7 +29,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+ CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+ CONFIG_GEN_RTC=y
+ CONFIG_HIGHMEM=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BINFMT_MISC=y
+ CONFIG_PM=y
+ CONFIG_PCI_MSI=y
+diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
+index da0e8d535eb8..c016af41ab4f 100644
+--- a/arch/powerpc/configs/ppc6xx_defconfig
++++ b/arch/powerpc/configs/ppc6xx_defconfig
+@@ -74,7 +74,7 @@ CONFIG_QE_GPIO=y
+ CONFIG_MCU_MPC8349EMITX=y
+ CONFIG_HIGHMEM=y
+ CONFIG_HZ_1000=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BINFMT_MISC=y
+ CONFIG_HIBERNATION=y
+ CONFIG_PM_DEBUG=y
+diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig
+index b2d8802f43b4..46434ca1fa10 100644
+--- a/arch/score/configs/spct6600_defconfig
++++ b/arch/score/configs/spct6600_defconfig
+@@ -1,5 +1,5 @@
+ CONFIG_HZ_100=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_EXPERIMENTAL=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
+index 5a1097641247..eb5fbf554e7f 100644
+--- a/arch/sh/configs/se7712_defconfig
++++ b/arch/sh/configs/se7712_defconfig
+@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
+ CONFIG_SH_SOLUTION_ENGINE=y
+ CONFIG_SH_PCLK_FREQ=66666666
+ CONFIG_HEARTBEAT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
+ CONFIG_NET=y
+diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
+index 9c0ef13bee10..cbaa65c8bf9e 100644
+--- a/arch/sh/configs/se7721_defconfig
++++ b/arch/sh/configs/se7721_defconfig
+@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
+ CONFIG_SH_7721_SOLUTION_ENGINE=y
+ CONFIG_SH_PCLK_FREQ=33333333
+ CONFIG_HEARTBEAT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda2"
+ CONFIG_NET=y
+diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
+index ceb48e9b70f4..1a69eda6610c 100644
+--- a/arch/sh/configs/titan_defconfig
++++ b/arch/sh/configs/titan_defconfig
+@@ -20,7 +20,7 @@ CONFIG_SH_TITAN=y
+ CONFIG_SH_PCLK_FREQ=30000000
+ CONFIG_SH_DMA=y
+ CONFIG_SH_DMA_API=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC1,38400N81 root=/dev/nfs ip=:::::eth1:autoconf rw"
+ CONFIG_PCI=y
+diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
+index 4d4e1cc6402f..04bea1d28ba7 100644
+--- a/arch/sparc/configs/sparc64_defconfig
++++ b/arch/sparc/configs/sparc64_defconfig
+@@ -22,7 +22,7 @@ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_NUMA=y
+ CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_SUN_LDOMS=y
+ CONFIG_PCI=y
+ CONFIG_PCI_MSI=y
+diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
+index 9f94435cc44f..aa78ee6cd5eb 100644
+--- a/arch/tile/configs/tilegx_defconfig
++++ b/arch/tile/configs/tilegx_defconfig
+@@ -47,7 +47,7 @@ CONFIG_CFQ_GROUP_IOSCHED=y
+ CONFIG_NR_CPUS=100
+ CONFIG_HZ_100=y
+ # CONFIG_COMPACTION is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_TILE_PCI_IO=y
+ CONFIG_PCI_DEBUG=y
+ # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
+index 1c5bd4f8ffca..38005862062c 100644
+--- a/arch/tile/configs/tilepro_defconfig
++++ b/arch/tile/configs/tilepro_defconfig
+@@ -44,7 +44,7 @@ CONFIG_KARMA_PARTITION=y
+ CONFIG_CFQ_GROUP_IOSCHED=y
+ CONFIG_HZ_100=y
+ # CONFIG_COMPACTION is not set
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_PCI_DEBUG=y
+ # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+ CONFIG_BINFMT_MISC=y
+diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
+index 0eb9f92f3717..e5890ae917e5 100644
+--- a/arch/x86/configs/i386_defconfig
++++ b/arch/x86/configs/i386_defconfig
+@@ -41,7 +41,7 @@ CONFIG_SMP=y
+ CONFIG_X86_GENERIC=y
+ CONFIG_HPET_TIMER=y
+ CONFIG_SCHED_SMT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+ CONFIG_X86_MCE=y
+ CONFIG_X86_REBOOTFIXUPS=y
+diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
+index 4a4b16e56d35..7452dcadda74 100644
+--- a/arch/x86/configs/x86_64_defconfig
++++ b/arch/x86/configs/x86_64_defconfig
+@@ -40,7 +40,7 @@ CONFIG_SMP=y
+ CONFIG_CALGARY_IOMMU=y
+ CONFIG_NR_CPUS=64
+ CONFIG_SCHED_SMT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+ CONFIG_X86_MCE=y
+ CONFIG_MICROCODE=y
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index 3f9c97419f02..1dc79ec7ad09 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -1,7 +1,7 @@
+
+ choice
+ prompt "Preemption Model"
+- default PREEMPT_NONE
++ default PREEMPT
+
+ config PREEMPT_NONE
+ bool "No Forced Preemption (Server)"
+@@ -17,7 +17,7 @@ config PREEMPT_NONE
+ latencies.
+
+ config PREEMPT_VOLUNTARY
+- bool "Voluntary Kernel Preemption (Desktop)"
++ bool "Voluntary Kernel Preemption (Nothing)"
+ help
+ This option reduces the latency of the kernel by adding more
+ "explicit preemption points" to the kernel code. These new
+@@ -31,7 +31,8 @@ config PREEMPT_VOLUNTARY
+ applications to run more 'smoothly' even when the system is
+ under load.
+
+- Select this if you are building a kernel for a desktop system.
++ Select this for no system in particular (choose Preemptible
++ instead on a desktop if you know what's good for you).
+
+ config PREEMPT
+ bool "Preemptible Kernel (Low-Latency Desktop)"
+--
+2.11.0
+
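The hunks above flip every bundled defconfig from CONFIG_PREEMPT_VOLUNTARY to CONFIG_PREEMPT and make PREEMPT the Kconfig default, so the resulting kernel builds fully preemptible unless the user opts out. The practical difference, roughly: with PREEMPT_VOLUNTARY the kernel only reschedules at explicit preemption points such as cond_resched(), while with PREEMPT any kernel code outside a critical section can be preempted as soon as a higher-priority task wakes. A minimal sketch of what such a preemption point looks like; the struct, helper and loop below are hypothetical illustrations, not taken from the patch:

#include <linux/sched.h>
#include <linux/types.h>

struct item { int value; };              /* hypothetical payload */

static void handle_one(struct item *it)
{
	it->value++;                     /* stand-in for real per-item work */
}

static void process_many_items(struct item *items, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		handle_one(&items[i]);
		/* Under PREEMPT_VOLUNTARY this call is the only place the
		 * loop yields the CPU; under CONFIG_PREEMPT the loop can
		 * additionally be preempted on any interrupt return. */
		cond_resched();
	}
}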
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0003-Expose-vmsplit-for-our-poor-32-bit-users.patch b/sys-kernel/linux-sources-redcore-lts/files/0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
new file mode 100644
index 00000000..b7897dbe
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
@@ -0,0 +1,48 @@
+From 44fc740a3ff85d378c28a416a076cc7e019d7b8c Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Fri, 12 May 2017 13:07:37 +1000
+Subject: [PATCH 03/16] Expose vmsplit for our poor 32 bit users.
+
+---
+ arch/x86/Kconfig | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index e06a7b4e1dc4..931aba4fc567 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1361,7 +1361,7 @@ config HIGHMEM64G
+ endchoice
+
+ choice
+- prompt "Memory split" if EXPERT
++ prompt "Memory split"
+ default VMSPLIT_3G
+ depends on X86_32
+ ---help---
+@@ -1381,17 +1381,17 @@ choice
+ option alone!
+
+ config VMSPLIT_3G
+- bool "3G/1G user/kernel split"
++ bool "Default 896MB lowmem (3G/1G user/kernel split)"
+ config VMSPLIT_3G_OPT
+ depends on !X86_PAE
+- bool "3G/1G user/kernel split (for full 1G low memory)"
++ bool "1GB lowmem (3G/1G user/kernel split)"
+ config VMSPLIT_2G
+- bool "2G/2G user/kernel split"
++ bool "2GB lowmem (2G/2G user/kernel split)"
+ config VMSPLIT_2G_OPT
+ depends on !X86_PAE
+- bool "2G/2G user/kernel split (for full 2G low memory)"
++ bool "2GB lowmem (2G/2G user/kernel split)"
+ config VMSPLIT_1G
+- bool "1G/3G user/kernel split"
++ bool "3GB lowmem (1G/3G user/kernel split)"
+ endchoice
+
+ config PAGE_OFFSET
+--
+2.11.0
+
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0004-Create-highres-timeout-variants-of-schedule_timeout-.patch b/sys-kernel/linux-sources-redcore-lts/files/0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
new file mode 100644
index 00000000..3c182fbe
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
@@ -0,0 +1,153 @@
+From d27b58b0707ac311be5a51594fc6f22ed1d109e5 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Sat, 12 Aug 2017 11:53:39 +1000
+Subject: [PATCH 04/16] Create highres timeout variants of schedule_timeout
+ functions.
+
+---
+ include/linux/freezer.h | 1 +
+ include/linux/sched.h | 31 +++++++++++++++++++--
+ kernel/time/hrtimer.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 101 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index 3995df1d068f..f8645e8f2444 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -297,6 +297,7 @@ static inline void set_freezable(void) {}
+ #define wait_event_freezekillable_unsafe(wq, condition) \
+ wait_event_killable(wq, condition)
+
++#define pm_freezing (false)
+ #endif /* !CONFIG_FREEZER */
+
+ #endif /* FREEZER_H_INCLUDED */
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 35dc91a0e2ed..38852ebfa864 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -173,13 +173,40 @@ extern cpumask_var_t cpu_isolated_map;
+
+ extern void scheduler_tick(void);
+
+-#define MAX_SCHEDULE_TIMEOUT LONG_MAX
+-
++#define MAX_SCHEDULE_TIMEOUT LONG_MAX
+ extern long schedule_timeout(long timeout);
+ extern long schedule_timeout_interruptible(long timeout);
+ extern long schedule_timeout_killable(long timeout);
+ extern long schedule_timeout_uninterruptible(long timeout);
+ extern long schedule_timeout_idle(long timeout);
++
++#ifdef CONFIG_HIGH_RES_TIMERS
++extern long schedule_msec_hrtimeout(long timeout);
++extern long schedule_min_hrtimeout(void);
++extern long schedule_msec_hrtimeout_interruptible(long timeout);
++extern long schedule_msec_hrtimeout_uninterruptible(long timeout);
++#else
++static inline long schedule_msec_hrtimeout(long timeout)
++{
++ return schedule_timeout(msecs_to_jiffies(timeout));
++}
++
++static inline long schedule_min_hrtimeout(void)
++{
++ return schedule_timeout(1);
++}
++
++static inline long schedule_msec_hrtimeout_interruptible(long timeout)
++{
++ return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
++}
++
++static inline long schedule_msec_hrtimeout_uninterruptible(long timeout)
++{
++ return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
++}
++#endif
++
+ asmlinkage void schedule(void);
+ extern void schedule_preempt_disabled(void);
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 88f75f92ef36..13227cf2814c 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1787,3 +1787,74 @@ int __sched schedule_hrtimeout(ktime_t *expires,
+ return schedule_hrtimeout_range(expires, 0, mode);
+ }
+ EXPORT_SYMBOL_GPL(schedule_hrtimeout);
++
++/*
++ * As per schedule_hrtimeout but takes a millisecond value and returns how
++ * many milliseconds are left.
++ */
++long __sched schedule_msec_hrtimeout(long timeout)
++{
++ struct hrtimer_sleeper t;
++ int delta, secs, jiffs;
++ ktime_t expires;
++
++ if (!timeout) {
++ __set_current_state(TASK_RUNNING);
++ return 0;
++ }
++
++ jiffs = msecs_to_jiffies(timeout);
++ /*
++ * If regular timer resolution is adequate or hrtimer resolution is not
++ * (yet) better than Hz, as would occur during startup, use regular
++ * timers.
++ */
++ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
++ return schedule_timeout(jiffs);
++
++ secs = timeout / 1000;
++ delta = (timeout % 1000) * NSEC_PER_MSEC;
++ expires = ktime_set(secs, delta);
++
++ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
++
++ hrtimer_init_sleeper(&t, current);
++
++ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
++
++ if (likely(t.task))
++ schedule();
++
++ hrtimer_cancel(&t.timer);
++ destroy_hrtimer_on_stack(&t.timer);
++
++ __set_current_state(TASK_RUNNING);
++
++ expires = hrtimer_expires_remaining(&t.timer);
++ timeout = ktime_to_ms(expires);
++ return timeout < 0 ? 0 : timeout;
++}
++
++EXPORT_SYMBOL(schedule_msec_hrtimeout);
++
++long __sched schedule_min_hrtimeout(void)
++{
++ return schedule_msec_hrtimeout(1);
++}
++
++EXPORT_SYMBOL(schedule_min_hrtimeout);
++
++long __sched schedule_msec_hrtimeout_interruptible(long timeout)
++{
++ __set_current_state(TASK_INTERRUPTIBLE);
++ return schedule_msec_hrtimeout(timeout);
++}
++EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
++
++long __sched schedule_msec_hrtimeout_uninterruptible(long timeout)
++{
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ return schedule_msec_hrtimeout(timeout);
++}
++EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
+--
+2.11.0
+
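The four helpers introduced above keep schedule_timeout()'s calling convention: schedule_msec_hrtimeout() and schedule_min_hrtimeout() expect the caller to have set the task state already, the _interruptible/_uninterruptible wrappers set it themselves, and all of them return how much of the requested timeout was left (0 if it ran out). A minimal usage sketch, assuming a hypothetical hardware "ready" flag and caller; the conversions in the later 0007-0009 patches follow this same shape:

#include <linux/sched.h>
#include <linux/errno.h>

/* Sketch only: wait_until_ready(), the ready flag and budget_ms are
 * hypothetical.  Polls roughly once per millisecond instead of once
 * per 10 ms jiffy at HZ=100. */
static int wait_until_ready(const volatile int *ready, long budget_ms)
{
	while (!*ready && budget_ms-- > 0)
		schedule_msec_hrtimeout_uninterruptible(1);

	return *ready ? 0 : -ETIMEDOUT;
}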
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch b/sys-kernel/linux-sources-redcore-lts/files/0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
new file mode 100644
index 00000000..3c889719
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
@@ -0,0 +1,50 @@
+From 5da7d1778b96c514394334c92de9b3d8d71f4a29 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Sat, 5 Nov 2016 09:27:36 +1100
+Subject: [PATCH 05/16] Special case calls of schedule_timeout(1) to use the
+ min hrtimeout of 1ms, working around low Hz resolutions.
+
+---
+ kernel/time/timer.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 9c18e16059a3..dd4d1b193286 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1741,6 +1741,19 @@ signed long __sched schedule_timeout(signed long timeout)
+
+ expire = timeout + jiffies;
+
++#ifdef CONFIG_HIGH_RES_TIMERS
++ if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++ /*
++ * Special case 1 as being a request for the minimum timeout
++ * and use highres timers to timeout after 1ms to workaround
++ * the granularity of low Hz tick timers.
++ */
++ if (!schedule_min_hrtimeout())
++ return 0;
++ goto out_timeout;
++ }
++#endif
++
+ setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
+ __mod_timer(&timer, expire, false);
+ schedule();
+@@ -1748,10 +1761,10 @@ signed long __sched schedule_timeout(signed long timeout)
+
+ /* Remove the timer from the object tracker */
+ destroy_timer_on_stack(&timer);
+-
++out_timeout:
+ timeout = expire - jiffies;
+
+- out:
++out:
+ return timeout < 0 ? 0 : timeout;
+ }
+ EXPORT_SYMBOL(schedule_timeout);
+--
+2.11.0
+
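Some context on why a timeout of exactly 1 is special-cased: at HZ=100 one jiffy is 1000 ms / 100 = 10 ms, so schedule_timeout(1) wakes at the next tick, anywhere from almost immediately up to roughly 10 ms later, and a polling loop built on it advances in 10 ms steps. Rerouting that case to schedule_min_hrtimeout() gives a ~1 ms high-resolution sleep instead, while the hrtimer_resolution check preserves the old behaviour whenever hrtimers are no finer than the tick.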
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0006-Convert-msleep-to-use-hrtimers-when-active.patch b/sys-kernel/linux-sources-redcore-lts/files/0006-Convert-msleep-to-use-hrtimers-when-active.patch
new file mode 100644
index 00000000..2f065652
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0006-Convert-msleep-to-use-hrtimers-when-active.patch
@@ -0,0 +1,54 @@
+From 9df803c28bb8ccb2588c0ccaf857b9e673175fed Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Fri, 4 Nov 2016 09:25:54 +1100
+Subject: [PATCH 06/16] Convert msleep to use hrtimers when active.
+
+---
+ kernel/time/timer.c | 24 ++++++++++++++++++++++--
+ 1 file changed, 22 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index dd4d1b193286..c68cb9307f64 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1884,7 +1884,19 @@ void __init init_timers(void)
+ */
+ void msleep(unsigned int msecs)
+ {
+- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
++ int jiffs = msecs_to_jiffies(msecs);
++ unsigned long timeout;
++
++ /*
++ * Use high resolution timers where the resolution of tick based
++ * timers is inadequate.
++ */
++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++ while (msecs)
++ msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
++ return;
++ }
++ timeout = msecs_to_jiffies(msecs) + 1;
+
+ while (timeout)
+ timeout = schedule_timeout_uninterruptible(timeout);
+@@ -1898,7 +1910,15 @@ EXPORT_SYMBOL(msleep);
+ */
+ unsigned long msleep_interruptible(unsigned int msecs)
+ {
+- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
++ int jiffs = msecs_to_jiffies(msecs);
++ unsigned long timeout;
++
++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++ while (msecs && !signal_pending(current))
++ msecs = schedule_msec_hrtimeout_interruptible(msecs);
++ return msecs;
++ }
++ timeout = msecs_to_jiffies(msecs) + 1;
+
+ while (timeout && !signal_pending(current))
+ timeout = schedule_timeout_interruptible(timeout);
+--
+2.11.0
+
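The threshold in the hunk above works out as follows at HZ=100: msecs_to_jiffies() yields fewer than 5 jiffies for requests of roughly 40 ms and below, so short msleep() and msleep_interruptible() calls now take the hrtimeout path (when high-resolution timers are genuinely finer than the tick) and sleep close to the requested time. The old code always rounded up to whole jiffies and added one (msecs_to_jiffies(msecs) + 1), so for example msleep(1) used to last on the order of 10-20 ms rather than about 1 ms.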
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch b/sys-kernel/linux-sources-redcore-lts/files/0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
new file mode 100644
index 00000000..ff071da8
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
@@ -0,0 +1,529 @@
+diff -Nur a/drivers/block/swim.c b/drivers/block/swim.c
+--- a/drivers/block/swim.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/block/swim.c 2018-11-03 16:30:39.471807304 +0000
+@@ -332,7 +332,7 @@
+ if (swim_readbit(base, MOTOR_ON))
+ break;
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ } else if (action == OFF) {
+ swim_action(base, MOTOR_OFF);
+@@ -351,7 +351,7 @@
+ if (!swim_readbit(base, DISK_IN))
+ break;
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ swim_select(base, RELAX);
+ }
+@@ -375,7 +375,7 @@
+ for (wait = 0; wait < HZ; wait++) {
+
+ current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ swim_select(base, RELAX);
+ if (!swim_readbit(base, STEP))
+diff -Nur a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+--- a/drivers/bluetooth/hci_qca.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/bluetooth/hci_qca.c 2018-11-03 16:31:56.065260061 +0000
+@@ -880,7 +880,7 @@
+ * then host can communicate with new baudrate to controller
+ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
++ schedule_msec_hrtimeout((BAUDRATE_SETTLE_TIMEOUT_MS));
+ set_current_state(TASK_RUNNING);
+
+ return 0;
+diff -Nur a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+--- a/drivers/char/ipmi/ipmi_msghandler.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/char/ipmi/ipmi_msghandler.c 2018-11-03 16:30:39.473807368 +0000
+@@ -2953,7 +2953,7 @@
+ /* Current message first, to preserve order */
+ while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+ /* Wait for the message to clear out. */
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ /* No need for locks, the interface is down. */
+diff -Nur a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+--- a/drivers/char/ipmi/ipmi_ssif.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/char/ipmi/ipmi_ssif.c 2018-11-03 16:30:39.473807368 +0000
+@@ -1200,7 +1200,7 @@
+
+ /* make sure the driver is not looking for flags any more. */
+ while (ssif_info->ssif_state != SSIF_NORMAL)
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ ssif_info->stopping = true;
+ del_timer_sync(&ssif_info->retry_timer);
+diff -Nur a/drivers/char/snsc.c b/drivers/char/snsc.c
+--- a/drivers/char/snsc.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/char/snsc.c 2018-11-03 16:30:39.474807400 +0000
+@@ -198,7 +198,7 @@
+ add_wait_queue(&sd->sd_rq, &wait);
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+
+- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
++ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
+
+ remove_wait_queue(&sd->sd_rq, &wait);
+ if (signal_pending(current)) {
+@@ -294,7 +294,7 @@
+ add_wait_queue(&sd->sd_wq, &wait);
+ spin_unlock_irqrestore(&sd->sd_wlock, flags);
+
+- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
++ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
+
+ remove_wait_queue(&sd->sd_wq, &wait);
+ if (signal_pending(current)) {
+diff -Nur a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2018-11-03 16:30:39.474807400 +0000
+@@ -235,7 +235,7 @@
+ DRM_ERROR("SVGA device lockup.\n");
+ break;
+ }
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+diff -Nur a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2018-11-03 16:30:39.474807400 +0000
+@@ -202,7 +202,7 @@
+ break;
+ }
+ if (lazy)
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ else if ((++count & 0x0F) == 0) {
+ /**
+ * FIXME: Use schedule_hr_timeout here for
+diff -Nur a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
+--- a/drivers/media/pci/ivtv/ivtv-ioctl.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/media/pci/ivtv/ivtv-ioctl.c 2018-11-03 16:30:39.475807432 +0000
+@@ -1154,7 +1154,7 @@
+ TASK_UNINTERRUPTIBLE);
+ if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
+ break;
+- schedule_timeout(msecs_to_jiffies(25));
++ schedule_msec_hrtimeout((25));
+ }
+ finish_wait(&itv->vsync_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
+diff -Nur a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
+--- a/drivers/media/pci/ivtv/ivtv-streams.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/media/pci/ivtv/ivtv-streams.c 2018-11-03 16:30:39.475807432 +0000
+@@ -834,7 +834,7 @@
+ while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
+ time_before(jiffies,
+ then + msecs_to_jiffies(2000))) {
+- schedule_timeout(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout((10));
+ }
+
+ /* To convert jiffies to ms, we must multiply by 1000
+diff -Nur a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
+--- a/drivers/mfd/ucb1x00-core.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/mfd/ucb1x00-core.c 2018-11-03 16:30:39.476807464 +0000
+@@ -253,7 +253,7 @@
+ break;
+ /* yield to other processes */
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ return UCB_ADC_DAT(val);
+diff -Nur a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
+--- a/drivers/misc/sgi-xp/xpc_channel.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/misc/sgi-xp/xpc_channel.c 2018-11-03 16:30:39.476807464 +0000
+@@ -837,7 +837,7 @@
+
+ atomic_inc(&ch->n_on_msg_allocate_wq);
+ prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
+- ret = schedule_timeout(1);
++ ret = schedule_min_hrtimeout();
+ finish_wait(&ch->msg_allocate_wq, &wait);
+ atomic_dec(&ch->n_on_msg_allocate_wq);
+
+diff -Nur a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
+--- a/drivers/net/caif/caif_hsi.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/net/caif/caif_hsi.c 2018-11-03 16:30:39.477807497 +0000
+@@ -940,7 +940,7 @@
+ break;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ retry--;
+ }
+
+diff -Nur a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
+--- a/drivers/net/can/usb/peak_usb/pcan_usb.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/net/can/usb/peak_usb/pcan_usb.c 2018-11-03 16:30:39.477807497 +0000
+@@ -250,7 +250,7 @@
+ } else {
+ /* the PCAN-USB needs time to init */
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
++ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
+ }
+
+ return err;
+diff -Nur a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+--- a/drivers/net/usb/lan78xx.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/net/usb/lan78xx.c 2018-11-03 16:30:39.478807529 +0000
+@@ -2567,7 +2567,7 @@
+ while (!skb_queue_empty(&dev->rxq) &&
+ !skb_queue_empty(&dev->txq) &&
+ !skb_queue_empty(&dev->done)) {
+- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ netif_dbg(dev, ifdown, dev->net,
+ "waited for %d urb completions\n", temp);
+diff -Nur a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+--- a/drivers/net/usb/usbnet.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/net/usb/usbnet.c 2018-11-03 16:30:39.479807561 +0000
+@@ -772,7 +772,7 @@
+ spin_lock_irqsave(&q->lock, flags);
+ while (!skb_queue_empty(q)) {
+ spin_unlock_irqrestore(&q->lock, flags);
+- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_lock_irqsave(&q->lock, flags);
+ }
+diff -Nur a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
+--- a/drivers/ntb/test/ntb_perf.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/ntb/test/ntb_perf.c 2018-11-03 16:30:39.479807561 +0000
+@@ -310,7 +310,7 @@
+ if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
+ last_sleep = jiffies;
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+
+ if (unlikely(kthread_should_stop()))
+diff -Nur a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
+--- a/drivers/scsi/fnic/fnic_scsi.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/scsi/fnic/fnic_scsi.c 2018-11-03 16:30:39.480807592 +0000
+@@ -217,7 +217,7 @@
+
+ /* wait for io cmpl */
+ while (atomic_read(&fnic->in_flight))
+- schedule_timeout(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout((1));
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+
+@@ -2255,7 +2255,7 @@
+ }
+ }
+
+- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
++ schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
+
+ /* walk again to check, if IOs are still pending in fw */
+ if (fnic_is_abts_pending(fnic, lr_sc))
+diff -Nur a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
+--- a/drivers/scsi/snic/snic_scsi.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/scsi/snic/snic_scsi.c 2018-11-03 16:30:39.481807625 +0000
+@@ -2354,7 +2354,7 @@
+
+ /* Wait for all the IOs that are entered in Qcmd */
+ while (atomic_read(&snic->ios_inflight))
+- schedule_timeout(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout((1));
+
+ ret = snic_issue_hba_reset(snic, sc);
+ if (ret) {
+diff -Nur a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c 2018-11-03 16:30:39.483807688 +0000
+@@ -4657,7 +4657,7 @@
+ if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
+ break;
+ set_current_state(TASK_INTERRUPTIBLE);
+- if (schedule_timeout(1))
++ if (schedule_min_hrtimeout())
+ return -EIO;
+ }
+ if (i == timeout) {
+diff -Nur a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
+--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c 2018-11-03 16:30:39.483807688 +0000
+@@ -329,7 +329,7 @@
+ schedule();
+ } else {
+ now = jiffies;
+- schedule_timeout(msecs_to_jiffies(tms));
++ schedule_msec_hrtimeout((tms));
+ tms -= jiffies_to_msecs(jiffies - now);
+ if (tms < 0) /* no more wait but may have new event */
+ tms = 0;
+diff -Nur a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
+--- a/drivers/staging/rts5208/rtsx.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/rts5208/rtsx.c 2018-11-03 16:30:39.483807688 +0000
+@@ -524,7 +524,7 @@
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
++ schedule_msec_hrtimeout((POLLING_INTERVAL));
+
+ /* lock the device pointers */
+ mutex_lock(&dev->dev_mutex);
+diff -Nur a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
+--- a/drivers/staging/speakup/speakup_acntpc.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_acntpc.c 2018-11-03 16:30:39.484807721 +0000
+@@ -206,7 +206,7 @@
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout((full_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -234,7 +234,7 @@
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ }
+diff -Nur a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
+--- a/drivers/staging/speakup/speakup_apollo.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_apollo.c 2018-11-03 16:30:39.484807721 +0000
+@@ -174,7 +174,7 @@
+ if (!synth->io_ops->synth_out(synth, ch)) {
+ synth->io_ops->tiocmset(0, UART_MCR_RTS);
+ synth->io_ops->tiocmset(UART_MCR_RTS, 0);
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout(full_time_val);
+ continue;
+ }
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
+diff -Nur a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
+--- a/drivers/staging/speakup/speakup_decext.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_decext.c 2018-11-03 16:30:39.484807721 +0000
+@@ -185,7 +185,7 @@
+ if (ch == '\n')
+ ch = 0x0D;
+ if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff -Nur a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
+--- a/drivers/staging/speakup/speakup_decpc.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_decpc.c 2018-11-03 16:30:39.484807721 +0000
+@@ -403,7 +403,7 @@
+ if (ch == '\n')
+ ch = 0x0D;
+ if (dt_sendchar(ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff -Nur a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
+--- a/drivers/staging/speakup/speakup_dectlk.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_dectlk.c 2018-11-03 16:30:39.485807753 +0000
+@@ -253,7 +253,7 @@
+ if (ch == '\n')
+ ch = 0x0D;
+ if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout(delay_time_val);
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+diff -Nur a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
+--- a/drivers/staging/speakup/speakup_dtlk.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_dtlk.c 2018-11-03 16:30:39.485807753 +0000
+@@ -220,7 +220,7 @@
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -236,7 +236,7 @@
+ delay_time_val = delay_time->u.n.value;
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ jiff_max = jiffies + jiffy_delta_val;
+ }
+ }
+diff -Nur a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
+--- a/drivers/staging/speakup/speakup_keypc.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/speakup_keypc.c 2018-11-03 16:30:39.485807753 +0000
+@@ -208,7 +208,7 @@
+ full_time_val = full_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ if (synth_full()) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout((full_time_val));
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+@@ -241,7 +241,7 @@
+ jiffy_delta_val = jiffy_delta->u.n.value;
+ delay_time_val = delay_time->u.n.value;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+- schedule_timeout(msecs_to_jiffies(delay_time_val));
++ schedule_msec_hrtimeout((delay_time_val));
+ jiff_max = jiffies+jiffy_delta_val;
+ }
+ }
+diff -Nur a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
+--- a/drivers/staging/speakup/synth.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/speakup/synth.c 2018-11-03 16:30:39.486807785 +0000
+@@ -92,7 +92,7 @@
+ if (ch == '\n')
+ ch = synth->procspeech;
+ if (!synth->io_ops->synth_out(synth, ch)) {
+- schedule_timeout(msecs_to_jiffies(full_time_val));
++ schedule_msec_hrtimeout(full_time_val);
+ continue;
+ }
+ if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
+diff -Nur a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
+--- a/drivers/staging/unisys/visornic/visornic_main.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/staging/unisys/visornic/visornic_main.c 2018-11-03 16:30:39.486807785 +0000
+@@ -556,7 +556,7 @@
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- wait += schedule_timeout(msecs_to_jiffies(10));
++ wait += schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ }
+
+@@ -567,7 +567,7 @@
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- schedule_timeout(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ if (atomic_read(&devdata->usage))
+ break;
+@@ -721,7 +721,7 @@
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+- wait += schedule_timeout(msecs_to_jiffies(10));
++ wait += schedule_msec_hrtimeout((10));
+ spin_lock_irqsave(&devdata->priv_lock, flags);
+ }
+
+diff -Nur a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+--- a/drivers/target/target_core_user.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/target/target_core_user.c 2018-11-03 16:30:39.487807817 +0000
+@@ -808,10 +808,9 @@
+ pr_debug("sleeping for ring space\n");
+ mutex_unlock(&udev->cmdr_lock);
+ if (udev->cmd_time_out)
+- ret = schedule_timeout(
+- msecs_to_jiffies(udev->cmd_time_out));
++ ret = schedule_msec_hrtimeout(udev->cmd_time_out);
+ else
+- ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
++ ret = schedule_msec_hrtimeout(TCMU_TIME_OUT);
+ finish_wait(&udev->wait_cmdr, &__wait);
+ if (!ret) {
+ pr_warn("tcmu: command timed out\n");
+diff -Nur a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
+--- a/drivers/video/fbdev/omap/hwa742.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/video/fbdev/omap/hwa742.c 2018-11-03 16:30:39.487807817 +0000
+@@ -926,7 +926,7 @@
+ if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(5));
++ schedule_msec_hrtimeout((5));
+ }
+ hwa742_set_update_mode(hwa742.update_mode_before_suspend);
+ }
+diff -Nur a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
+--- a/drivers/video/fbdev/pxafb.c 2018-10-10 07:54:28.000000000 +0100
++++ b/drivers/video/fbdev/pxafb.c 2018-11-03 16:30:39.488807849 +0000
+@@ -1286,7 +1286,7 @@
+ mutex_unlock(&fbi->ctrlr_lock);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule_timeout(msecs_to_jiffies(30));
++ schedule_msec_hrtimeout((30));
+ }
+
+ pr_debug("%s(): task ending\n", __func__);
+diff -Nur a/fs/afs/vlocation.c b/fs/afs/vlocation.c
+--- a/fs/afs/vlocation.c 2018-10-10 07:54:28.000000000 +0100
++++ b/fs/afs/vlocation.c 2018-11-03 16:30:39.488807849 +0000
+@@ -129,7 +129,7 @@
+ if (vl->upd_busy_cnt > 1) {
+ /* second+ BUSY - sleep a little bit */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ }
+ continue;
+ }
+diff -Nur a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+--- a/fs/btrfs/extent-tree.c 2018-10-10 07:54:28.000000000 +0100
++++ b/fs/btrfs/extent-tree.c 2018-11-03 16:30:39.491807945 +0000
+@@ -6106,7 +6106,7 @@
+
+ if (flush != BTRFS_RESERVE_NO_FLUSH &&
+ btrfs_transaction_in_commit(fs_info))
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+
+ if (delalloc_lock)
+ mutex_lock(&inode->delalloc_mutex);
+diff -Nur a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+--- a/fs/btrfs/inode-map.c 2018-10-10 07:54:28.000000000 +0100
++++ b/fs/btrfs/inode-map.c 2018-11-03 16:30:39.492807977 +0000
+@@ -89,7 +89,7 @@
+ btrfs_release_path(path);
+ root->ino_cache_progress = last;
+ up_read(&fs_info->commit_root_sem);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ goto again;
+ } else
+ continue;
+diff -Nur a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
+--- a/sound/usb/line6/pcm.c 2018-10-10 07:54:28.000000000 +0100
++++ b/sound/usb/line6/pcm.c 2018-11-03 16:30:39.492807977 +0000
+@@ -131,7 +131,7 @@
+ if (!alive)
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_timeout(1);
++ schedule_min_hrtimeout();
+ } while (--timeout > 0);
+ if (alive)
+ dev_err(line6pcm->line6->ifcdev,
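Two things worth noting about the bulk conversion above: the plain schedule_min_hrtimeout() and schedule_msec_hrtimeout() calls deliberately keep the surrounding set_current_state()/current->state assignments, because unlike the _interruptible/_uninterruptible wrappers they do not set a task state themselves; and the doubled parentheses in calls such as schedule_msec_hrtimeout((10)) are harmless, apparently left over from stripping the msecs_to_jiffies() wrapper out of the original arguments. The typical before/after shape of a hunk, shown as a hypothetical example_poll_delay() rather than any one specific driver:

#include <linux/sched.h>

static void example_poll_delay(void)
{
	/* before: set_current_state(TASK_INTERRUPTIBLE);
	 *         schedule_timeout(msecs_to_jiffies(10));   10 ms steps at HZ=100
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_msec_hrtimeout(10);	/* after: ~10 ms via hrtimers, regardless of HZ */
}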
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch b/sys-kernel/linux-sources-redcore-lts/files/0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
new file mode 100644
index 00000000..f9f274ce
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
@@ -0,0 +1,311 @@
+From 3ef5df78c2f425115b87f0f2f59fd189c0f1bbe3 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Mon, 20 Feb 2017 13:30:07 +1100
+Subject: [PATCH 08/16] Replace all calls to schedule_timeout_interruptible of
+ potentially under 50ms to use schedule_msec_hrtimeout_interruptible.
+
+---
+ drivers/hwmon/fam15h_power.c | 2 +-
+ drivers/iio/light/tsl2563.c | 6 +-----
+ drivers/media/i2c/msp3400-driver.c | 4 ++--
+ drivers/media/pci/ivtv/ivtv-gpio.c | 6 +++---
+ drivers/media/radio/radio-mr800.c | 2 +-
+ drivers/media/radio/radio-tea5777.c | 2 +-
+ drivers/media/radio/tea575x.c | 2 +-
+ drivers/parport/ieee1284.c | 2 +-
+ drivers/parport/ieee1284_ops.c | 2 +-
+ drivers/platform/x86/intel_ips.c | 8 ++++----
+ net/core/pktgen.c | 2 +-
+ sound/soc/codecs/wm8350.c | 12 ++++++------
+ sound/soc/codecs/wm8900.c | 2 +-
+ sound/soc/codecs/wm9713.c | 4 ++--
+ 14 files changed, 26 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
+index 9545a346044f..c24cf1302ec7 100644
+--- a/drivers/hwmon/fam15h_power.c
++++ b/drivers/hwmon/fam15h_power.c
+@@ -237,7 +237,7 @@ static ssize_t power1_average_show(struct device *dev,
+ prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
+ }
+
+- leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
++ leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
+ if (leftover)
+ return 0;
+
+diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
+index 7599693f7fe9..452090739138 100644
+--- a/drivers/iio/light/tsl2563.c
++++ b/drivers/iio/light/tsl2563.c
+@@ -282,11 +282,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
+ default:
+ delay = 402;
+ }
+- /*
+- * TODO: Make sure that we wait at least required delay but why we
+- * have to extend it one tick more?
+- */
+- schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
++ schedule_msec_hrtimeout_interruptible(delay + 1);
+ }
+
+ static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
+diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
+index 3db966db83eb..f0fab7676f72 100644
+--- a/drivers/media/i2c/msp3400-driver.c
++++ b/drivers/media/i2c/msp3400-driver.c
+@@ -179,7 +179,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
+ break;
+ dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
+ dev, addr);
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ }
+ if (err == 3) {
+ dev_warn(&client->dev, "resetting chip, sound will go off.\n");
+@@ -220,7 +220,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
+ break;
+ dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
+ dev, addr);
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ }
+ if (err == 3) {
+ dev_warn(&client->dev, "resetting chip, sound will go off.\n");
+diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
+index f752f3993687..23372af61ebf 100644
+--- a/drivers/media/pci/ivtv/ivtv-gpio.c
++++ b/drivers/media/pci/ivtv/ivtv-gpio.c
+@@ -117,7 +117,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
+ curout = (curout & ~0xF) | 1;
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+ /* We could use something else for smaller time */
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ curout |= 2;
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+ curdir &= ~0x80;
+@@ -137,11 +137,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
+ curout = read_reg(IVTV_REG_GPIO_OUT);
+ curout &= ~(1 << itv->card->xceive_pin);
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+
+ curout |= 1 << itv->card->xceive_pin;
+ write_reg(curout, IVTV_REG_GPIO_OUT);
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ return 0;
+ }
+
+diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
+index c9f59129af79..cb6f8394a5c2 100644
+--- a/drivers/media/radio/radio-mr800.c
++++ b/drivers/media/radio/radio-mr800.c
+@@ -378,7 +378,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
+ retval = -ENODATA;
+ break;
+ }
+- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
++ if (schedule_msec_hrtimeout_interruptible((10))) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
+index 04ed1a5d1177..d593d28dc286 100644
+--- a/drivers/media/radio/radio-tea5777.c
++++ b/drivers/media/radio/radio-tea5777.c
+@@ -245,7 +245,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
+ }
+
+ if (wait) {
+- if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
++ if (schedule_msec_hrtimeout_interruptible((wait)))
+ return -ERESTARTSYS;
+ }
+
+diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
+index 4dc2067bce14..29f4416fb9ae 100644
+--- a/drivers/media/radio/tea575x.c
++++ b/drivers/media/radio/tea575x.c
+@@ -416,7 +416,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
++ if (schedule_msec_hrtimeout_interruptible((10))) {
+ /* some signal arrived, stop search */
+ tea->val &= ~TEA575X_BIT_SEARCH;
+ snd_tea575x_set_freq(tea);
+diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
+index 74cc6dd982d2..c22c4d5f08d0 100644
+--- a/drivers/parport/ieee1284.c
++++ b/drivers/parport/ieee1284.c
+@@ -215,7 +215,7 @@ int parport_wait_peripheral(struct parport *port,
+ /* parport_wait_event didn't time out, but the
+ * peripheral wasn't actually ready either.
+ * Wait for another 10ms. */
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ }
+ }
+
+diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
+index 5d41dda6da4e..34705f6b423f 100644
+--- a/drivers/parport/ieee1284_ops.c
++++ b/drivers/parport/ieee1284_ops.c
+@@ -537,7 +537,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
+ /* Yield the port for a while. */
+ if (count && dev->port->irq != PARPORT_IRQ_NONE) {
+ parport_release (dev);
+- schedule_timeout_interruptible(msecs_to_jiffies(40));
++ schedule_msec_hrtimeout_interruptible((40));
+ parport_claim_or_block (dev);
+ }
+ else
+diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
+index 58dcee562d64..b661b7c071bb 100644
+--- a/drivers/platform/x86/intel_ips.c
++++ b/drivers/platform/x86/intel_ips.c
+@@ -813,7 +813,7 @@ static int ips_adjust(void *data)
+ ips_gpu_lower(ips);
+
+ sleep:
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD));
+ } while (!kthread_should_stop());
+
+ dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");
+@@ -992,7 +992,7 @@ static int ips_monitor(void *data)
+ seqno_timestamp = get_jiffies_64();
+
+ old_cpu_power = thm_readl(THM_CEC);
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
+
+ /* Collect an initial average */
+ for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
+@@ -1019,7 +1019,7 @@ static int ips_monitor(void *data)
+ mchp_samples[i] = mchp;
+ }
+
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
+ if (kthread_should_stop())
+ break;
+ }
+@@ -1046,7 +1046,7 @@ static int ips_monitor(void *data)
+ * us to reduce the sample frequency if the CPU and GPU are idle.
+ */
+ old_cpu_power = thm_readl(THM_CEC);
+- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
+ last_sample_period = IPS_SAMPLE_PERIOD;
+
+ setup_deferrable_timer_on_stack(&timer, monitor_timeout,
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 6e1e10ff433a..be5d6f7142e4 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1992,7 +1992,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
+ mutex_unlock(&pktgen_thread_lock);
+ pr_debug("%s: waiting for %s to disappear....\n",
+ __func__, ifname);
+- schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
++ schedule_msec_hrtimeout_interruptible((msec_per_try));
+ mutex_lock(&pktgen_thread_lock);
+
+ if (++i >= max_tries) {
+diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
+index 2efc5b41ad0f..3e3248c48c6b 100644
+--- a/sound/soc/codecs/wm8350.c
++++ b/sound/soc/codecs/wm8350.c
+@@ -236,10 +236,10 @@ static void wm8350_pga_work(struct work_struct *work)
+ out2->ramp == WM8350_RAMP_UP) {
+ /* delay is longer over 0dB as increases are larger */
+ if (i >= WM8350_OUTn_0dB)
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (2));
+ else
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (1));
+ } else
+ udelay(50); /* doesn't matter if we delay longer */
+@@ -1123,7 +1123,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ (platform->dis_out4 << 6));
+
+ /* wait for discharge */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->
+ cap_discharge_msecs));
+
+@@ -1139,7 +1139,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ WM8350_VBUFEN);
+
+ /* wait for vmid */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->
+ vmid_charge_msecs));
+
+@@ -1190,7 +1190,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
+
+ /* wait */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->
+ vmid_discharge_msecs));
+
+@@ -1208,7 +1208,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
+ pm1 | WM8350_OUTPUT_DRAIN_EN);
+
+ /* wait */
+- schedule_timeout_interruptible(msecs_to_jiffies
++ schedule_msec_hrtimeout_interruptible(
+ (platform->drain_msecs));
+
+ pm1 &= ~WM8350_BIASEN;
+diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
+index c77b49a29311..fc50456e90a9 100644
+--- a/sound/soc/codecs/wm8900.c
++++ b/sound/soc/codecs/wm8900.c
+@@ -1112,7 +1112,7 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec,
+ /* Need to let things settle before stopping the clock
+ * to ensure that restart works, see "Stopping the
+ * master clock" in the datasheet. */
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ snd_soc_write(codec, WM8900_REG_POWER2,
+ WM8900_REG_POWER2_SYSCLK_ENA);
+ break;
+diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
+index 7e4822185feb..0c85a207446a 100644
+--- a/sound/soc/codecs/wm9713.c
++++ b/sound/soc/codecs/wm9713.c
+@@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
+
+ /* Gracefully shut down the voice interface. */
+ snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0200);
+- schedule_timeout_interruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_interruptible((1));
+ snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
+ snd_soc_update_bits(codec, AC97_EXTENDED_MID, 0x1000, 0x1000);
+
+@@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_codec *codec,
+ wm9713->pll_in = freq_in;
+
+ /* wait 10ms AC97 link frames for the link to stabilise */
+- schedule_timeout_interruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_interruptible((10));
+ return 0;
+ }
+
+--
+2.11.0
+
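As in the radio-mr800 and tea575x hunks above, a non-zero return from schedule_msec_hrtimeout_interruptible() means the sleep ended early, in practice because a signal became pending, which callers typically translate into -ERESTARTSYS. A minimal sketch; poll_with_signals() and the done flag are hypothetical:

#include <linux/sched.h>
#include <linux/errno.h>

static int poll_with_signals(const volatile int *done)
{
	while (!*done) {
		/* sleeps ~10 ms; returns the time left if cut short */
		if (schedule_msec_hrtimeout_interruptible(10))
			return -ERESTARTSYS;
	}
	return 0;
}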
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch b/sys-kernel/linux-sources-redcore-lts/files/0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
new file mode 100644
index 00000000..c910f3df
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
@@ -0,0 +1,160 @@
+From 6044370cf4bbc5e05f5d78f5772c1d88e3153603 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Mon, 20 Feb 2017 13:30:32 +1100
+Subject: [PATCH 09/16] Replace all calls to schedule_timeout_uninterruptible
+ of potentially under 50ms to use schedule_msec_hrtimeout_uninterruptible
+
+---
+ drivers/media/pci/cx18/cx18-gpio.c | 4 ++--
+ drivers/net/wireless/intel/ipw2x00/ipw2100.c | 4 ++--
+ drivers/rtc/rtc-wm8350.c | 6 +++---
+ drivers/scsi/lpfc/lpfc_scsi.c | 2 +-
+ sound/pci/maestro3.c | 4 ++--
+ sound/soc/codecs/rt5631.c | 4 ++--
+ sound/soc/soc-dapm.c | 2 +-
+ 7 files changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
+index 012859e6dc7b..206bd08265a5 100644
+--- a/drivers/media/pci/cx18/cx18-gpio.c
++++ b/drivers/media/pci/cx18/cx18-gpio.c
+@@ -90,11 +90,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
+
+ /* Assert */
+ gpio_update(cx, mask, ~active_lo);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
++ schedule_msec_hrtimeout_uninterruptible((assert_msecs));
+
+ /* Deassert */
+ gpio_update(cx, mask, ~active_hi);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
++ schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
+ }
+
+ /*
+diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+index 19c442cb93e4..448f41782060 100644
+--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
++++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+@@ -830,7 +830,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
+ * doesn't seem to have as many firmware restart cycles...
+ *
+ * As a test, we're sticking in a 1/100s delay here */
+- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_uninterruptible((10));
+
+ return 0;
+
+@@ -1281,7 +1281,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
+ IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
+ i = 5000;
+ do {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(40));
++ schedule_msec_hrtimeout_uninterruptible((40));
+ /* Todo... wait for sync command ... */
+
+ read_register(priv->net_dev, IPW_REG_INTA, &inta);
+diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
+index 483c7993516b..fddbaa475066 100644
+--- a/drivers/rtc/rtc-wm8350.c
++++ b/drivers/rtc/rtc-wm8350.c
+@@ -119,7 +119,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
+ /* Wait until confirmation of stopping */
+ do {
+ rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_uninterruptible((1));
+ } while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
+
+ if (!retries) {
+@@ -202,7 +202,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
+ /* Wait until confirmation of stopping */
+ do {
+ rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_uninterruptible((1));
+ } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
+
+ if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
+@@ -225,7 +225,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
+ /* Wait until confirmation */
+ do {
+ rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
+- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
++ schedule_msec_hrtimeout_uninterruptible((1));
+ } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
+
+ if (rtc_ctrl & WM8350_RTC_ALMSTS)
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 1a6f122bb25d..c0db66302a3e 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -5131,7 +5131,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
+ tgt_id, lun_id, context);
+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ while (time_after(later, jiffies) && cnt) {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(20));
++ schedule_msec_hrtimeout_uninterruptible((20));
+ cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
+ }
+ if (cnt) {
+diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
+index 8f20dec97843..944ce63431b0 100644
+--- a/sound/pci/maestro3.c
++++ b/sound/pci/maestro3.c
+@@ -2016,7 +2016,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
+ outw(0, io + GPIO_DATA);
+ outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
+
+- schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
++ schedule_msec_hrtimeout_uninterruptible((delay1));
+
+ outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
+ udelay(5);
+@@ -2024,7 +2024,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
+ outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
+ outw(~0, io + GPIO_MASK);
+
+- schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
++ schedule_msec_hrtimeout_uninterruptible((delay2));
+
+ if (! snd_m3_try_read_vendor(chip))
+ break;
+diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
+index 55b04c55fb4b..2ed02ad6ac41 100644
+--- a/sound/soc/codecs/rt5631.c
++++ b/sound/soc/codecs/rt5631.c
+@@ -419,7 +419,7 @@ static void onebit_depop_mute_stage(struct snd_soc_codec *codec, int enable)
+ hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
+ snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
+ if (enable) {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_uninterruptible((10));
+ /* config one-bit depop parameter */
+ rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x307f);
+ snd_soc_update_bits(codec, RT5631_HP_OUT_VOL,
+@@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_codec *codec, int enable)
+ hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
+ snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
+ if (enable) {
+- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
++ schedule_msec_hrtimeout_uninterruptible((10));
+
+ /* config depop sequence parameter */
+ rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x302f);
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index dcef67a9bd48..11c2bb48c8f2 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -134,7 +134,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
+ static void pop_wait(u32 pop_time)
+ {
+ if (pop_time)
+- schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
++ schedule_msec_hrtimeout_uninterruptible((pop_time));
+ }
+
+ static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
+--
+2.11.0
+
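The patch above mechanically converts tick-based uninterruptible sleeps of potentially under 50 ms to the schedule_msec_hrtimeout_uninterruptible() helper introduced earlier in this series. As a rough userspace illustration of why sub-tick precision matters once the timer frequency drops to 100 Hz (CONFIG_HZ=100 in the config further down), here is a minimal sketch using POSIX clock_nanosleep(); it is not the in-kernel API, and the 1 ms delay is an arbitrary example value.

/* Minimal userspace sketch, not the kernel API: a high-resolution sleep of
 * 1 ms returns close to 1 ms, whereas a purely tick-based sleep at HZ=100
 * is quantised to 10 ms jiffies, which is the rounding the in-kernel
 * schedule_msec_hrtimeout_* helpers avoid. */
#include <stdio.h>
#include <time.h>

static long long elapsed_ns(struct timespec a, struct timespec b)
{
	return (b.tv_sec - a.tv_sec) * 1000000000LL + (b.tv_nsec - a.tv_nsec);
}

int main(void)
{
	struct timespec start, end;
	struct timespec req = { .tv_sec = 0, .tv_nsec = 1000 * 1000 }; /* 1 ms */

	clock_gettime(CLOCK_MONOTONIC, &start);
	clock_nanosleep(CLOCK_MONOTONIC, 0, &req, NULL);
	clock_gettime(CLOCK_MONOTONIC, &end);

	printf("requested 1 ms, slept %.3f ms\n", elapsed_ns(start, end) / 1e6);
	return 0;
}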
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch b/sys-kernel/linux-sources-redcore-lts/files/0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
new file mode 100644
index 00000000..260bb98d
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
@@ -0,0 +1,69 @@
+From 071486de633698dcdd163295173ce4663ec9158c Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Mon, 20 Feb 2017 13:32:58 +1100
+Subject: [PATCH 10/16] Don't use hrtimer overlay when pm_freezing since some
+ drivers still don't correctly use freezable timeouts.
+
+---
+ kernel/time/hrtimer.c | 2 +-
+ kernel/time/timer.c | 9 +++++----
+ 2 files changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 13227cf2814c..66456c72bace 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1809,7 +1809,7 @@ long __sched schedule_msec_hrtimeout(long timeout)
+ * (yet) better than Hz, as would occur during startup, use regular
+ * timers.
+ */
+- if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
++ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
+ return schedule_timeout(jiffs);
+
+ secs = timeout / 1000;
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index c68cb9307f64..2f2c96b03efe 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -44,6 +44,7 @@
+ #include <linux/sched/debug.h>
+ #include <linux/slab.h>
+ #include <linux/compat.h>
++#include <linux/freezer.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+@@ -1891,12 +1892,12 @@ void msleep(unsigned int msecs)
+ * Use high resolution timers where the resolution of tick based
+ * timers is inadequate.
+ */
+- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
+ while (msecs)
+ msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
+ return;
+ }
+- timeout = msecs_to_jiffies(msecs) + 1;
++ timeout = jiffs + 1;
+
+ while (timeout)
+ timeout = schedule_timeout_uninterruptible(timeout);
+@@ -1913,12 +1914,12 @@ unsigned long msleep_interruptible(unsigned int msecs)
+ int jiffs = msecs_to_jiffies(msecs);
+ unsigned long timeout;
+
+- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
+ while (msecs && !signal_pending(current))
+ msecs = schedule_msec_hrtimeout_interruptible(msecs);
+ return msecs;
+ }
+- timeout = msecs_to_jiffies(msecs) + 1;
++ timeout = jiffs + 1;
+
+ while (timeout && !signal_pending(current))
+ timeout = schedule_timeout_interruptible(timeout);
+--
+2.11.0
+
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch b/sys-kernel/linux-sources-redcore-lts/files/0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
new file mode 100644
index 00000000..5ac20300
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
@@ -0,0 +1,136 @@
+diff -Nur a/kernel/sysctl.c b/kernel/sysctl.c
+--- a/kernel/sysctl.c 2018-11-03 17:03:07.433069521 +0000
++++ b/kernel/sysctl.c 2018-11-03 17:02:11.020267246 +0000
+@@ -141,7 +141,9 @@
+ extern int sched_iso_cpu;
+ extern int sched_yield_type;
+ #endif
+-#ifdef CONFIG_PRINTK
++extern int hrtimer_granularity_us;
++extern int hrtimeout_min_us;
++#if defined(CONFIG_PRINTK) || defined(CONFIG_SCHED_MUQSS)
+ static int ten_thousand __read_only = 10000;
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
+@@ -1119,6 +1121,24 @@
+ .extra2 = &two,
+ },
+ #endif
++ {
++ .procname = "hrtimer_granularity_us",
++ .data = &hrtimer_granularity_us,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &ten_thousand,
++ },
++ {
++ .procname = "hrtimeout_min_us",
++ .data = &hrtimeout_min_us,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &ten_thousand,
++ },
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ {
+ .procname = "spin_retry",
+diff -Nur a/kernel/time/clockevents.c b/kernel/time/clockevents.c
+--- a/kernel/time/clockevents.c 2018-11-03 17:03:07.433069521 +0000
++++ b/kernel/time/clockevents.c 2018-11-03 16:58:17.283800909 +0000
+@@ -198,13 +198,9 @@
+
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
+
+-#ifdef CONFIG_SCHED_MUQSS
++int __read_mostly hrtimer_granularity_us = 100;
+ /* Limit min_delta to 100us */
+-#define MIN_DELTA_LIMIT (NSEC_PER_SEC / 10000)
+-#else
+-/* Limit min_delta to a jiffie */
+-#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
+-#endif
++#define MIN_DELTA_LIMIT (hrtimer_granularity_us * NSEC_PER_USEC)
+
+ /**
+ * clockevents_increase_min_delta - raise minimum delta of a clock event device
+diff -Nur a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+--- a/kernel/time/hrtimer.c 2018-11-03 17:04:16.448274547 +0000
++++ b/kernel/time/hrtimer.c 2018-11-03 16:58:17.283800909 +0000
+@@ -1803,7 +1803,7 @@
+ long __sched schedule_msec_hrtimeout(long timeout)
+ {
+ struct hrtimer_sleeper t;
+- int delta, secs, jiffs;
++ int delta, jiffs;
+ ktime_t expires;
+
+ if (!timeout) {
+@@ -1820,9 +1820,8 @@
+ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
+ return schedule_timeout(jiffs);
+
+- secs = timeout / 1000;
+ delta = (timeout % 1000) * NSEC_PER_MSEC;
+- expires = ktime_set(secs, delta);
++ expires = ktime_set(0, delta);
+
+ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
+@@ -1846,9 +1845,53 @@
+
+ EXPORT_SYMBOL(schedule_msec_hrtimeout);
+
++#define USECS_PER_SEC 1000000
++extern int hrtimer_granularity_us;
++
++static inline long schedule_usec_hrtimeout(long timeout)
++{
++ struct hrtimer_sleeper t;
++ ktime_t expires;
++ int delta;
++
++ if (!timeout) {
++ __set_current_state(TASK_RUNNING);
++ return 0;
++ }
++
++ if (hrtimer_resolution >= NSEC_PER_SEC / HZ)
++ return schedule_timeout(usecs_to_jiffies(timeout));
++
++ if (timeout < hrtimer_granularity_us)
++ timeout = hrtimer_granularity_us;
++ delta = (timeout % USECS_PER_SEC) * NSEC_PER_USEC;
++ expires = ktime_set(0, delta);
++
++ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
++
++ hrtimer_init_sleeper(&t, current);
++
++ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
++
++ if (likely(t.task))
++ schedule();
++
++ hrtimer_cancel(&t.timer);
++ destroy_hrtimer_on_stack(&t.timer);
++
++ __set_current_state(TASK_RUNNING);
++
++ expires = hrtimer_expires_remaining(&t.timer);
++ timeout = ktime_to_us(expires);
++ return timeout < 0 ? 0 : timeout;
++}
++
++int __read_mostly hrtimeout_min_us = 1000;
++
+ long __sched schedule_min_hrtimeout(void)
+ {
+- return schedule_msec_hrtimeout(1);
++ return usecs_to_jiffies(schedule_usec_hrtimeout(hrtimeout_min_us));
+ }
+
+ EXPORT_SYMBOL(schedule_min_hrtimeout);
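The sysctl hunk above exposes hrtimer_granularity_us and hrtimeout_min_us, both clamped to the 1..10000 range via &one and &ten_thousand. Below is a small sketch for inspecting them on a running kernel; it assumes the entries land under /proc/sys/kernel/, which is the usual location for kern_table additions but is an assumption of this example, not something stated in the patch.

/* Assumes the patched kernel exposes the new knobs under /proc/sys/kernel/;
 * on an unpatched kernel the files simply do not exist. */
#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		printf("%s: not available (unpatched kernel?)\n", path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s = %s", path, buf);
	fclose(f);
}

int main(void)
{
	show("/proc/sys/kernel/hrtimer_granularity_us");
	show("/proc/sys/kernel/hrtimeout_min_us");
	return 0;
}

If the path assumption holds, the same values can be tuned at runtime as root with sysctl -w, e.g. kernel.hrtimeout_min_us=500.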
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch b/sys-kernel/linux-sources-redcore-lts/files/0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
new file mode 100644
index 00000000..99b28d65
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
@@ -0,0 +1,81 @@
+From 9e47a80f690080c12ce607158b96c305707543b8 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Wed, 7 Dec 2016 21:23:01 +1100
+Subject: [PATCH 12/16] Reinstate default Hz of 100 in combination with MuQSS
+ and -ck patches.
+
+---
+ kernel/Kconfig.hz | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
+index 2a202a846757..1806fcac8f14 100644
+--- a/kernel/Kconfig.hz
++++ b/kernel/Kconfig.hz
+@@ -4,7 +4,8 @@
+
+ choice
+ prompt "Timer frequency"
+- default HZ_250
++ default HZ_100 if SCHED_MUQSS
++ default HZ_250_NODEF if !SCHED_MUQSS
+ help
+ Allows the configuration of the timer frequency. It is customary
+ to have the timer interrupt run at 1000 Hz but 100 Hz may be more
+@@ -19,11 +20,18 @@ choice
+ config HZ_100
+ bool "100 HZ"
+ help
++ 100 Hz is a suitable choice in combination with MuQSS which does
++ not rely on ticks for rescheduling interrupts, and is not Hz limited
++ for timeouts and sleeps from both the kernel and userspace.
++ This allows us to benefit from the lower overhead and higher
++ throughput of fewer timer ticks.
++
++ Non-MuQSS kernels:
+ 100 Hz is a typical choice for servers, SMP and NUMA systems
+ with lots of processors that may show reduced performance if
+ too many timer interrupts are occurring.
+
+- config HZ_250
++ config HZ_250_NODEF
+ bool "250 HZ"
+ help
+ 250 Hz is a good compromise choice allowing server performance
+@@ -31,7 +39,10 @@ choice
+ on SMP and NUMA systems. If you are going to be using NTSC video
+ or multimedia, selected 300Hz instead.
+
+- config HZ_300
++ 250 Hz is the default choice for the mainline scheduler but not
++ advantageous in combination with MuQSS.
++
++ config HZ_300_NODEF
+ bool "300 HZ"
+ help
+ 300 Hz is a good compromise choice allowing server performance
+@@ -39,7 +50,7 @@ choice
+ on SMP and NUMA systems and exactly dividing by both PAL and
+ NTSC frame rates for video and multimedia work.
+
+- config HZ_1000
++ config HZ_1000_NODEF
+ bool "1000 HZ"
+ help
+ 1000 Hz is the preferred choice for desktop systems and other
+@@ -50,9 +61,9 @@ endchoice
+ config HZ
+ int
+ default 100 if HZ_100
+- default 250 if HZ_250
+- default 300 if HZ_300
+- default 1000 if HZ_1000
++ default 250 if HZ_250_NODEF
++ default 300 if HZ_300_NODEF
++ default 1000 if HZ_1000_NODEF
+
+ config SCHED_HRTICK
+ def_bool HIGH_RES_TIMERS
+--
+2.11.0
+
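Because the redcore-lts config further down keeps CONFIG_IKCONFIG=y and CONFIG_IKCONFIG_PROC=y, the resulting timer frequency can be verified on a running system from the built-in configuration. A small sketch, assuming zlib is installed and /proc/config.gz is present (build with: cc hz.c -lz).

/* Prints the CONFIG_HZ lines from the running kernel's built-in config.
 * Assumes CONFIG_IKCONFIG_PROC=y, as set in the config in this commit. */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	char line[256];
	gzFile cfg = gzopen("/proc/config.gz", "rb");

	if (!cfg) {
		fprintf(stderr, "no /proc/config.gz (CONFIG_IKCONFIG_PROC not set?)\n");
		return 1;
	}
	while (gzgets(cfg, line, sizeof(line))) {
		/* Expect CONFIG_HZ_100=y and CONFIG_HZ=100 on this kernel. */
		if (!strncmp(line, "CONFIG_HZ=", 10) ||
		    !strncmp(line, "CONFIG_HZ_", 10))
			fputs(line, stdout);
	}
	gzclose(cfg);
	return 0;
}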
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch b/sys-kernel/linux-sources-redcore-lts/files/0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
new file mode 100644
index 00000000..63ec9fdf
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
@@ -0,0 +1,61 @@
+From 5902b315d4061ebbe73a62c52e6d3b618066cebc Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Wed, 7 Dec 2016 21:13:16 +1100
+Subject: [PATCH 13/16] Make threaded IRQs optionally the default which can be
+ disabled.
+
+---
+ kernel/irq/Kconfig | 14 ++++++++++++++
+ kernel/irq/manage.c | 10 ++++++++++
+ 2 files changed, 24 insertions(+)
+
+diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
+index a117adf7084b..0984c54fd4e9 100644
+--- a/kernel/irq/Kconfig
++++ b/kernel/irq/Kconfig
+@@ -111,6 +111,20 @@ config IRQ_DOMAIN_DEBUG
+ config IRQ_FORCED_THREADING
+ bool
+
++config FORCE_IRQ_THREADING
++ bool "Make IRQ threading compulsory"
++ depends on IRQ_FORCED_THREADING
++ default y
++ ---help---
++
++ Make IRQ threading mandatory for any IRQ handlers that support it
++ instead of being optional and requiring the threadirqs kernel
++	  parameter. They can then be disabled at boot with the
++	  nothreadirqs kernel parameter.
++
++ Enable if you are building for a desktop or low latency system,
++ otherwise say N.
++
+ config SPARSE_IRQ
+ bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
+ ---help---
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 4bff6a10ae8e..5a6df0dd23c4 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -24,7 +24,17 @@
+ #include "internals.h"
+
+ #ifdef CONFIG_IRQ_FORCED_THREADING
++#ifdef CONFIG_FORCE_IRQ_THREADING
++__read_mostly bool force_irqthreads = true;
++#else
+ __read_mostly bool force_irqthreads;
++#endif
++static int __init setup_noforced_irqthreads(char *arg)
++{
++ force_irqthreads = false;
++ return 0;
++}
++early_param("nothreadirqs", setup_noforced_irqthreads);
+
+ static int __init setup_forced_irqthreads(char *arg)
+ {
+--
+2.11.0
+
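With CONFIG_FORCE_IRQ_THREADING=y (and without the nothreadirqs parameter the patch above introduces), handlers that support threading run as kernel threads conventionally named irq/<nr>-<name>. A rough sketch that counts such threads by scanning /proc; the naming convention is standard kernel behaviour, not something this patch defines.

/* Counts kernel threads whose comm starts with "irq/", a rough indicator
 * that forced IRQ threading is in effect. */
#include <ctype.h>
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	DIR *proc = opendir("/proc");
	struct dirent *de;
	int count = 0;

	if (!proc) {
		perror("/proc");
		return 1;
	}
	while ((de = readdir(proc))) {
		char path[280], comm[64];
		FILE *f;

		if (!isdigit((unsigned char)de->d_name[0]))
			continue;
		snprintf(path, sizeof(path), "/proc/%s/comm", de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(comm, sizeof(comm), f) && !strncmp(comm, "irq/", 4))
			count++;
		fclose(f);
	}
	closedir(proc);
	printf("threaded IRQ handlers: %d\n", count);
	return 0;
}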
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0014-Swap-sucks.patch b/sys-kernel/linux-sources-redcore-lts/files/0014-Swap-sucks.patch
new file mode 100644
index 00000000..6bf5bcda
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0014-Swap-sucks.patch
@@ -0,0 +1,25 @@
+From ed0ab4c80fcb6fa4abb4f2f897e591df6eaa2d0e Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Sat, 12 Aug 2017 12:02:04 +1000
+Subject: [PATCH 14/16] Swap sucks.
+
+---
+ mm/vmscan.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index eb2f0315b8c0..67d03efab288 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -149,7 +149,7 @@ struct scan_control {
+ /*
+ * From 0 .. 100. Higher means more swappy.
+ */
+-int vm_swappiness = 60;
++int vm_swappiness = 33;
+ /*
+ * The total number of pages which are beyond the high watermark within all
+ * zones.
+--
+2.11.0
+
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0015-Enable-BFQ-io-scheduler-by-default.patch b/sys-kernel/linux-sources-redcore-lts/files/0015-Enable-BFQ-io-scheduler-by-default.patch
deleted file mode 100644
index d12753be..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/0015-Enable-BFQ-io-scheduler-by-default.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From 0e7ab31fb218e2a18fbecd19c24dfaae14c88afd Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Mon, 20 Nov 2017 18:02:03 +1100
-Subject: [PATCH 15/18] Enable BFQ io scheduler by default.
-
----
- block/Kconfig.iosched | 2 +-
- drivers/scsi/Kconfig | 1 +
- 2 files changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
-index a4a8914bf7a4..2d9be91e8e87 100644
---- a/block/Kconfig.iosched
-+++ b/block/Kconfig.iosched
-@@ -82,7 +82,7 @@ config MQ_IOSCHED_KYBER
-
- config IOSCHED_BFQ
- tristate "BFQ I/O scheduler"
-- default n
-+ default y
- ---help---
- BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
- of the device among all processes according to their weights,
-diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
-index 8a739b74cfb7..9e939ee76e72 100644
---- a/drivers/scsi/Kconfig
-+++ b/drivers/scsi/Kconfig
-@@ -50,6 +50,7 @@ config SCSI_NETLINK
- config SCSI_MQ_DEFAULT
- bool "SCSI: use blk-mq I/O path by default"
- depends on SCSI
-+ default y
- ---help---
- This option enables the new blk-mq based I/O path for SCSI
- devices by default. With the option the scsi_mod.use_blk_mq
---
-2.14.1
-
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch b/sys-kernel/linux-sources-redcore-lts/files/0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
new file mode 100644
index 00000000..bfa509a5
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
@@ -0,0 +1,19 @@
+diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
+index e84d700709ff6..16364915cff53 100644
+--- a/kernel/sched/MuQSS.c
++++ b/kernel/sched/MuQSS.c
+@@ -70,6 +70,14 @@
+
+ #include "MuQSS.h"
+
++/* MuQSS.c uses get_irq_regs(), so include <asm/irq_regs.h> explicitly;
++ * without it the build fails with:
++ *   implicit declaration of function ‘get_irq_regs’;
++ *   did you mean ‘get_ibs_caps’?
++ *   [-Werror=implicit-function-declaration]
++ */
++#include <asm/irq_regs.h>
++
+ #define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
+ #define rt_task(p) rt_prio((p)->prio)
+ #define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
diff --git a/sys-kernel/linux-sources-redcore-lts/files/0016-unfuck-MuQSS-on-linux-4_14_15+.patch b/sys-kernel/linux-sources-redcore-lts/files/0016-unfuck-MuQSS-on-linux-4_14_15+.patch
new file mode 100644
index 00000000..f7dc1d1c
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore-lts/files/0016-unfuck-MuQSS-on-linux-4_14_15+.patch
@@ -0,0 +1,48 @@
+diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
+index e84d700709ff6..b0be7fcfe41f9 100644
+--- a/kernel/sched/MuQSS.c
++++ b/kernel/sched/MuQSS.c
+@@ -55,6 +55,7 @@
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
+ #include <linux/tick.h>
++#include <linux/version.h>
+
+ #include <asm/switch_to.h>
+ #include <asm/tlb.h>
+@@ -1959,7 +1960,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ p->state = TASK_WAKING;
+
+ if (p->in_iowait) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
+ delayacct_blkio_end();
++#else
++ delayacct_blkio_end(p);
++#endif
+ atomic_dec(&task_rq(p)->nr_iowait);
+ }
+
+@@ -1970,7 +1975,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ #else /* CONFIG_SMP */
+
+ if (p->in_iowait) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
+ delayacct_blkio_end();
++#else
++ delayacct_blkio_end(p);
++#endif
+ atomic_dec(&task_rq(p)->nr_iowait);
+ }
+
+@@ -2022,7 +2031,11 @@ static void try_to_wake_up_local(struct task_struct *p)
+
+ if (!task_on_rq_queued(p)) {
+ if (p->in_iowait) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
+ delayacct_blkio_end();
++#else
++ delayacct_blkio_end(p);
++#endif
+ atomic_dec(&rq->nr_iowait);
+ }
+ ttwu_activate(rq, p);
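The guards above compare LINUX_VERSION_CODE against KERNEL_VERSION(4, 14, 15), the point at which, per this patch, delayacct_blkio_end() started taking a task pointer. A minimal sketch of how that comparison evaluates, using the classic form of the kernel's version macro (reproduced here as an assumption, not taken from the patch):

/* The macro mirrors the classic kernel definition of KERNEL_VERSION(). */
#include <stdio.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	unsigned int running = KERNEL_VERSION(4, 14, 75); /* this commit's LTS kernel */
	unsigned int cutoff  = KERNEL_VERSION(4, 14, 15); /* signature change, per the patch */

	printf("4.14.75 %s 4.14.15, so the patched MuQSS calls delayacct_blkio_end(p)\n",
	       running >= cutoff ? ">=" : "<");
	return 0;
}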
diff --git a/sys-kernel/linux-sources-redcore-lts/files/redcore-lts-amd64.config b/sys-kernel/linux-sources-redcore-lts/files/redcore-lts-amd64.config
index 73c7d194..f41bc39d 100644
--- a/sys-kernel/linux-sources-redcore-lts/files/redcore-lts-amd64.config
+++ b/sys-kernel/linux-sources-redcore-lts/files/redcore-lts-amd64.config
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 4.14.65-redcore-lts Kernel Configuration
+# Linux/x86 4.14.75-redcore-lts Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y
@@ -50,6 +50,7 @@ CONFIG_THREAD_INFO_IN_TASK=y
#
# General setup
#
+CONFIG_SCHED_MUQSS=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_CROSS_COMPILE=""
# CONFIG_COMPILE_TEST is not set
@@ -98,6 +99,7 @@ CONFIG_GENERIC_MSI_IRQ=y
CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
# CONFIG_IRQ_DOMAIN_DEBUG is not set
CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_FORCE_IRQ_THREADING=y
CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_IRQ_DEBUGFS is not set
CONFIG_CLOCKSOURCE_WATCHDOG=y
@@ -113,11 +115,9 @@ CONFIG_GENERIC_CMOS_UPDATE=y
# Timers subsystem
#
CONFIG_TICK_ONESHOT=y
-CONFIG_NO_HZ_COMMON=y
-# CONFIG_HZ_PERIODIC is not set
+CONFIG_HZ_PERIODIC=y
# CONFIG_NO_HZ_IDLE is not set
-CONFIG_NO_HZ_FULL=y
-# CONFIG_NO_HZ_FULL_ALL is not set
+# CONFIG_NO_HZ_FULL is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -125,6 +125,7 @@ CONFIG_HIGH_RES_TIMERS=y
# CPU/Task time and stats accounting
#
CONFIG_VIRT_CPU_ACCOUNTING=y
+# CONFIG_TICK_CPU_ACCOUNTING is not set
CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -146,7 +147,6 @@ CONFIG_RCU_STALL_COMMON=y
CONFIG_RCU_NEED_SEGCBLIST=y
CONFIG_CONTEXT_TRACKING=y
# CONFIG_CONTEXT_TRACKING_FORCE is not set
-CONFIG_RCU_NOCB_CPU=y
CONFIG_BUILD_BIN2C=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -157,8 +157,6 @@ CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
CONFIG_ARCH_SUPPORTS_INT128=y
-CONFIG_NUMA_BALANCING=y
-CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
CONFIG_MEMCG=y
@@ -168,9 +166,6 @@ CONFIG_BLK_CGROUP=y
# CONFIG_DEBUG_BLK_CGROUP is not set
CONFIG_CGROUP_WRITEBACK=y
CONFIG_CGROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
# CONFIG_CGROUP_RDMA is not set
CONFIG_CGROUP_FREEZER=y
@@ -178,7 +173,6 @@ CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
CONFIG_PROC_PID_CPUSET=y
CONFIG_CGROUP_DEVICE=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
# CONFIG_CGROUP_DEBUG is not set
@@ -190,7 +184,6 @@ CONFIG_IPC_NS=y
CONFIG_USER_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
-CONFIG_SCHED_AUTOGROUP=y
# CONFIG_SYSFS_DEPRECATED is not set
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
@@ -306,6 +299,7 @@ CONFIG_HAVE_PERF_REGS=y
CONFIG_HAVE_PERF_USER_STACK_DUMP=y
CONFIG_HAVE_ARCH_JUMP_LABEL=y
CONFIG_HAVE_RCU_TABLE_FREE=y
+CONFIG_HAVE_RCU_TABLE_INVALIDATE=y
CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
CONFIG_HAVE_CMPXCHG_LOCAL=y
@@ -438,10 +432,15 @@ CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ_SQ=y
+CONFIG_BFQ_SQ_GROUP_IOSCHED=y
# CONFIG_DEFAULT_DEADLINE is not set
-CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_BFQ_SQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_DEFAULT_IOSCHED="bfq-sq"
+CONFIG_MQ_IOSCHED_BFQ=y
+CONFIG_MQ_BFQ_GROUP_IOSCHED=y
CONFIG_MQ_IOSCHED_DEADLINE=y
# CONFIG_MQ_IOSCHED_KYBER is not set
CONFIG_IOSCHED_BFQ=y
@@ -515,6 +514,7 @@ CONFIG_IOMMU_HELPER=y
CONFIG_MAXSMP=y
CONFIG_NR_CPUS=8192
CONFIG_SCHED_SMT=y
+CONFIG_SMT_NICE=y
CONFIG_SCHED_MC=y
CONFIG_SCHED_MC_PRIO=y
# CONFIG_PREEMPT_NONE is not set
@@ -655,11 +655,11 @@ CONFIG_EFI=y
CONFIG_EFI_STUB=y
CONFIG_EFI_MIXED=y
CONFIG_SECCOMP=y
-# CONFIG_HZ_100 is not set
-# CONFIG_HZ_250 is not set
-# CONFIG_HZ_300 is not set
-CONFIG_HZ_1000=y
-CONFIG_HZ=1000
+CONFIG_HZ_100=y
+# CONFIG_HZ_250_NODEF is not set
+# CONFIG_HZ_300_NODEF is not set
+# CONFIG_HZ_1000_NODEF is not set
+CONFIG_HZ=100
CONFIG_SCHED_HRTICK=y
CONFIG_KEXEC=y
# CONFIG_CRASH_DUMP is not set
@@ -4083,6 +4083,7 @@ CONFIG_HSI_BOARDINFO=y
CONFIG_HSI_CHAR=m
CONFIG_PPS=m
# CONFIG_PPS_DEBUG is not set
+# CONFIG_NTP_PPS is not set
#
# PPS clients support
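The config above switches the default legacy I/O scheduler to BFQ-SQ (CONFIG_DEFAULT_IOSCHED="bfq-sq"). A quick sketch that prints the scheduler list of one block device so the active scheduler (shown in brackets) can be checked after booting the new kernel; "sda" is only an example device name.

/* Prints e.g. "noop deadline cfq [bfq-sq]" for the chosen device on a
 * kernel built from this config; adjust the device name as needed. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/block/sda/queue/scheduler", "r");

	if (!f) {
		perror("/sys/block/sda/queue/scheduler");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}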
diff --git a/sys-kernel/linux-sources-redcore-lts/files/uksm-for-linux-hardened.patch b/sys-kernel/linux-sources-redcore-lts/files/uksm-linux-hardened.patch
index f0596117..f0596117 100644
--- a/sys-kernel/linux-sources-redcore-lts/files/uksm-for-linux-hardened.patch
+++ b/sys-kernel/linux-sources-redcore-lts/files/uksm-linux-hardened.patch
diff --git a/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.14.65.ebuild b/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.14.75.ebuild
index 885311ba..1125acdd 100644
--- a/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.14.65.ebuild
+++ b/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.14.75.ebuild
@@ -31,8 +31,25 @@ PATCHES=( "${FILESDIR}"/introduce-NUMA-identity-node-sched-domain.patch
"${FILESDIR}"/restore-SD_PREFER_SIBLING-on-MC-domains.patch
"${FILESDIR}"/Revert-ath10k-activate-user-space-firmware-loading.patch
"${FILESDIR}"/linux-hardened.patch
- "${FILESDIR}"/uksm-for-linux-hardened.patch
- "${FILESDIR}"/0015-Enable-BFQ-io-scheduler-by-default.patch )
+ "${FILESDIR}"/uksm-linux-hardened.patch
+ "${FILESDIR}"/0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
+ "${FILESDIR}"/0002-Make-preemptible-kernel-default.patch
+ "${FILESDIR}"/0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
+ "${FILESDIR}"/0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
+ "${FILESDIR}"/0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
+ "${FILESDIR}"/0006-Convert-msleep-to-use-hrtimers-when-active.patch
+ "${FILESDIR}"/0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
+ "${FILESDIR}"/0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
+ "${FILESDIR}"/0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
+ "${FILESDIR}"/0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
+ "${FILESDIR}"/0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
+ "${FILESDIR}"/0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
+ "${FILESDIR}"/0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
+ "${FILESDIR}"/0014-Swap-sucks.patch
+ "${FILESDIR}"/0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
+ "${FILESDIR}"/0016-unfuck-MuQSS-on-linux-4_14_15+.patch
+ "${FILESDIR}"/0001-BFQ-v8r12-20171108.patch
+ "${FILESDIR}"/0002-BFQ-v8r12-20180404.patch )
S="${WORKDIR}"/linux-"${PV}"
@@ -49,6 +66,7 @@ src_prepare() {
emake mrproper
sed -ri "s|^(EXTRAVERSION =).*|\1 -${EXTRAVERSION}|" Makefile
cp "${FILESDIR}"/"${EXTRAVERSION}"-amd64.config .config
+	find . -type f -name "*.orig" -delete
}
src_compile() {