Diffstat (limited to 'sys-kernel')
63 files changed, 38136 insertions, 3811 deletions
diff --git a/sys-kernel/linux-image-redcore/Manifest b/sys-kernel/linux-image-redcore/Manifest index 8028344a..7a19047c 100644 --- a/sys-kernel/linux-image-redcore/Manifest +++ b/sys-kernel/linux-image-redcore/Manifest @@ -1 +1 @@ -DIST linux-4.18.5.tar.xz 101796536 BLAKE2B db7c4a46aba53d38ccc5f28a7da4a21f3d53667751f61595123f89e47e956bf13bcd6df85df47e78d2794fe78a7ec6c9082e64cb3025d6c5c1743d9935dde375 SHA512 604b334ccd74b230faf21db8887e382c49d4877ec8ce8298c079001a12222a6c7be2542c8f37c025cb3d625905d30e4c8c37267f0285aea25bbbe5aa3457040c +DIST linux-5.1.15.tar.xz 106273964 BLAKE2B cd029f7f691b69847a0c58f9e4c3ed11eb31c57ccc72874fdd0e5abdff14b3c938543394a4376305d0720ca6df84c1c1446ece77ad6a3a5e4ff8c91af1643ec4 SHA512 c22988286f8eed176d54446222d5c9d15a7a1b3024dffdc4e4884a45c0d2d7ec24c9d52219a3f0b8fe69c8a92332cc37314301e3bd4f671f116376fd5ca45d61 diff --git a/sys-kernel/linux-image-redcore/files/0001-Revert-x86-ACPI-cstate-Allow-ACPI-C1-FFH-MWAIT-use-o.patch b/sys-kernel/linux-image-redcore/files/0001-Revert-x86-ACPI-cstate-Allow-ACPI-C1-FFH-MWAIT-use-o.patch deleted file mode 100644 index e8f3bfda..00000000 --- a/sys-kernel/linux-image-redcore/files/0001-Revert-x86-ACPI-cstate-Allow-ACPI-C1-FFH-MWAIT-use-o.patch +++ /dev/null @@ -1,29 +0,0 @@ -From f912ead404ffa24db7f4aee527aff411db39262a Mon Sep 17 00:00:00 2001 -From: Gabriel Craciunescu <crazy@frugalware.org> -Date: Wed, 11 Apr 2018 17:17:06 +0200 -Subject: [PATCH] Revert "x86/ACPI/cstate: Allow ACPI C1 FFH MWAIT use on AMD systems" - -This reverts commit 5209654a46ee71137ad9b06da99d4ef2794475af. -Please see: https://community.amd.com/thread/224000 - ---- - arch/x86/kernel/acpi/cstate.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c -index dde437f5d14f..8233a630280f 100644 ---- a/arch/x86/kernel/acpi/cstate.c -+++ b/arch/x86/kernel/acpi/cstate.c -@@ -167,8 +167,7 @@ static int __init ffh_cstate_init(void) - { - struct cpuinfo_x86 *c = &boot_cpu_data; - -- if (c->x86_vendor != X86_VENDOR_INTEL && -- c->x86_vendor != X86_VENDOR_AMD) -+ if (c->x86_vendor != X86_VENDOR_INTEL) - return -1; - - cpu_cstate_entry = alloc_percpu(struct cstate_entry); --- -2.17.0 - diff --git a/sys-kernel/linux-image-redcore/files/5.1-0001-MultiQueue-Skiplist-Scheduler-version-0.192-linux-hardened.patch b/sys-kernel/linux-image-redcore/files/5.1-0001-MultiQueue-Skiplist-Scheduler-version-0.192-linux-hardened.patch new file mode 100644 index 00000000..392477d4 --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0001-MultiQueue-Skiplist-Scheduler-version-0.192-linux-hardened.patch @@ -0,0 +1,10577 @@ +diff -Nur a/arch/alpha/Kconfig b/arch/alpha/Kconfig +--- a/arch/alpha/Kconfig 2019-06-25 04:34:56.000000000 +0100 ++++ b/arch/alpha/Kconfig 2019-07-07 09:17:41.251241479 +0100 +@@ -670,6 +670,8 @@ + default 1200 if HZ_1200 + default 1024 + ++source "kernel/Kconfig.MuQSS" ++ + config SRM_ENV + tristate "SRM environment through procfs" + depends on PROC_FS +diff -Nur a/arch/arm/Kconfig b/arch/arm/Kconfig +--- a/arch/arm/Kconfig 2019-06-25 04:34:56.000000000 +0100 ++++ b/arch/arm/Kconfig 2019-07-07 09:17:41.251241479 +0100 +@@ -1308,6 +1308,8 @@ + MultiThreading at a cost of slightly increased overhead in some + places. If unsure say N here. 
+ ++source "kernel/Kconfig.MuQSS" ++ + config HAVE_ARM_SCU + bool + help +diff -Nur a/arch/arm64/Kconfig b/arch/arm64/Kconfig +--- a/arch/arm64/Kconfig 2019-07-07 09:08:19.122347611 +0100 ++++ b/arch/arm64/Kconfig 2019-07-07 09:17:41.251241479 +0100 +@@ -825,6 +825,8 @@ + MultiThreading at a cost of slightly increased overhead in some + places. If unsure say N here. + ++source "kernel/Kconfig.MuQSS" ++ + config NR_CPUS + int "Maximum number of CPUs (2-4096)" + range 2 4096 +diff -Nur a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig +--- a/arch/powerpc/Kconfig 2019-06-25 04:34:56.000000000 +0100 ++++ b/arch/powerpc/Kconfig 2019-07-07 09:17:41.251241479 +0100 +@@ -820,6 +820,8 @@ + when dealing with POWER5 cpus at a cost of slightly increased + overhead in some places. If unsure say N here. + ++source "kernel/Kconfig.MuQSS" ++ + config PPC_DENORMALISATION + bool "PowerPC denormalisation exception handling" + depends on PPC_BOOK3S_64 +diff -Nur a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c +--- a/arch/powerpc/platforms/cell/spufs/sched.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/arch/powerpc/platforms/cell/spufs/sched.c 2019-07-07 09:17:41.251241479 +0100 +@@ -65,11 +65,6 @@ + static struct timer_list spuloadavg_timer; + + /* +- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0). +- */ +-#define NORMAL_PRIO 120 +- +-/* + * Frequency of the spu scheduler tick. By default we do one SPU scheduler + * tick for every 10 CPU scheduler ticks. + */ +diff -Nur a/arch/x86/Kconfig b/arch/x86/Kconfig +--- a/arch/x86/Kconfig 2019-07-07 09:08:19.122347611 +0100 ++++ b/arch/x86/Kconfig 2019-07-07 09:17:41.251241479 +0100 +@@ -1017,6 +1017,22 @@ + config SCHED_SMT + def_bool y if SMP + ++config SMT_NICE ++ bool "SMT (Hyperthreading) aware nice priority and policy support" ++ depends on SCHED_MUQSS && SCHED_SMT ++ default y ++ ---help--- ++ Enabling Hyperthreading on Intel CPUs decreases the effectiveness ++ of the use of 'nice' levels and different scheduling policies ++ (e.g. realtime) due to sharing of CPU power between hyperthreads. ++ SMT nice support makes each logical CPU aware of what is running on ++ its hyperthread siblings, maintaining appropriate distribution of ++ CPU according to nice levels and scheduling policies at the expense ++ of slightly increased overhead. ++ ++ If unsure say Y here. ++ ++ + config SCHED_MC + def_bool y + prompt "Multi-core scheduler support" +@@ -1047,6 +1063,8 @@ + + If unsure say Y here. + ++source "kernel/Kconfig.MuQSS" ++ + config UP_LATE_INIT + def_bool y + depends on !SMP && X86_LOCAL_APIC +diff -Nur a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +--- a/Documentation/admin-guide/kernel-parameters.txt 2019-07-07 09:08:19.122347611 +0100 ++++ b/Documentation/admin-guide/kernel-parameters.txt 2019-07-07 09:17:41.241241144 +0100 +@@ -4149,6 +4149,14 @@ + Memory area to be used by remote processor image, + managed by CMA. + ++ rqshare= [X86] Select the MuQSS scheduler runqueue sharing type. 
++			Format: <string>
++			smt -- Share SMT (hyperthread) sibling runqueues
++			mc -- Share MC (multicore) sibling runqueues
++			smp -- Share SMP runqueues
++			none -- Do not share any runqueues
++			Default value is mc
++
+ 	rw		[KNL] Mount root device read-write on boot
+
+ 	S		[KNL] Run init in single mode
+diff -Nur a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt
+--- a/Documentation/scheduler/sched-BFS.txt	1970-01-01 01:00:00.000000000 +0100
++++ b/Documentation/scheduler/sched-BFS.txt	2019-07-07 09:17:41.241241144 +0100
+@@ -0,0 +1,351 @@
++BFS - The Brain Fuck Scheduler by Con Kolivas.
++
++Goals.
++
++The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to
++completely do away with the complex designs of the past for the cpu process
++scheduler and instead implement one that is very simple in basic design.
++The main focus of BFS is to achieve excellent desktop interactivity and
++responsiveness without heuristics and tuning knobs that are difficult to
++understand, impossible to model and predict the effect of, and when tuned to
++one workload cause massive detriment to another.
++
++
++Design summary.
++
++BFS is best described as a single runqueue, O(n) lookup, earliest effective
++virtual deadline first design, loosely based on EEVDF (earliest eligible virtual
++deadline first) and my previous Staircase Deadline scheduler. Each component
++shall be described in order to understand the significance of, and reasoning for
++it. The codebase when the first stable version was released was approximately
++9000 lines less code than the existing mainline linux kernel scheduler (in
++2.6.31). This does not even take into account the removal of documentation and
++the cgroups code that is not used.
++
++Design reasoning.
++
++The single runqueue refers to the queued but not running processes for the
++entire system, regardless of the number of CPUs. The reason for going back to
++a single runqueue design is that once multiple runqueues are introduced,
++per-CPU or otherwise, there will be complex interactions as each runqueue will
++be responsible for the scheduling latency and fairness of the tasks only on its
++own runqueue, and to achieve fairness and low latency across multiple CPUs, any
++advantage in throughput of having CPU local tasks causes other disadvantages.
++This is due to requiring a very complex balancing system to at best achieve some
++semblance of fairness across CPUs and can only maintain relatively low latency
++for tasks bound to the same CPUs, not across them. To increase said fairness
++and latency across CPUs, the advantage of local runqueue locking, which makes
++for better scalability, is lost due to having to grab multiple locks.
++
++A significant feature of BFS is that all accounting is done purely based on CPU
++used and nowhere is sleep time used in any way to determine entitlement or
++interactivity. Interactivity "estimators" that use some kind of sleep/run
++algorithm are doomed to fail to detect all interactive tasks, and to falsely tag
++tasks that aren't interactive as being so. The reason for this is that it is
++close to impossible to determine, when a task is sleeping, whether it is
++doing it voluntarily, as in a userspace application waiting for input in the
++form of a mouse click or otherwise, or involuntarily, because it is waiting for
++another thread, process, I/O, kernel activity or whatever.
Thus, such an
++estimator will introduce corner cases, and more heuristics will be required to
++cope with those corner cases, introducing more corner cases and failed
++interactivity detection and so on. Interactivity in BFS is built into the design
++by virtue of the fact that tasks that are waking up have not used up their quota
++of CPU time, and have earlier effective deadlines, thereby making it very likely
++they will preempt any CPU bound task of equivalent nice level. See below for
++more information on the virtual deadline mechanism. Even if they do not preempt
++a running task, because the rr interval is guaranteed to have a bounded upper
++limit on how long a task will wait, it will be scheduled within a timeframe
++that will not cause visible interface jitter.
++
++
++Design details.
++
++Task insertion.
++
++BFS inserts tasks into each relevant queue as an O(1) insertion into a doubly
++linked list. On insertion, *every* running queue is checked to see if the newly
++queued task can run on any idle queue, or preempt the lowest running task on the
++system. This is how the cross-CPU scheduling of BFS achieves significantly lower
++latency per extra CPU the system has. In this case the lookup is, in the worst
++case scenario, O(n) where n is the number of CPUs on the system.
++
++Data protection.
++
++BFS has one single lock protecting the process local data of every task in the
++global queue. Thus every insertion, removal and modification of task data in the
++global runqueue needs to grab the global lock. However, once a task is taken by
++a CPU, the CPU has its own local data copy of the running process' accounting
++information which only that CPU accesses and modifies (such as during a
++timer tick) thus allowing the accounting data to be updated locklessly. Once a
++CPU has taken a task to run, it removes it from the global queue. Thus the
++global queue only ever has, at most,
++
++	(number of tasks requesting cpu time) - (number of logical CPUs) + 1
++
++tasks in the global queue. This value is relevant for the time taken to look up
++tasks during scheduling. This will increase if many tasks with CPU affinity set
++in their policy to limit which CPUs they're allowed to run on outnumber the
++number of CPUs. The +1 is because when rescheduling a task, the CPU's
++currently running task is put back on the queue. Lookup will be described after
++the virtual deadline mechanism is explained.
++
++Virtual deadline.
++
++The key to achieving low latency, scheduling fairness, and "nice level"
++distribution in BFS is entirely in the virtual deadline mechanism. The one
++tunable in BFS is the rr_interval, or "round robin interval". This is the
++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
++tasks of the same nice level will be running for, or looking at it the other
++way around, the longest duration two tasks of the same nice level will be
++delayed for. When a task requests cpu time, it is given a quota (time_slice)
++equal to the rr_interval and a virtual deadline. The virtual deadline is
++offset from the current time in jiffies by this equation:
++
++	jiffies + (prio_ratio * rr_interval)
++
++The prio_ratio is determined as a ratio compared to the baseline of nice -20
++and increases by 10% per nice level. The deadline is a virtual one only in that
++no guarantee is placed that a task will actually be scheduled by this time, but
++it is used to compare which task should go next.
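
To make the deadline formula above concrete, the following minimal userspace
sketch computes it for a few nice levels. The baseline ratio of 1.0 at nice -20
and the floating point arithmetic are illustrative assumptions; the scheduler
itself uses scaled integer arithmetic internally.

	#include <stdio.h>

	/* +10% per nice level above the assumed 1.0 baseline at nice -20 */
	static double prio_ratio(int nice)
	{
		double ratio = 1.0;
		int i;

		for (i = -20; i < nice; i++)
			ratio *= 1.1;
		return ratio;
	}

	int main(void)
	{
		unsigned long jiffies = 100000;	/* pretend current time */
		unsigned long rr_interval = 6;	/* the default tunable, in ms */
		int nice;

		/* deadline = jiffies + (prio_ratio * rr_interval) */
		for (nice = -20; nice <= 19; nice += 13)
			printf("nice %3d: virtual deadline = %.0f\n", nice,
			       jiffies + prio_ratio(nice) * rr_interval);
		return 0;
	}

Running this shows the deadline offset growing from 6 at nice -20 to roughly
250 at nice 19, which is where the squared CPU-proportion relationship
described below comes from.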
There are three components to ++how a task is next chosen. First is time_slice expiration. If a task runs out ++of its time_slice, it is descheduled, the time_slice is refilled, and the ++deadline reset to that formula above. Second is sleep, where a task no longer ++is requesting CPU for whatever reason. The time_slice and deadline are _not_ ++adjusted in this case and are just carried over for when the task is next ++scheduled. Third is preemption, and that is when a newly waking task is deemed ++higher priority than a currently running task on any cpu by virtue of the fact ++that it has an earlier virtual deadline than the currently running task. The ++earlier deadline is the key to which task is next chosen for the first and ++second cases. Once a task is descheduled, it is put back on the queue, and an ++O(n) lookup of all queued-but-not-running tasks is done to determine which has ++the earliest deadline and that task is chosen to receive CPU next. ++ ++The CPU proportion of different nice tasks works out to be approximately the ++ ++ (prio_ratio difference)^2 ++ ++The reason it is squared is that a task's deadline does not change while it is ++running unless it runs out of time_slice. Thus, even if the time actually ++passes the deadline of another task that is queued, it will not get CPU time ++unless the current running task deschedules, and the time "base" (jiffies) is ++constantly moving. ++ ++Task lookup. ++ ++BFS has 103 priority queues. 100 of these are dedicated to the static priority ++of realtime tasks, and the remaining 3 are, in order of best to worst priority, ++SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority ++scheduling). When a task of these priorities is queued, a bitmap of running ++priorities is set showing which of these priorities has tasks waiting for CPU ++time. When a CPU is made to reschedule, the lookup for the next task to get ++CPU time is performed in the following way: ++ ++First the bitmap is checked to see what static priority tasks are queued. If ++any realtime priorities are found, the corresponding queue is checked and the ++first task listed there is taken (provided CPU affinity is suitable) and lookup ++is complete. If the priority corresponds to a SCHED_ISO task, they are also ++taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds ++to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this ++stage, every task in the runlist that corresponds to that priority is checked ++to see which has the earliest set deadline, and (provided it has suitable CPU ++affinity) it is taken off the runqueue and given the CPU. If a task has an ++expired deadline, it is taken and the rest of the lookup aborted (as they are ++chosen in FIFO order). ++ ++Thus, the lookup is O(n) in the worst case only, where n is as described ++earlier, as tasks may be chosen before the whole task list is looked over. ++ ++ ++Scalability. ++ ++The major limitations of BFS will be that of scalability, as the separate ++runqueue designs will have less lock contention as the number of CPUs rises. ++However they do not scale linearly even with separate runqueues as multiple ++runqueues will need to be locked concurrently on such designs to be able to ++achieve fair CPU balancing, to try and achieve some sort of nice-level fairness ++across CPUs, and to achieve low enough latency for tasks on a busy CPU when ++other CPUs would be more suited. 
BFS has the advantage that it requires no
++balancing algorithm whatsoever, as balancing occurs by proxy simply because
++all CPUs draw off the global runqueue, in priority and deadline order. Despite
++the fact that scalability is _not_ the prime concern of BFS, it both shows very
++good scalability to smaller numbers of CPUs and is likely a more scalable design
++at these numbers of CPUs.
++
++It also has some very low overhead scalability features built into the design
++when it has been deemed their overhead is so marginal that they're worth adding.
++The first is the local copy of the running process' data to the CPU it's running
++on to allow that data to be updated lockless where possible. Then there is
++deference paid to the last CPU a task was running on, by trying that CPU first
++when looking for an idle CPU to use the next time it's scheduled. Finally there
++is the notion of cache locality beyond the last running CPU. The sched_domains
++information is used to determine the relative virtual "cache distance" that
++other CPUs have from the last CPU a task was running on. CPUs with shared
++caches, such as SMT siblings, or multicore CPUs with shared caches, are treated
++as cache local. CPUs without shared caches are treated as not cache local, and
++CPUs on different NUMA nodes are treated as very distant. This "relative cache
++distance" is used by modifying the virtual deadline value when doing lookups.
++Effectively, the deadline is unaltered between "cache local" CPUs, doubled for
++"cache distant" CPUs, and quadrupled for "very distant" CPUs. The reasoning
++behind the doubling of deadlines is as follows. The real cost of migrating a
++task from one CPU to another is entirely dependent on the cache footprint of
++the task, how cache intensive the task is, how long it's been running on that
++CPU to take up the bulk of its cache, how big the CPU cache is, how fast and
++how layered the CPU cache is, how fast a context switch is... and so on. In
++other words, it's close to random in the real world where we do more than just
++one sole workload. The only thing we can be sure of is that it's not free. So
++BFS uses the principle that an idle CPU is a wasted CPU and utilising idle CPUs
++is more important than cache locality, and cache locality only plays a part
++after that. Doubling the effective deadline is based on the premise that the
++"cache local" CPUs will tend to work on the same tasks up to double the number
++of cache local CPUs, and once the workload is beyond that amount, it is likely
++that none of the tasks are cache warm anywhere anyway. The quadrupling for NUMA
++is a value I pulled out of my arse.
++
++When choosing an idle CPU for a waking task, the cache locality is determined
++according to where the task last ran and then idle CPUs are ranked from best
++to worst to choose the most suitable idle CPU based on cache locality, NUMA
++node locality and hyperthread sibling busyness. They are chosen in the
++following preference (if idle):
++
++* Same core, idle or busy cache, idle threads
++* Other core, same cache, idle or busy cache, idle threads.
++* Same node, other CPU, idle cache, idle threads.
++* Same node, other CPU, busy cache, idle threads.
++* Same core, busy threads.
++* Other core, same cache, busy threads.
++* Same node, other CPU, busy threads.
++* Other node, other CPU, idle cache, idle threads.
++* Other node, other CPU, busy cache, idle threads.
++* Other node, other CPU, busy threads.
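
The ranking above can be sketched roughly as follows. This is an illustration
only: the topology fields and the collapsing of the ten preference levels into
a few coarse ranks are assumptions, not BFS's actual data structures.

	struct cpu_info {
		int id, core, llc, node;
		int idle_threads, idle_cache;
	};

	/* Lower rank == more preferable, loosely following the list above. */
	static int locality_rank(const struct cpu_info *last,
				 const struct cpu_info *c)
	{
		if (!c->idle_threads)
			return 9;			/* busy threads: last resort */
		if (c->core == last->core)
			return 0;			/* same core, idle threads */
		if (c->llc == last->llc)
			return 1;			/* other core, shared cache */
		if (c->node == last->node)
			return c->idle_cache ? 2 : 3;	/* same NUMA node */
		return c->idle_cache ? 4 : 5;		/* other NUMA node */
	}

	static const struct cpu_info *pick_cpu(const struct cpu_info *last,
					       const struct cpu_info *cand, int n)
	{
		const struct cpu_info *best = cand;
		int i;

		for (i = 1; i < n; i++)
			if (locality_rank(last, &cand[i]) < locality_rank(last, best))
				best = &cand[i];
		return best;
	}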
++
++This shows the SMT or "hyperthread" awareness in the design as well, which will
++choose a real idle core first before a logical SMT sibling which already has
++tasks on the physical CPU.
++
++Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
++However this benchmarking was performed on an earlier design that was far less
++scalable than the current one so it's hard to know how scalable it is in terms
++of both CPUs (due to the global runqueue) and heavily loaded machines (due to
++O(n) lookup) at this stage. Note that in terms of scalability, the number of
++_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x)
++quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark
++results are very promising indeed, without needing to tweak any knobs, features
++or options. Benchmark contributions are most welcome.
++
++
++Features
++
++As the initial prime target audience for BFS was the average desktop user, it
++was designed to not need tweaking, tuning or have features set to obtain benefit
++from it. Thus the number of knobs and features has been kept to an absolute
++minimum and should not require extra user input for the vast majority of cases.
++There are precisely 2 tunables and 2 extra scheduling policies: the rr_interval
++and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition
++to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is
++support for CGROUPS. The average user should neither need to know what these
++are, nor should they need to be using them to have good desktop behaviour.
++
++rr_interval
++
++There is only one "scheduler" tunable, the round robin interval. This can be
++accessed in
++
++	/proc/sys/kernel/rr_interval
++
++The value is in milliseconds, and the default value is set to 6 on a
++uniprocessor machine, and automatically set to a progressively higher value on
++multiprocessor machines. The reasoning behind increasing the value on more CPUs
++is that the effective latency is decreased by virtue of there being more CPUs on
++BFS (for reasons explained above), and increasing the value allows for less
++cache contention and more throughput. Valid values are from 1 to 1000.
++Decreasing the value will decrease latencies at the cost of decreasing
++throughput, while increasing it will improve throughput, but at the cost of
++worsening latencies. The accuracy of the rr interval is limited by HZ resolution
++of the kernel configuration. Thus, the worst case latencies are usually slightly
++higher than this actual value. The default value of 6 is not an arbitrary one.
++It is based on the fact that humans can detect jitter at approximately 7ms, so
++aiming for much lower latencies is pointless under most circumstances. It is
++worth noting this fact when comparing the latency performance of BFS to other
++schedulers. Worst case latencies being higher than 7ms are far worse than
++average latencies not being in the microsecond range.
++
++Isochronous scheduling.
++
++Isochronous scheduling is a unique scheduling policy designed to provide
++near-real-time performance to unprivileged (ie non-root) users without the
++ability to starve the machine indefinitely. Isochronous tasks (which means
++"same time") are set using, for example, the schedtool application like so:
++
++	schedtool -I -e amarok
++
++This will start the audio application "amarok" as SCHED_ISO.
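
For reference, the equivalent of that schedtool invocation can be performed
directly with sched_setscheduler(). This sketch assumes a MuQSS kernel where
SCHED_ISO is policy 4, as defined by the include/uapi/linux/sched.h change
later in this patch; on a mainline kernel the call fails with EINVAL.

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	#define SCHED_ISO 4	/* from the uapi change in this patch; not in libc headers */

	int main(void)
	{
		struct sched_param sp = { .sched_priority = 0 };	/* ISO has no static priority */

		if (sched_setscheduler(0, SCHED_ISO, &sp) == -1) {
			perror("sched_setscheduler");	/* EINVAL on non-MuQSS kernels */
			return 1;
		}
		execlp("amarok", "amarok", (char *)NULL);	/* now runs as SCHED_ISO */
		perror("execlp");
		return 1;
	}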
How SCHED_ISO works
++is that it has a priority level between true realtime tasks and SCHED_NORMAL
++which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
++if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
++rate). However if ISO tasks run for more than a tunable finite amount of time,
++they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
++time is the percentage of _total CPU_ available across the machine, configurable
++as a percentage in the following "resource handling" tunable (as opposed to a
++scheduler tunable):
++
++	/proc/sys/kernel/iso_cpu
++
++and is set to 70% by default. It is calculated over a rolling 5 second average.
++Because it is the total CPU available, it means that on a multi CPU machine, it
++is possible to have an ISO task running as realtime scheduling indefinitely on
++just one CPU, as the other CPUs will be available. Setting this to 100 is the
++equivalent of giving all users SCHED_RR access and setting it to 0 removes the
++ability to run any pseudo-realtime tasks.
++
++A feature of BFS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
++Because some applications constantly set their policy as well as their nice
++level, there is potential for them to undo the override specified by the user
++on the command line of setting the policy to SCHED_ISO. To counter this, once
++a task has been set to SCHED_ISO policy, it needs superuser privileges to set
++it back to SCHED_NORMAL. This will ensure the task remains ISO and all child
++processes and threads will also inherit the ISO policy.
++
++Idleprio scheduling.
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a task
++_only_ when the CPU would be otherwise idle. The idea behind this is to allow
++ultra low priority tasks to be run in the background that have virtually no
++effect on the foreground tasks. This is ideally suited to distributed computing
++clients (like setiathome, folding, mprime etc) but can also be used to start
++a video encode or so on without any slowdown of other tasks. To prevent this
++policy from grabbing shared resources and holding them indefinitely, if it
++detects a state where the task is waiting on I/O, the machine is about to
++suspend to ram and so on, it will transiently schedule them as SCHED_NORMAL. As
++per the Isochronous task management, once a task has been scheduled as IDLEPRIO,
++it cannot be put back to SCHED_NORMAL without superuser privileges. Tasks can
++be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++	schedtool -D -e ./mprime
++
++Subtick accounting.
++
++It is surprisingly difficult to get accurate CPU accounting, and in many cases,
++the accounting is done by simply determining what is happening at the precise
++moment a timer tick fires off. This becomes increasingly inaccurate as the
++timer tick frequency (HZ) is lowered. It is possible to create an application
++which uses almost 100% CPU, yet by being descheduled at the right time, records
++zero CPU usage. While the main problem with this is that there are possible
++security implications, it is also difficult to determine how much CPU a task
++really does use.
BFS tries to use the sub-tick accounting from the TSC clock,
++where possible, to determine real CPU usage. This is not entirely reliable, but
++is far more likely to produce accurate CPU usage data than the existing designs
++and will not show tasks as consuming no CPU usage when they actually are. Thus,
++the amount of CPU reported as being used by BFS will more accurately represent
++how much CPU the task itself is using (as is shown for example by the 'time'
++application), so the reported values may be quite different to other schedulers.
++Values reported as the 'load' are more prone to problems with this design, but
++per process values are closer to real usage. When comparing throughput of BFS
++to other designs, it is important to compare the actual completed work in terms
++of total wall clock time taken and total work done, rather than the reported
++"cpu usage".
++
++
++Con Kolivas <kernel@kolivas.org> Fri Aug 27 2010
+diff -Nur a/Documentation/scheduler/sched-MuQSS.txt b/Documentation/scheduler/sched-MuQSS.txt
+--- a/Documentation/scheduler/sched-MuQSS.txt	1970-01-01 01:00:00.000000000 +0100
++++ b/Documentation/scheduler/sched-MuQSS.txt	2019-07-07 09:17:41.241241144 +0100
+@@ -0,0 +1,373 @@
++MuQSS - The Multiple Queue Skiplist Scheduler by Con Kolivas.
++
++MuQSS is a per-cpu runqueue variant of the original BFS scheduler with
++one 8 level skiplist per runqueue, and fine grained locking for much more
++scalability.
++
++
++Goals.
++
++The goal of the Multiple Queue Skiplist Scheduler, referred to as MuQSS from
++here on (pronounced mux) is to completely do away with the complex designs of
++the past for the cpu process scheduler and instead implement one that is very
++simple in basic design. The main focus of MuQSS is to achieve excellent desktop
++interactivity and responsiveness without heuristics and tuning knobs that are
++difficult to understand, impossible to model and predict the effect of, and when
++tuned to one workload cause massive detriment to another, while still being
++scalable to many CPUs and processes.
++
++
++Design summary.
++
++MuQSS is best described as per-cpu multiple runqueue, O(log n) insertion, O(1)
++lookup, earliest effective virtual deadline first tickless design, loosely based
++on EEVDF (earliest eligible virtual deadline first) and my previous Staircase
++Deadline scheduler, and evolved from the single runqueue O(n) BFS scheduler.
++Each component shall be described in order to understand the significance of,
++and reasoning for it.
++
++
++Design reasoning.
++
++In BFS, the use of a single runqueue across all CPUs meant that each CPU would
++need to scan the entire runqueue looking for the process with the earliest
++deadline and schedule that next, regardless of which CPU it originally came
++from. This made BFS deterministic with respect to latency and provided
++guaranteed latencies dependent on number of processes and CPUs. The single
++runqueue, however, meant that all CPUs would compete for the single lock
++protecting it, which would lead to increasing lock contention as the number of
++CPUs rose and appeared to limit scalability of common workloads beyond 16
++logical CPUs. Additionally, the O(n) lookup of the runqueue list obviously
++increased overhead proportionate to the number of queued processes and led to
++cache thrashing while iterating over the linked list.
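
The O(n) scan being described is conceptually just the following walk, shown
here over an illustrative task list rather than the kernel's structures; every
wakeup or reschedule had to do this under the one global lock.

	struct task {
		unsigned long long deadline;
		struct task *next;
	};

	/* Earliest-virtual-deadline-first pick: touches every queued node. */
	static struct task *earliest_deadline(struct task *head)
	{
		struct task *best = head;
		struct task *t;

		for (t = head; t; t = t->next)
			if (t->deadline < best->deadline)
				best = t;
		return best;
	}

This linear walk, plus the cacheline misses it incurs on a long list, is
exactly the cost that MuQSS's per-CPU priority-ordered skip lists remove.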
++ ++MuQSS is an evolution of BFS, designed to maintain the same scheduling ++decision mechanism and be virtually deterministic without relying on the ++constrained design of the single runqueue by splitting out the single runqueue ++to be per-CPU and use skiplists instead of linked lists. ++ ++The original reason for going back to a single runqueue design for BFS was that ++once multiple runqueues are introduced, per-CPU or otherwise, there will be ++complex interactions as each runqueue will be responsible for the scheduling ++latency and fairness of the tasks only on its own runqueue, and to achieve ++fairness and low latency across multiple CPUs, any advantage in throughput of ++having CPU local tasks causes other disadvantages. This is due to requiring a ++very complex balancing system to at best achieve some semblance of fairness ++across CPUs and can only maintain relatively low latency for tasks bound to the ++same CPUs, not across them. To increase said fairness and latency across CPUs, ++the advantage of local runqueue locking, which makes for better scalability, is ++lost due to having to grab multiple locks. ++ ++MuQSS works around the problems inherent in multiple runqueue designs by ++making its skip lists priority ordered and through novel use of lockless ++examination of each other runqueue it can decide if it should take the earliest ++deadline task from another runqueue for latency reasons, or for CPU balancing ++reasons. It still does not have a balancing system, choosing to allow the ++next task scheduling decision and task wakeup CPU choice to allow balancing to ++happen by virtue of its choices. ++ ++As a further evolution of the design, MuQSS normally configures sharing of ++runqueues in a logical fashion for when CPU resources are shared for improved ++latency and throughput. By default it shares runqueues and locks between ++multicore siblings. Optionally it can be configured to run with sharing of ++SMT siblings only, all SMP packages or no sharing at all. Additionally it can ++be selected at boot time. ++ ++ ++Design details. ++ ++Custom skip list implementation: ++ ++To avoid the overhead of building up and tearing down skip list structures, ++the variant used by MuQSS has a number of optimisations making it specific for ++its use case in the scheduler. It uses static arrays of 8 'levels' instead of ++building up and tearing down structures dynamically. This makes each runqueue ++only scale O(log N) up to 64k tasks. However as there is one runqueue per CPU ++it means that it scales O(log N) up to 64k x number of logical CPUs which is ++far beyond the realistic task limits each CPU could handle. By being 8 levels ++it also makes the array exactly one cacheline in size. Additionally, each ++skip list node is bidirectional making insertion and removal amortised O(1), ++being O(k) where k is 1-8. Uniquely, we are only ever interested in the very ++first entry in each list at all times with MuQSS, so there is never a need to ++do a search and thus look up is always O(1). In interactive mode, the queues ++will be searched beyond their first entry if the first task is not suitable ++for affinity or SMT nice reasons. ++ ++Task insertion: ++ ++MuQSS inserts tasks into a per CPU runqueue as an O(log N) insertion into ++a custom skip list as described above (based on the original design by William ++Pugh). 
Insertion is ordered in such a way that there is never a need to do a
++search by ordering tasks according to static priority primarily, and then
++virtual deadline at the time of insertion.
++
++Niffies:
++
++Niffies are a monotonic forward moving timer not unlike the "jiffies" but are
++of nanosecond resolution. Niffies are calculated per-runqueue from the high
++resolution TSC timers, and in order to maintain fairness are synchronised
++between CPUs whenever both runqueues are locked concurrently.
++
++Virtual deadline:
++
++The key to achieving low latency, scheduling fairness, and "nice level"
++distribution in MuQSS is entirely in the virtual deadline mechanism. The one
++tunable in MuQSS is the rr_interval, or "round robin interval". This is the
++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
++tasks of the same nice level will be running for, or looking at it the other
++way around, the longest duration two tasks of the same nice level will be
++delayed for. When a task requests cpu time, it is given a quota (time_slice)
++equal to the rr_interval and a virtual deadline. The virtual deadline is
++offset from the current time in niffies by this equation:
++
++	niffies + (prio_ratio * rr_interval)
++
++The prio_ratio is determined as a ratio compared to the baseline of nice -20
++and increases by 10% per nice level. The deadline is a virtual one only in that
++no guarantee is placed that a task will actually be scheduled by this time, but
++it is used to compare which task should go next. There are three components to
++how a task is next chosen. First is time_slice expiration. If a task runs out
++of its time_slice, it is descheduled, the time_slice is refilled, and the
++deadline reset to that formula above. Second is sleep, where a task no longer
++is requesting CPU for whatever reason. The time_slice and deadline are _not_
++adjusted in this case and are just carried over for when the task is next
++scheduled. Third is preemption, and that is when a newly waking task is deemed
++higher priority than a currently running task on any cpu by virtue of the fact
++that it has an earlier virtual deadline than the currently running task. The
++earlier deadline is the key to which task is next chosen for the first and
++second cases.
++
++The CPU proportion of different nice tasks works out to be approximately the
++
++	(prio_ratio difference)^2
++
++The reason it is squared is that a task's deadline does not change while it is
++running unless it runs out of time_slice. Thus, even if the time actually
++passes the deadline of another task that is queued, it will not get CPU time
++unless the current running task deschedules, and the time "base" (niffies) is
++constantly moving.
++
++Task lookup:
++
++As tasks are already pre-ordered according to anticipated scheduling order in
++the skip lists, lookup for the next suitable task per-runqueue is always a
++matter of simply selecting the first task in the 0th level skip list entry.
++In order to maintain optimal latency and fairness across CPUs, MuQSS does a
++novel examination of every other runqueue in cache locality order, choosing the
++best task across all runqueues. This provides near-determinism of how long any
++task across the entire system may wait before receiving CPU time. The other
++runqueues are first examined locklessly and then trylocked to minimise the
++potential lock contention if they are likely to have a suitable better task.
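
That lockless-peek-then-trylock step can be sketched as below, here as a
userspace analogy using pthreads; the field and function names are assumptions
for illustration, not MuQSS code.

	#include <pthread.h>
	#include <stddef.h>

	struct rq {
		pthread_mutex_t lock;
		unsigned long long best_deadline;	/* key of first skip list entry */
		void *best_task;
	};

	/* Peek at another runqueue without its lock; only trylock when the
	 * (possibly stale) peek suggests an earlier deadline is available. */
	static void *maybe_take(struct rq *other, unsigned long long best_so_far)
	{
		void *task = NULL;

		if (other->best_deadline >= best_so_far)
			return NULL;			/* lockless read, may be stale */
		if (pthread_mutex_trylock(&other->lock) != 0)
			return NULL;			/* contended: skip, never wait */
		if (other->best_deadline < best_so_far)	/* recheck under the lock */
			task = other->best_task;
		pthread_mutex_unlock(&other->lock);
		return task;
	}

A stale peek is harmless: a false positive is caught by the recheck under the
lock, and a false negative merely means a slightly less ideal pick this pass.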
++Each other runqueue lock is only held for as long as it takes to examine the
++entry for suitability. In "interactive" mode, the default setting, MuQSS will
++look for the best deadline task across all CPUs, while in !interactive mode,
++it will only select a better deadline task from another CPU if it is more
++heavily laden than the current one.
++
++Lookup is therefore O(k) where k is the number of CPUs.
++
++
++Latency.
++
++Through the use of virtual deadlines to govern the scheduling order of normal
++tasks, queue-to-activation latency per runqueue is guaranteed to be bound by
++the rr_interval tunable which is set to 6ms by default. This means that the
++longest a CPU bound task will wait for more CPU is proportional to the number
++of running tasks and in the common case of 0-2 running tasks per CPU, will be
++under the 7ms threshold for human perception of jitter. Additionally, as newly
++woken tasks will have an early deadline from their previous runtime, the very
++tasks that are usually latency sensitive will have the shortest interval for
++activation, usually preempting any existing CPU bound tasks.
++
++Tickless expiry:
++
++A feature of MuQSS is that it is not tied to the resolution of the chosen tick
++rate in Hz, instead depending entirely on the high resolution timers where
++possible for sub-millisecond accuracy on timeouts regardless of the underlying
++tick rate. This allows MuQSS to be run with the low overhead of low Hz rates
++such as 100 by default, benefiting from the improved throughput and lower
++power usage it provides. Another advantage of this approach is that in
++combination with the Full No HZ option, which disables ticks on running task
++CPUs instead of just idle CPUs, the tick can be disabled at all times
++regardless of how many tasks are running instead of being limited to just one
++running task. Note that this option is NOT recommended for regular desktop
++users.
++
++
++Scalability and balancing.
++
++Unlike traditional approaches where balancing is a combination of CPU selection
++at task wakeup and intermittent balancing based on a vast array of rules set
++according to architecture, busyness calculations and special case management,
++MuQSS indirectly balances on the fly at task wakeup and next task selection.
++During initialisation, MuQSS creates a cache coherency ordered list of CPUs for
++each logical CPU and uses this to aid task/CPU selection when CPUs are busy.
++Additionally it selects any idle CPUs, if they are available, at any time over
++busy CPUs according to the following preference:
++
++ * Same thread, idle or busy cache, idle or busy threads
++ * Other core, same cache, idle or busy cache, idle threads.
++ * Same node, other CPU, idle cache, idle threads.
++ * Same node, other CPU, busy cache, idle threads.
++ * Other core, same cache, busy threads.
++ * Same node, other CPU, busy threads.
++ * Other node, other CPU, idle cache, idle threads.
++ * Other node, other CPU, busy cache, idle threads.
++ * Other node, other CPU, busy threads.
++
++Mux is therefore SMT, MC and NUMA aware without the need for extra
++intermittent balancing to keep CPUs busy and make the most of cache
++coherency.
++
++
++Features
++
++As the initial prime target audience for MuQSS was the average desktop user, it
++was designed to not need tweaking, tuning or have features set to obtain benefit
++from it.
Thus the number of knobs and features has been kept to an absolute
++minimum and should not require extra user input for the vast majority of cases.
++There are 3 optional tunables and 2 extra scheduling policies: the rr_interval,
++interactive, and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO
++policies. In addition to this, MuQSS also uses sub-tick accounting. What MuQSS
++does _not_ now feature is support for CGROUPS. The average user should neither
++need to know what these are, nor should they need to be using them to have good
++desktop behaviour. However since some applications refuse to work without
++cgroups, one can enable them with MuQSS as a stub and the filesystem will be
++created which will allow the applications to work.
++
++rr_interval:
++
++	/proc/sys/kernel/rr_interval
++
++The value is in milliseconds, and the default value is set to 6. Valid values
++are from 1 to 1000. Decreasing the value will decrease latencies at the cost of
++decreasing throughput, while increasing it will improve throughput, but at the
++cost of worsening latencies. It is based on the fact that humans can detect
++jitter at approximately 7ms, so aiming for much lower latencies is pointless
++under most circumstances. It is worth noting this fact when comparing the
++latency performance of MuQSS to other schedulers. Worst case latencies being
++higher than 7ms are far worse than average latencies not being in the
++microsecond range.
++
++interactive:
++
++	/proc/sys/kernel/interactive
++
++The value is a simple boolean of 1 for on and 0 for off and is set to on by
++default. Disabling this will disable the near-determinism of MuQSS when
++selecting the next task by not examining all CPUs for the earliest deadline
++task, or which CPU to wake to, instead prioritising CPU balancing for improved
++throughput. Latency will still be bound by rr_interval, but on a per-CPU basis
++instead of across the whole system.
++
++Runqueue sharing.
++
++By default MuQSS chooses to share runqueue resources (specifically the skip
++list and locking) between multicore siblings. It is configurable at build time
++to select between None, SMT, MC and SMP, corresponding to no sharing, sharing
++only between simultaneous multithreading siblings, multicore siblings, or
++symmetric multiprocessing physical packages. Additionally it can be set at
++boot time with the use of the rqshare parameter. The reason for configurability
++is that some architectures have CPUs with many multicore siblings (>= 16)
++where it may be detrimental to throughput to share runqueues and another
++sharing option may be desirable. Additionally, more sharing than usual can
++improve latency on a system-wide level at the expense of throughput if desired.
++
++The options are:
++none, smt, mc, smp
++
++eg:
++	rqshare=mc
++
++Isochronous scheduling:
++
++Isochronous scheduling is a unique scheduling policy designed to provide
++near-real-time performance to unprivileged (ie non-root) users without the
++ability to starve the machine indefinitely. Isochronous tasks (which means
++"same time") are set using, for example, the schedtool application like so:
++
++	schedtool -I -e amarok
++
++This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
++is that it has a priority level between true realtime tasks and SCHED_NORMAL
++which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
++if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
++rate).
However if ISO tasks run for more than a tunable finite amount of time,
++they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
++time is the percentage of CPU available per CPU, configurable as a percentage in
++the following "resource handling" tunable (as opposed to a scheduler tunable):
++
++iso_cpu:
++
++	/proc/sys/kernel/iso_cpu
++
++and is set to 70% by default. It is calculated over a rolling 5 second average.
++Because it is the total CPU available, it means that on a multi CPU machine, it
++is possible to have an ISO task running as realtime scheduling indefinitely on
++just one CPU, as the other CPUs will be available. Setting this to 100 is the
++equivalent of giving all users SCHED_RR access and setting it to 0 removes the
++ability to run any pseudo-realtime tasks.
++
++A feature of MuQSS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
++
++
++Idleprio scheduling:
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a task
++_only_ when the CPU would be otherwise idle. The idea behind this is to allow
++ultra low priority tasks to be run in the background that have virtually no
++effect on the foreground tasks. This is ideally suited to distributed computing
++clients (like setiathome, folding, mprime etc) but can also be used to start a
++video encode or so on without any slowdown of other tasks. To prevent this policy
++from grabbing shared resources and holding them indefinitely, if it detects a
++state where the task is waiting on I/O, the machine is about to suspend to ram
++and so on, it will transiently schedule them as SCHED_NORMAL. Once a task has
++been scheduled as IDLEPRIO, it cannot be put back to SCHED_NORMAL without
++superuser privileges since it is effectively a lower scheduling policy. Tasks
++can be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++schedtool -D -e ./mprime
++
++Subtick accounting:
++
++It is surprisingly difficult to get accurate CPU accounting, and in many cases,
++the accounting is done by simply determining what is happening at the precise
++moment a timer tick fires off. This becomes increasingly inaccurate as the timer
++tick frequency (HZ) is lowered. It is possible to create an application which
++uses almost 100% CPU, yet by being descheduled at the right time, records zero
++CPU usage. While the main problem with this is that there are possible security
++implications, it is also difficult to determine how much CPU a task really does
++use. Mux uses sub-tick accounting from the TSC clock to determine real CPU
++usage. Thus, the amount of CPU reported as being used by MuQSS will more
++accurately represent how much CPU the task itself is using (as is shown for
++example by the 'time' application), so the reported values may be quite
++different to other schedulers. When comparing throughput of MuQSS to other
++designs, it is important to compare the actual completed work in terms of total
++wall clock time taken and total work done, rather than the reported "cpu usage".
++
++Symmetric MultiThreading (SMT) aware nice:
++
++SMT, a.k.a. hyperthreading, is a very common feature on modern CPUs.
While the ++logical CPU count rises by adding thread units to each CPU core, allowing more ++than one task to be run simultaneously on the same core, the disadvantage of it ++is that the CPU power is shared between the tasks, not summating to the power ++of two CPUs. The practical upshot of this is that two tasks running on ++separate threads of the same core run significantly slower than if they had one ++core each to run on. While smart CPU selection allows each task to have a core ++to itself whenever available (as is done on MuQSS), it cannot offset the ++slowdown that occurs when the cores are all loaded and only a thread is left. ++Most of the time this is harmless as the CPU is effectively overloaded at this ++point and the extra thread is of benefit. However when running a niced task in ++the presence of an un-niced task (say nice 19 v nice 0), the nice task gets ++precisely the same amount of CPU power as the unniced one. MuQSS has an ++optional configuration feature known as SMT-NICE which selectively idles the ++secondary niced thread for a period proportional to the nice difference, ++allowing CPU distribution according to nice level to be maintained, at the ++expense of a small amount of extra overhead. If this is configured in on a ++machine without SMT threads, the overhead is minimal. ++ ++ ++Con Kolivas <kernel@kolivas.org> Sat, 29th October 2016 +diff -Nur a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt +--- a/Documentation/sysctl/kernel.txt 2019-07-07 09:08:19.122347611 +0100 ++++ b/Documentation/sysctl/kernel.txt 2019-07-07 09:17:41.251241479 +0100 +@@ -41,6 +41,7 @@ + - hung_task_check_interval_secs + - hung_task_warnings + - hyperv_record_panic_msg ++- iso_cpu + - kexec_load_disabled + - kptr_restrict + - l2cr [ PPC only ] +@@ -77,6 +78,7 @@ + - randomize_va_space + - real-root-dev ==> Documentation/admin-guide/initrd.rst + - reboot-cmd [ SPARC only ] ++- rr_interval + - rtsig-max + - rtsig-nr + - sched_energy_aware +@@ -101,6 +103,7 @@ + - unknown_nmi_panic + - watchdog + - watchdog_thresh ++- yield_type + - version + + ============================================================== +@@ -439,6 +442,16 @@ + + ============================================================== + ++iso_cpu: (MuQSS CPU scheduler only). ++ ++This sets the percentage cpu that the unprivileged SCHED_ISO tasks can ++run effectively at realtime priority, averaged over a rolling five ++seconds over the -whole- system, meaning all cpus. ++ ++Set to 70 (percent) by default. ++ ++============================================================== ++ + l2cr: (PPC only) + + This flag controls the L2 cache of G3 processor boards. If +@@ -882,6 +895,20 @@ + + ============================================================== + ++rr_interval: (MuQSS CPU scheduler only) ++ ++This is the smallest duration that any cpu process scheduling unit ++will run for. Increasing this value can increase throughput of cpu ++bound tasks substantially but at the expense of increased latencies ++overall. Conversely decreasing it will decrease average and maximum ++latencies but at the expense of throughput. This value is in ++milliseconds and the default value chosen depends on the number of ++cpus available at scheduler initialisation with a minimum of 6. ++ ++Valid values are from 1-1000. 
++
++==============================================================
++
+ rtsig-max & rtsig-nr:
+
+ The file rtsig-max can be used to tune the maximum number
+@@ -1164,3 +1191,13 @@
+ tunable to zero will disable lockup detection altogether.
+
+ ==============================================================
++
++yield_type: (MuQSS CPU scheduler only)
++
++This determines what type of yield calls to sched_yield will perform.
++
++ 0: No yield.
++ 1: Yield only to better priority/deadline tasks. (default)
++ 2: Expire timeslice and recalculate deadline.
++
++==============================================================
+diff -Nur a/fs/proc/base.c b/fs/proc/base.c
+--- a/fs/proc/base.c	2019-06-25 04:34:56.000000000 +0100
++++ b/fs/proc/base.c	2019-07-07 09:17:41.251241479 +0100
+@@ -463,7 +463,7 @@
+ 		seq_puts(m, "0 0 0\n");
+ 	else
+ 		seq_printf(m, "%llu %llu %lu\n",
+-		   (unsigned long long)task->se.sum_exec_runtime,
++		   (unsigned long long)tsk_seruntime(task),
+ 		   (unsigned long long)task->sched_info.run_delay,
+ 		   task->sched_info.pcount);
+
+diff -Nur a/include/linux/init_task.h b/include/linux/init_task.h
+--- a/include/linux/init_task.h	2019-06-25 04:34:56.000000000 +0100
++++ b/include/linux/init_task.h	2019-07-07 09:17:41.251241479 +0100
+@@ -47,7 +47,11 @@
+ #define INIT_CPU_TIMERS(s)
+ #endif
+
++#ifdef CONFIG_SCHED_MUQSS
++#define INIT_TASK_COMM "MuQSS"
++#else
+ #define INIT_TASK_COMM "swapper"
++#endif
+
+ /* Attach to the init_task data structure for proper alignment */
+ #ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+diff -Nur a/include/linux/ioprio.h b/include/linux/ioprio.h
+--- a/include/linux/ioprio.h	2019-06-25 04:34:56.000000000 +0100
++++ b/include/linux/ioprio.h	2019-07-07 09:17:41.251241479 +0100
+@@ -53,6 +53,8 @@
+  */
+ static inline int task_nice_ioprio(struct task_struct *task)
+ {
++	if (iso_task(task))
++		return 0;
+ 	return (task_nice(task) + 20) / 5;
+ }
+
+diff -Nur a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
+--- a/include/linux/sched/nohz.h	2019-06-25 04:34:56.000000000 +0100
++++ b/include/linux/sched/nohz.h	2019-07-07 09:17:41.251241479 +0100
+@@ -6,7 +6,7 @@
+  * This is the interface between the scheduler and nohz/dynticks:
+  */
+
+-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
+ extern void cpu_load_update_nohz_start(void);
+ extern void cpu_load_update_nohz_stop(void);
+ #else
+@@ -21,7 +21,7 @@
+ static inline void nohz_balance_enter_idle(int cpu) { }
+ #endif
+
+-#ifdef CONFIG_NO_HZ_COMMON
++#if defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
+ void calc_load_nohz_start(void);
+ void calc_load_nohz_stop(void);
+ #else
+diff -Nur a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+--- a/include/linux/sched/prio.h	2019-06-25 04:34:56.000000000 +0100
++++ b/include/linux/sched/prio.h	2019-07-07 09:17:41.251241479 +0100
+@@ -20,8 +20,20 @@
+  */
+
+ #define MAX_USER_RT_PRIO	100
++
++#ifdef CONFIG_SCHED_MUQSS
++/* Note different MAX_RT_PRIO */
++#define MAX_RT_PRIO		(MAX_USER_RT_PRIO + 1)
++
++#define ISO_PRIO		(MAX_RT_PRIO)
++#define NORMAL_PRIO		(MAX_RT_PRIO + 1)
++#define IDLE_PRIO		(MAX_RT_PRIO + 2)
++#define PRIO_LIMIT		((IDLE_PRIO) + 1)
++#else /* CONFIG_SCHED_MUQSS */
+ #define MAX_RT_PRIO		MAX_USER_RT_PRIO
+
++#endif /* CONFIG_SCHED_MUQSS */
++
+ #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
+
+diff -Nur a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+--- a/include/linux/sched/rt.h	2019-06-25 04:34:56.000000000 +0100
++++ b/include/linux/sched/rt.h	2019-07-07 09:17:41.251241479 +0100
+@@ -24,8 +24,10 @@
+
+ 	if (policy == SCHED_FIFO || policy == SCHED_RR)
+ 		return true;
++#ifndef CONFIG_SCHED_MUQSS
+ 	if (policy == SCHED_DEADLINE)
+ 		return true;
++#endif
+ 	return false;
+ }
+
+diff -Nur a/include/linux/sched/task.h b/include/linux/sched/task.h
+--- a/include/linux/sched/task.h	2019-06-25 04:34:56.000000000 +0100
++++ b/include/linux/sched/task.h	2019-07-07 09:17:41.251241479 +0100
+@@ -82,7 +82,7 @@
+ extern void free_task(struct task_struct *tsk);
+
+ /* sched_exec is called by processes performing an exec */
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_MUQSS)
+ extern void sched_exec(void);
+ #else
+ #define sched_exec() {}
+diff -Nur a/include/linux/sched.h b/include/linux/sched.h
+--- a/include/linux/sched.h	2019-06-25 04:34:56.000000000 +0100
++++ b/include/linux/sched.h	2019-07-07 09:17:41.251241479 +0100
+@@ -30,6 +30,9 @@
+ #include <linux/mm_types_task.h>
+ #include <linux/task_io_accounting.h>
+ #include <linux/rseq.h>
++#ifdef CONFIG_SCHED_MUQSS
++#include <linux/skip_list.h>
++#endif
+
+ /* task_struct member predeclarations (sorted alphabetically): */
+ struct audit_context;
+@@ -605,9 +608,11 @@
+ 	unsigned int			flags;
+ 	unsigned int			ptrace;
+
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_MUQSS)
++	int				on_cpu;
++#endif
+ #ifdef CONFIG_SMP
+ 	struct llist_node		wake_entry;
+-	int				on_cpu;
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ 	/* Current CPU: */
+ 	unsigned int			cpu;
+@@ -632,10 +637,25 @@
+ 	int				static_prio;
+ 	int				normal_prio;
+ 	unsigned int			rt_priority;
++#ifdef CONFIG_SCHED_MUQSS
++	int				time_slice;
++	u64				deadline;
++	skiplist_node			node;	/* Skip list node */
++	u64				last_ran;
++	u64				sched_time;	/* sched_clock time spent running */
++#ifdef CONFIG_SMT_NICE
++	int				smt_bias;	/* Policy/nice level bias across smt siblings */
++#endif
++#ifdef CONFIG_HOTPLUG_CPU
++	bool				zerobound;	/* Bound to CPU0 for hotplug */
++#endif
++	unsigned long			rt_timeout;
++#else /* CONFIG_SCHED_MUQSS */
+
+ 	const struct sched_class	*sched_class;
+ 	struct sched_entity		se;
+ 	struct sched_rt_entity		rt;
++#endif
+ #ifdef CONFIG_CGROUP_SCHED
+ 	struct task_group		*sched_task_group;
+ #endif
+@@ -791,6 +811,10 @@
+ 	u64				utimescaled;
+ 	u64				stimescaled;
+ #endif
++#ifdef CONFIG_SCHED_MUQSS
++	/* Unbanked cpu time */
++	unsigned long			utime_ns, stime_ns;
++#endif
+ 	u64				gtime;
+ 	struct prev_cputime		prev_cputime;
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+@@ -1217,6 +1241,40 @@
+ 	 */
+ };
+
++#ifdef CONFIG_SCHED_MUQSS
++#define tsk_seruntime(t)	((t)->sched_time)
++#define tsk_rttimeout(t)	((t)->rt_timeout)
++
++static inline void tsk_cpus_current(struct task_struct *p)
++{
++}
++
++void print_scheduler_version(void);
++
++static inline bool iso_task(struct task_struct *p)
++{
++	return (p->policy == SCHED_ISO);
++}
++#else /* CFS */
++#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t)	((t)->rt.timeout)
++
++static inline void tsk_cpus_current(struct task_struct *p)
++{
++	p->nr_cpus_allowed = current->nr_cpus_allowed;
++}
++
++static inline void print_scheduler_version(void)
++{
++	printk(KERN_INFO "CFS CPU scheduler.\n");
++}
++
++static inline bool iso_task(struct task_struct *p)
++{
++	return false;
++}
++#endif /* CONFIG_SCHED_MUQSS */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ 	return task->thread_pid;
+diff -Nur a/include/linux/skip_list.h b/include/linux/skip_list.h
+--- a/include/linux/skip_list.h	1970-01-01 01:00:00.000000000 +0100
++++ b/include/linux/skip_list.h	2019-07-07 09:17:41.251241479 +0100
+@@ -0,0 +1,33 @@
++#ifndef _LINUX_SKIP_LISTS_H
++#define _LINUX_SKIP_LISTS_H
++typedef u64 keyType;
++typedef void *valueType;
++
++typedef struct nodeStructure skiplist_node;
++
++struct nodeStructure {
++	int level;	/* Levels in this structure */
++	keyType key;
++	valueType value;
++	skiplist_node *next[8];
++	skiplist_node *prev[8];
++};
++
++typedef struct listStructure {
++	int entries;
++	int level;	/* Maximum level of the list
++			(1 more than the number of levels in the list) */
++	skiplist_node *header;	/* pointer to header */
++} skiplist;
++
++void skiplist_init(skiplist_node *slnode);
++skiplist *new_skiplist(skiplist_node *slnode);
++void free_skiplist(skiplist *l);
++void skiplist_node_init(skiplist_node *node);
++void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed);
++void skiplist_delete(skiplist *l, skiplist_node *node);
++
++static inline bool skiplist_node_empty(skiplist_node *node) {
++	return (!node->next[0]);
++}
++#endif /* _LINUX_SKIP_LISTS_H */
+diff -Nur a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
+--- a/include/uapi/linux/sched.h	2019-06-25 04:34:56.000000000 +0100
++++ b/include/uapi/linux/sched.h	2019-07-07 09:17:41.251241479 +0100
+@@ -37,9 +37,16 @@
+ #define SCHED_FIFO		1
+ #define SCHED_RR		2
+ #define SCHED_BATCH		3
+-/* SCHED_ISO: reserved but not implemented yet */
++/* SCHED_ISO: Implemented on MuQSS only */
+ #define SCHED_IDLE		5
++#ifdef CONFIG_SCHED_MUQSS
++#define SCHED_ISO		4
++#define SCHED_IDLEPRIO		SCHED_IDLE
++#define SCHED_MAX		(SCHED_IDLEPRIO)
++#define SCHED_RANGE(policy)	((policy) <= SCHED_MAX)
++#else /* CONFIG_SCHED_MUQSS */
+ #define SCHED_DEADLINE		6
++#endif /* CONFIG_SCHED_MUQSS */
+
+ /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
+ #define SCHED_RESET_ON_FORK	0x40000000
+diff -Nur a/init/init_task.c b/init/init_task.c
+--- a/init/init_task.c	2019-06-25 04:34:56.000000000 +0100
++++ b/init/init_task.c	2019-07-07 09:17:41.251241479 +0100
+@@ -68,9 +68,17 @@
+ 	.stack		= init_stack,
+ 	.usage		= REFCOUNT_INIT(2),
+ 	.flags		= PF_KTHREAD,
++#ifdef CONFIG_SCHED_MUQSS
++	.prio		= NORMAL_PRIO,
++	.static_prio	= MAX_PRIO-20,
++	.normal_prio	= NORMAL_PRIO,
++	.deadline	= 0,
++	.time_slice	= 1000000,
++#else
+ 	.prio		= MAX_PRIO - 20,
+ 	.static_prio	= MAX_PRIO - 20,
+ 	.normal_prio	= MAX_PRIO - 20,
++#endif
+ 	.policy		= SCHED_NORMAL,
+ 	.cpus_allowed	= CPU_MASK_ALL,
+ 	.nr_cpus_allowed= NR_CPUS,
+@@ -79,6 +87,7 @@
+ 	.restart_block	= {
+ 		.fn = do_no_restart_syscall,
+ 	},
++#ifndef CONFIG_SCHED_MUQSS
+ 	.se		= {
+ 		.group_node	= LIST_HEAD_INIT(init_task.se.group_node),
+ 	},
+@@ -86,6 +95,7 @@
+ 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
+ 		.time_slice	= RR_TIMESLICE,
+ 	},
++#endif
+ 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
+ #ifdef CONFIG_SMP
+ 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+diff -Nur a/init/Kconfig b/init/Kconfig
+--- a/init/Kconfig	2019-07-07 09:08:19.142348283 +0100
++++ b/init/Kconfig	2019-07-07 09:17:41.251241479 +0100
+@@ -64,6 +64,18 @@
+
+ menu "General setup"
+
++config SCHED_MUQSS
++	bool "MuQSS cpu scheduler"
++	select HIGH_RES_TIMERS
++	---help---
++	  The Multiple Queue Skiplist Scheduler for excellent interactivity and
++	  responsiveness on the desktop and highly scalable deterministic
++	  low latency on any hardware.
++
++	  Say Y here.
++	default y
++
++
+ config BROKEN
+ 	bool
+
+@@ -703,6 +715,7 @@
+ 	depends on ARCH_SUPPORTS_NUMA_BALANCING
+ 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ 	depends on SMP && NUMA && MIGRATION
++	depends on !SCHED_MUQSS
+ 	help
+ 	  This option adds support for automatic NUMA aware memory/task placement.
+ 	  The mechanism is quite primitive and is based on migrating memory when
+@@ -810,9 +823,13 @@
+ 	help
+ 	  This feature lets CPU scheduler recognize task groups and control CPU
+ 	  bandwidth allocation to such task groups. It uses cgroups to group
+-	  tasks.
++	  tasks. In combination with MuQSS this is purely a STUB to create the
++	  files associated with the CPU controller cgroup but most of the
++	  controls do nothing. This is useful for working in environments and
++	  with applications that will only work if this control group is
++	  present.
+
+-if CGROUP_SCHED
++if CGROUP_SCHED && !SCHED_MUQSS
+ config FAIR_GROUP_SCHED
+ 	bool "Group scheduling for SCHED_OTHER"
+ 	depends on CGROUP_SCHED
+@@ -919,6 +936,7 @@
+
+ config CGROUP_CPUACCT
+ 	bool "Simple CPU accounting controller"
++	depends on !SCHED_MUQSS
+ 	help
+ 	  Provides a simple controller for monitoring the
+ 	  total CPU consumed by the tasks in a cgroup.
+@@ -1037,6 +1055,7 @@
+
+ config SCHED_AUTOGROUP
+ 	bool "Automatic process group scheduling"
++	depends on !SCHED_MUQSS
+ 	select CGROUPS
+ 	select CGROUP_SCHED
+ 	select FAIR_GROUP_SCHED
+diff -Nur a/init/main.c b/init/main.c
+--- a/init/main.c	2019-06-25 04:34:56.000000000 +0100
++++ b/init/main.c	2019-07-07 09:17:41.251241479 +0100
+@@ -1083,6 +1083,8 @@
+
+ 	rcu_end_inkernel_boot();
+
++	print_scheduler_version();
++
+ 	if (ramdisk_execute_command) {
+ 		ret = run_init_process(ramdisk_execute_command);
+ 		if (!ret)
+diff -Nur a/kernel/delayacct.c b/kernel/delayacct.c
+--- a/kernel/delayacct.c	2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/delayacct.c	2019-07-07 09:17:41.251241479 +0100
+@@ -115,7 +115,7 @@
+ 	 */
+ 	t1 = tsk->sched_info.pcount;
+ 	t2 = tsk->sched_info.run_delay;
+-	t3 = tsk->se.sum_exec_runtime;
++	t3 = tsk_seruntime(tsk);
+
+ 	d->cpu_count += t1;
+
+diff -Nur a/kernel/exit.c b/kernel/exit.c
+--- a/kernel/exit.c	2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/exit.c	2019-07-07 09:17:41.251241479 +0100
+@@ -130,7 +130,7 @@
+ 		sig->curr_target = next_thread(tsk);
+ 	}
+
+-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++	add_device_randomness((const void*) &tsk_seruntime(tsk),
+ 			      sizeof(unsigned long long));
+
+ 	/*
+@@ -151,7 +151,7 @@
+ 		sig->inblock += task_io_get_inblock(tsk);
+ 		sig->oublock += task_io_get_oublock(tsk);
+ 		task_io_accounting_add(&sig->ioac, &tsk->ioac);
+-		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++		sig->sum_sched_runtime += tsk_seruntime(tsk);
+ 		sig->nr_threads--;
+ 		__unhash_process(tsk, group_dead);
+ 		write_sequnlock(&sig->stats_lock);
+diff -Nur a/kernel/Kconfig.MuQSS b/kernel/Kconfig.MuQSS
+--- a/kernel/Kconfig.MuQSS	1970-01-01 01:00:00.000000000 +0100
++++ b/kernel/Kconfig.MuQSS	2019-07-07 09:17:41.251241479 +0100
+@@ -0,0 +1,89 @@
++choice
++	prompt "CPU scheduler runqueue sharing"
++	default RQ_MC if SCHED_MUQSS
++	default RQ_NONE
++
++config RQ_NONE
++	bool "No sharing"
++	help
++	  This is the default behaviour where the CPU scheduler has one runqueue
++	  per CPU, whether it is a physical or logical CPU (hyperthread).
++
++	  This can still be enabled runtime with the boot parameter
++	  rqshare=none
++
++	  If unsure, say N.
++
++config RQ_SMT
++	bool "SMT (hyperthread) siblings"
++	depends on SCHED_SMT && SCHED_MUQSS
++
++	help
++	  With this option enabled, the CPU scheduler will have one runqueue
++	  shared by SMT (hyperthread) siblings. As these logical cores share
++	  one physical core, sharing the runqueue resource can lead to decreased
++	  overhead, lower latency and higher throughput.
++
++	  This can still be enabled runtime with the boot parameter
++	  rqshare=smt
++
++	  If unsure, say N.
++
++config RQ_MC
++	bool "Multicore siblings"
++	depends on SCHED_MC && SCHED_MUQSS
++	help
++	  With this option enabled, the CPU scheduler will have one runqueue
++	  shared by multicore siblings in addition to any SMT siblings.
++	  As these physical cores share caches, sharing the runqueue resource
++	  will lead to lower latency, but its effects on overhead and throughput
++	  are less predictable. As a general rule, 6 or fewer cores will likely
++	  benefit from this, while larger CPUs will only derive a latency
++	  benefit. If your workloads are primarily single threaded, this will
++	  possibly worsen throughput. If you are only concerned about latency
++	  then enable this regardless of how many cores you have.
++
++	  This can still be enabled runtime with the boot parameter
++	  rqshare=mc
++
++	  If unsure, say Y.
++
++config RQ_SMP
++	bool "Symmetric Multi-Processing"
++	depends on SMP && SCHED_MUQSS
++	help
++	  With this option enabled, the CPU scheduler will have one runqueue
++	  shared by all physical CPUs unless they are on separate NUMA nodes.
++	  As physical CPUs usually do not share resources, sharing the runqueue
++	  will normally worsen throughput but improve latency. If you only
++	  care about latency enable this.
++
++	  This can still be enabled runtime with the boot parameter
++	  rqshare=smp
++
++	  If unsure, say N.
++
++config RQ_ALL
++	bool "NUMA"
++	depends on SMP && SCHED_MUQSS
++	help
++	  With this option enabled, the CPU scheduler will have one runqueue
++	  regardless of the architecture configuration, including across NUMA
++	  nodes. This can substantially decrease throughput in NUMA
++	  configurations, but light NUMA designs will not be dramatically
++	  affected. This option should only be chosen if latency is the prime
++	  concern.
++
++	  This can still be enabled runtime with the boot parameter
++	  rqshare=all
++
++	  If unsure, say N.
++endchoice
++
++config SHARERQ
++	int
++	default 0 if RQ_NONE
++	default 1 if RQ_SMT
++	default 2 if RQ_MC
++	default 3 if RQ_SMP
++	default 4 if RQ_ALL
+diff -Nur a/kernel/kthread.c b/kernel/kthread.c
+--- a/kernel/kthread.c	2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/kthread.c	2019-07-07 09:17:41.261241813 +0100
+@@ -431,6 +431,34 @@
+ }
+ EXPORT_SYMBOL(kthread_bind);
+
++#if defined(CONFIG_SCHED_MUQSS) && defined(CONFIG_SMP)
++extern void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
++
++/*
++ * new_kthread_bind is a special variant of __kthread_bind_mask.
++ * For new threads to work on muqss we want to call do_set_cpus_allowed
++ * without the task_cpu being set and the task rescheduled until they're
++ * rescheduled on their own so we call __do_set_cpus_allowed directly which
++ * only changes the cpumask. This is particularly important for smpboot threads
++ * to work.
++ */
++static void new_kthread_bind(struct task_struct *p, unsigned int cpu)
++{
++	unsigned long flags;
++
++	if (WARN_ON(!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)))
++		return;
++
++	/* It's safe because the task is inactive. */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	__do_set_cpus_allowed(p, cpumask_of(cpu));
++	p->flags |= PF_NO_SETAFFINITY;
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++#else
++#define new_kthread_bind(p, cpu) kthread_bind(p, cpu)
++#endif
++
+ /**
+  * kthread_create_on_cpu - Create a cpu bound kthread
+  * @threadfn: the function to run until signal_pending(current).
+@@ -452,7 +480,7 @@
+ 				   cpu);
+ 	if (IS_ERR(p))
+ 		return p;
+-	kthread_bind(p, cpu);
++	new_kthread_bind(p, cpu);
+ 	/* CPU hotplug need to bind once again when unparking the thread. */
+ 	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
+ 	to_kthread(p)->cpu = cpu;
+diff -Nur a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
+--- a/kernel/livepatch/transition.c	2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/livepatch/transition.c	2019-07-07 09:17:41.261241813 +0100
+@@ -298,7 +298,7 @@
+ static bool klp_try_switch_task(struct task_struct *task)
+ {
+ 	struct rq *rq;
+-	struct rq_flags flags;
++	struct rq_flags rf;
+ 	int ret;
+ 	bool success = false;
+ 	char err_buf[STACK_ERR_BUF_SIZE];
+@@ -314,7 +314,7 @@
+ 	 * functions. If all goes well, switch the task to the target patch
+ 	 * state.
+ 	 */
+-	rq = task_rq_lock(task, &flags);
++	rq = task_rq_lock(task, &rf);
+
+ 	if (task_running(rq, task) && task != current) {
+ 		snprintf(err_buf, STACK_ERR_BUF_SIZE,
+@@ -333,7 +333,7 @@
+ 	task->patch_state = klp_target_state;
+
+ done:
+-	task_rq_unlock(rq, task, &flags);
++	task_rq_unlock(rq, task, &rf);
+
+ 	/*
+ 	 * Due to console deadlock issues, pr_debug() can't be used while
+diff -Nur a/kernel/Makefile b/kernel/Makefile
+--- a/kernel/Makefile	2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/Makefile	2019-07-07 09:17:41.251241479 +0100
+@@ -10,7 +10,7 @@
+ 	    extable.o params.o \
+ 	    kthread.o sys_ni.o nsproxy.o \
+ 	    notifier.o ksysfs.o cred.o reboot.o \
+-	    async.o range.o smpboot.o ucount.o
++	    async.o range.o smpboot.o ucount.o skip_list.o
+
+ obj-$(CONFIG_MODULES) += kmod.o
+ obj-$(CONFIG_MULTIUSER) += groups.o
+diff -Nur a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+--- a/kernel/sched/cpufreq_schedutil.c	2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/sched/cpufreq_schedutil.c	2019-07-07 09:17:41.261241813 +0100
+@@ -175,6 +175,12 @@
+ 	return cpufreq_driver_resolve_freq(policy, freq);
+ }
+
++#ifdef CONFIG_SCHED_MUQSS
++#define rt_rq_runnable(rq_rt) rt_rq_is_runnable(rq)
++#else
++#define rt_rq_runnable(rq_rt) rt_rq_is_runnable(&rq->rt)
++#endif
++
+ /*
+  * This function computes an effective utilization for the given CPU, to be
+  * used for frequency selection given the linear relation: f = u * f_max.
+@@ -201,7 +207,7 @@
+ 	unsigned long dl_util, util, irq;
+ 	struct rq *rq = cpu_rq(cpu);
+
+-	if (type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt))
++	if (type == FREQUENCY_UTIL && rt_rq_runnable(rq))
+ 		return max;
+
+ 	/*
+@@ -635,7 +641,11 @@
+ 	struct task_struct *thread;
+ 	struct sched_attr attr = {
+ 		.size		= sizeof(struct sched_attr),
++#ifdef CONFIG_SCHED_MUQSS
++		.sched_policy	= SCHED_RR,
++#else
+ 		.sched_policy	= SCHED_DEADLINE,
++#endif
+ 		.sched_flags	= SCHED_FLAG_SUGOV,
+ 		.sched_nice	= 0,
+ 		.sched_priority	= 0,
+diff -Nur a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h
+--- a/kernel/sched/cpupri.h	2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/sched/cpupri.h	2019-07-07 09:17:41.261241813 +0100
+@@ -17,9 +17,11 @@
+ 	int			*cpu_to_pri;
+ };
+
++#ifndef CONFIG_SCHED_MUQSS
+ #ifdef CONFIG_SMP
+ int  cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask);
+ void cpupri_set(struct cpupri *cp, int cpu, int pri);
+ int  cpupri_init(struct cpupri *cp);
+ void cpupri_cleanup(struct cpupri *cp);
+ #endif
++#endif
+diff -Nur a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+--- a/kernel/sched/cputime.c	2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/sched/cputime.c	2019-07-07 09:17:41.261241813 +0100
+@@ -265,26 +265,6 @@
+ 	return accounted;
+ }
+
+-#ifdef CONFIG_64BIT
+-static inline u64 read_sum_exec_runtime(struct task_struct *t)
+-{
+-	return t->se.sum_exec_runtime;
+-}
+-#else
+-static u64 read_sum_exec_runtime(struct task_struct *t)
+-{
+-	u64 ns;
+-	struct rq_flags rf;
+-	struct rq *rq;
+-
+-	rq = task_rq_lock(t, &rf);
+-	ns = t->se.sum_exec_runtime;
+-	task_rq_unlock(rq, t, &rf);
+-
+-	return ns;
+-}
+-#endif
+-
+ /*
+  * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
+  * tasks (sum on group iteration) belonging to @tsk's group.
+@@ -662,7 +642,7 @@
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ 	struct task_cputime cputime = {
+-		.sum_exec_runtime = p->se.sum_exec_runtime,
++		.sum_exec_runtime = tsk_seruntime(p),
+ 	};
+
+ 	task_cputime(p, &cputime.utime, &cputime.stime);
+diff -Nur a/kernel/sched/idle.c b/kernel/sched/idle.c
+--- a/kernel/sched/idle.c	2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/sched/idle.c	2019-07-07 09:17:41.261241813 +0100
+@@ -224,6 +224,8 @@
+ static void do_idle(void)
+ {
+ 	int cpu = smp_processor_id();
++	bool pending = false;
++
+ 	/*
+ 	 * If the arch has a polling bit, we maintain an invariant:
+ 	 *
+@@ -234,7 +236,10 @@
+ 	 */
+
+ 	__current_set_polling();
+-	tick_nohz_idle_enter();
++	if (unlikely(softirq_pending(cpu)))
++		pending = true;
++	else
++		tick_nohz_idle_enter();
+
+ 	while (!need_resched()) {
+ 		check_pgt_cache();
+@@ -272,7 +277,8 @@
+ 	 * an IPI to fold the state for us.
+ 	 */
+ 	preempt_set_need_resched();
+-	tick_nohz_idle_exit();
++	if (!pending)
++		tick_nohz_idle_exit();
+ 	__current_clr_polling();
+
+ 	/*
+@@ -353,6 +359,7 @@
+ 	do_idle();
+ }
+
++#ifndef CONFIG_SCHED_MUQSS
+ /*
+  * idle-task scheduling class.
+  */
+@@ -465,3 +472,4 @@
+ 	.switched_to		= switched_to_idle,
+ 	.update_curr		= update_curr_idle,
+ };
++#endif /* CONFIG_SCHED_MUQSS */
+diff -Nur a/kernel/sched/Makefile b/kernel/sched/Makefile
+--- a/kernel/sched/Makefile	2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/sched/Makefile	2019-07-07 09:17:41.261241813 +0100
+@@ -16,15 +16,23 @@
+ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
+ endif
+
++ifdef CONFIG_SCHED_MUQSS
++obj-y += MuQSS.o clock.o cputime.o
++obj-y += idle.o
++obj-y += wait.o wait_bit.o swait.o completion.o
++
++obj-$(CONFIG_SMP) += topology.o
++else
+ obj-y += core.o loadavg.o clock.o cputime.o
+ obj-y += idle.o fair.o rt.o deadline.o
+ obj-y += wait.o wait_bit.o swait.o completion.o
+
+ obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
+-obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_SCHED_DEBUG) += debug.o
+ obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
++endif
++obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
+ obj-$(CONFIG_MEMBARRIER) += membarrier.o
+diff -Nur a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
+--- a/kernel/sched/MuQSS.c	1970-01-01 01:00:00.000000000 +0100
++++ b/kernel/sched/MuQSS.c	2019-07-07 09:17:41.261241813 +0100
+@@ -0,0 +1,7496 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ *  kernel/sched/MuQSS.c, was kernel/sched.c
++ *
++ *  Kernel scheduler and related syscalls
++ *
++ *  Copyright (C) 1991-2002  Linus Torvalds
++ *
++ *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
++ *		make semaphores SMP safe
++ *  1998-11-19	Implemented schedule_timeout() and related stuff
++ *		by Andrea Arcangeli
++ *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
++ *		hybrid priority-list and round-robin design with
++ *		an array-switch method of distributing timeslices
++ *		and per-CPU runqueues.  Cleanups and useful suggestions
++ *		by Davide Libenzi, preemptible kernel bits by Robert Love.
++ *  2003-09-03	Interactivity tuning by Con Kolivas.
++ *  2004-04-02	Scheduler domains code by Nick Piggin
++ *  2007-04-15  Work begun on replacing all interactivity tuning with a
++ *              fair scheduling design by Con Kolivas.
++ *  2007-05-05  Load balancing (smp-nice) and other improvements
++ *              by Peter Williams
++ *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
++ *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
++ *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
++ *              Thomas Gleixner, Mike Kravetz
++ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
++ *              a whole lot of those previous things.
++ *  2016-10-01  Multiple Queue Skiplist Scheduler scalable evolution of BFS
++ *		scheduler by Con Kolivas.
++ */
++
++#include <linux/sched/isolation.h>
++#include <linux/sched/loadavg.h>
++
++#include <linux/binfmts.h>
++#include <linux/blkdev.h>
++#include <linux/compat.h>
++#include <linux/context_tracking.h>
++#include <linux/cpuset.h>
++#include <linux/delayacct.h>
++#include <linux/init_task.h>
++#include <linux/kcov.h>
++#include <linux/kprobes.h>
++#include <linux/mmu_context.h>
++#include <linux/module.h>
++#include <linux/nmi.h>
++#include <linux/prefetch.h>
++#include <linux/profile.h>
++#include <linux/rcupdate_wait.h>
++#include <linux/sched.h>
++#include <linux/security.h>
++#include <linux/skip_list.h>
++#include <linux/syscalls.h>
++#include <linux/tick.h>
++#include <linux/wait_bit.h>
++
++#include <asm/irq_regs.h>
++#include <asm/switch_to.h>
++#include <asm/tlb.h>
++
++#include "../workqueue_internal.h"
++#include "../smpboot.h"
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++
++#include "MuQSS.h"
++
++#define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
++#define rt_task(p)		rt_prio((p)->prio)
++#define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
++#define is_rt_policy(policy)	((policy) == SCHED_FIFO || \
++					(policy) == SCHED_RR)
++#define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))
++
++#define is_idle_policy(policy)	((policy) == SCHED_IDLEPRIO)
++#define idleprio_task(p)	unlikely(is_idle_policy((p)->policy))
++#define task_running_idle(p)	unlikely((p)->prio == IDLE_PRIO)
++
++#define is_iso_policy(policy)	((policy) == SCHED_ISO)
++#define iso_task(p)		unlikely(is_iso_policy((p)->policy))
++#define task_running_iso(p)	unlikely((p)->prio == ISO_PRIO)
++
++#define rq_idle(rq)		((rq)->rq_prio == PRIO_LIMIT)
++
++#define ISO_PERIOD		(5 * HZ)
++
++#define STOP_PRIO		(MAX_RT_PRIO - 1)
++
++/*
++ * Some helpers for converting to/from various scales. Use shifts to get
++ * approximate multiples of ten for less overhead.
++ */
++#define APPROX_NS_PS		(1073741824)	/* Approximate ns per second */
++#define JIFFIES_TO_NS(TIME)	((TIME) * (APPROX_NS_PS / HZ))
++#define JIFFY_NS		(APPROX_NS_PS / HZ)
++#define JIFFY_US		(1048576 / HZ)
++#define NS_TO_JIFFIES(TIME)	((TIME) / JIFFY_NS)
++#define HALF_JIFFY_NS		(APPROX_NS_PS / HZ / 2)
++#define HALF_JIFFY_US		(1048576 / HZ / 2)
++#define MS_TO_NS(TIME)		((TIME) << 20)
++#define MS_TO_US(TIME)		((TIME) << 10)
++#define NS_TO_MS(TIME)		((TIME) >> 20)
++#define NS_TO_US(TIME)		((TIME) >> 10)
++#define US_TO_NS(TIME)		((TIME) << 10)
++#define TICK_APPROX_NS		((APPROX_NS_PS+HZ/2)/HZ)
++
++#define RESCHED_US	(100)	/* Reschedule if less than this many μs left */
++
++void print_scheduler_version(void)
++{
++	printk(KERN_INFO "MuQSS CPU scheduler v0.192 by Con Kolivas.\n");
++}
++
++#define RQSHARE_NONE 0
++#define RQSHARE_SMT 1
++#define RQSHARE_MC 2
++#define RQSHARE_SMP 3
++#define RQSHARE_ALL 4
++
++/*
++ * This determines what level of runqueue sharing will be done and is
++ * configurable at boot time with the bootparam rqshare =
++ */
++static int rqshare __read_mostly = CONFIG_SHARERQ; /* Default RQSHARE_MC */
++
++static int __init set_rqshare(char *str)
++{
++	if (!strncmp(str, "none", 4)) {
++		rqshare = RQSHARE_NONE;
++		return 0;
++	}
++	if (!strncmp(str, "smt", 3)) {
++		rqshare = RQSHARE_SMT;
++		return 0;
++	}
++	if (!strncmp(str, "mc", 2)) {
++		rqshare = RQSHARE_MC;
++		return 0;
++	}
++	if (!strncmp(str, "smp", 3)) {
++		rqshare = RQSHARE_SMP;
++		return 0;
++	}
++	if (!strncmp(str, "all", 3)) {
++		rqshare = RQSHARE_ALL;
++		return 0;
++	}
++	return 1;
++}
++__setup("rqshare=", set_rqshare);
++
++/*
++ * This is the time all tasks within the same priority round robin.
++ * Value is in ms and set to a minimum of 6ms.
++ * Tunable via /proc interface.
++ */
++int rr_interval __read_mostly = 6;
++
++/*
++ * Tunable to choose whether to prioritise latency or throughput, simple
++ * binary yes or no
++ */
++int sched_interactive __read_mostly = 1;
++
++/*
++ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
++ * are allowed to run five seconds as real time tasks. This is the total over
++ * all online cpus.
++ */
++int sched_iso_cpu __read_mostly = 70;
++
++/*
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Yield only to better priority/deadline tasks. (default)
++ * 2: Expire timeslice and recalculate deadline.
++ */
++int sched_yield_type __read_mostly = 1;
++
++/*
++ * The relative length of deadline for each priority(nice) level.
++ */
++static int prio_ratios[NICE_WIDTH] __read_mostly;
++
++
++/*
++ * The quota handed out to tasks of all priority levels when refilling their
++ * time_slice.
++ */
++static inline int timeslice(void)
++{
++	return MS_TO_US(rr_interval);
++}
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifdef CONFIG_SMP
++/*
++ * Total number of runqueues. Equals number of CPUs when there is no runqueue
++ * sharing but is usually less with SMT/MC sharing of runqueues.
++ */
++static int total_runqueues __read_mostly = 1;
++
++static cpumask_t cpu_idle_map ____cacheline_aligned_in_smp;
++
++struct rq *cpu_rq(int cpu)
++{
++	return &per_cpu(runqueues, (cpu));
++}
++#define cpu_curr(cpu)	(cpu_rq(cpu)->curr)
++
++/*
++ * For asym packing, by default the lower numbered cpu has higher priority.
++ */
++int __weak arch_asym_cpu_priority(int cpu)
++{
++	return -cpu;
++}
++
++int __weak arch_sd_sibling_asym_packing(void)
++{
++	return 0*SD_ASYM_PACKING;
++}
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++#endif
++
++#else
++struct rq *uprq;
++#endif /* CONFIG_SMP */
++
++#include "stats.h"
++
++/*
++ * All common locking functions performed on rq->lock. rq->clock is local to
++ * the CPU accessing it so it can be modified just with interrupts disabled
++ * when we're not updating niffies.
++ * Looking up task_rq must be done under rq->lock to be safe.
++ */
++
++/*
++ * RQ-clock updating methods:
++ */
++
++#ifdef HAVE_SCHED_AVG_IRQ
++static void update_irq_load_avg(struct rq *rq, long delta);
++#else
++static inline void update_irq_load_avg(struct rq *rq, long delta) {}
++#endif
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compile should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++	s64 __maybe_unused steal = 0, irq_delta = 0;
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++	/*
++	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
++	 * this case when a previous update_rq_clock() happened inside a
++	 * {soft,}irq region.
++	 *
++	 * When this happens, we stop ->clock_task and only update the
++	 * prev_irq_time stamp to account for the part that fit, so that a next
++	 * update will consume the rest. This ensures ->clock_task is
++	 * monotonic.
++	 *
++	 * It does however cause some slight miss-attribution of {soft,}irq
++	 * time, a more accurate solution would be to update the irq_time using
++	 * the current rq->clock timestamp, except that would require using
++	 * atomic ops.
++	 */
++	if (irq_delta > delta)
++		irq_delta = delta;
++
++	rq->prev_irq_time += irq_delta;
++	delta -= irq_delta;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	if (static_key_false((&paravirt_steal_rq_enabled))) {
++		steal = paravirt_steal_clock(cpu_of(rq));
++		steal -= rq->prev_steal_time_rq;
++
++		if (unlikely(steal > delta))
++			steal = delta;
++
++		rq->prev_steal_time_rq += steal;
++		delta -= steal;
++	}
++#endif
++	rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	if (irq_delta + steal)
++		update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++	if (unlikely(delta < 0))
++		return;
++	rq->clock += delta;
++	update_rq_clock_task(rq, delta);
++}
++
++/*
++ * Niffies are a globally increasing nanosecond counter. They're only used by
++ * update_load_avg and time_slice_expired, however deadlines are based on them
++ * across CPUs. Update them whenever we will call one of those functions, and
++ * synchronise them across CPUs whenever we hold both runqueue locks.
++ */
++static inline void update_clocks(struct rq *rq)
++{
++	s64 ndiff, minndiff;
++	long jdiff;
++
++	update_rq_clock(rq);
++	ndiff = rq->clock - rq->old_clock;
++	rq->old_clock = rq->clock;
++	jdiff = jiffies - rq->last_jiffy;
++
++	/* Subtract any niffies added by balancing with other rqs */
++	ndiff -= rq->niffies - rq->last_niffy;
++	minndiff = JIFFIES_TO_NS(jdiff) - rq->niffies + rq->last_jiffy_niffies;
++	if (minndiff < 0)
++		minndiff = 0;
++	ndiff = max(ndiff, minndiff);
++	rq->niffies += ndiff;
++	rq->last_niffy = rq->niffies;
++	if (jdiff) {
++		rq->last_jiffy += jdiff;
++		rq->last_jiffy_niffies = rq->niffies;
++	}
++}
++
++/*
++ * Any time we have two runqueues locked we use that as an opportunity to
++ * synchronise niffies to the highest value as idle ticks may have artificially
++ * kept niffies low on one CPU and the truth can only be later.
++ */
++static inline void synchronise_niffies(struct rq *rq1, struct rq *rq2)
++{
++	if (rq1->niffies > rq2->niffies)
++		rq2->niffies = rq1->niffies;
++	else
++		rq1->niffies = rq2->niffies;
++}
++
++/*
++ * double_rq_lock - safely lock two runqueues
++ *
++ * Note this does not disable interrupts like task_rq_lock,
++ * you need to do so manually before calling.
++ */
++
++/* For when we know rq1 != rq2 */
++static inline void __double_rq_lock(struct rq *rq1, struct rq *rq2)
++	__acquires(rq1->lock)
++	__acquires(rq2->lock)
++{
++	if (rq1 < rq2) {
++		raw_spin_lock(rq1->lock);
++		raw_spin_lock_nested(rq2->lock, SINGLE_DEPTH_NESTING);
++	} else {
++		raw_spin_lock(rq2->lock);
++		raw_spin_lock_nested(rq1->lock, SINGLE_DEPTH_NESTING);
++	}
++}
++
++static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
++	__acquires(rq1->lock)
++	__acquires(rq2->lock)
++{
++	BUG_ON(!irqs_disabled());
++	if (rq1->lock == rq2->lock) {
++		raw_spin_lock(rq1->lock);
++		__acquire(rq2->lock);	/* Fake it out ;) */
++	} else
++		__double_rq_lock(rq1, rq2);
++	synchronise_niffies(rq1, rq2);
++}
++
++/*
++ * double_rq_unlock - safely unlock two runqueues
++ *
++ * Note this does not restore interrupts like task_rq_unlock,
++ * you need to do so manually after calling.
++ */
++static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
++	__releases(rq1->lock)
++	__releases(rq2->lock)
++{
++	raw_spin_unlock(rq1->lock);
++	if (rq1->lock != rq2->lock)
++		raw_spin_unlock(rq2->lock);
++	else
++		__release(rq2->lock);
++}
++
++static inline void lock_all_rqs(void)
++{
++	int cpu;
++
++	preempt_disable();
++	for_each_possible_cpu(cpu) {
++		struct rq *rq = cpu_rq(cpu);
++
++		do_raw_spin_lock(rq->lock);
++	}
++}
++
++static inline void unlock_all_rqs(void)
++{
++	int cpu;
++
++	for_each_possible_cpu(cpu) {
++		struct rq *rq = cpu_rq(cpu);
++
++		do_raw_spin_unlock(rq->lock);
++	}
++	preempt_enable();
++}
++
++/* Specially nest trylock an rq */
++static inline bool trylock_rq(struct rq *this_rq, struct rq *rq)
++{
++	if (unlikely(!do_raw_spin_trylock(rq->lock)))
++		return false;
++	spin_acquire(&rq->lock->dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++	synchronise_niffies(this_rq, rq);
++	return true;
++}
++
++/* Unlock a specially nested trylocked rq */
++static inline void unlock_rq(struct rq *rq)
++{
++	spin_release(&rq->lock->dep_map, 1, _RET_IP_);
++	do_raw_spin_unlock(rq->lock);
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask)					\
++	({							\
++		typeof(ptr) _ptr = (ptr);			\
++		typeof(mask) _mask = (mask);			\
++		typeof(*_ptr) _old, _val = *_ptr;		\
++								\
++		for (;;) {					\
++			_old = cmpxchg(_ptr, _val, _val | _mask); \
++			if (_old == _val)			\
++				break;				\
++			_val = _old;				\
++		}						\
++	_old;							\
++})
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	typeof(ti->flags) old, val = READ_ONCE(ti->flags);
++
++	for (;;) {
++		if (!(val & _TIF_POLLING_NRFLAG))
++			return false;
++		if (val & _TIF_NEED_RESCHED)
++			return true;
++		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
++		if (old == val)
++			break;
++		val = old;
++	}
++	return true;
++}
++
++#else
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++	set_tsk_need_resched(p);
++	return true;
++}
++
++#ifdef CONFIG_SMP
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	return false;
++}
++#endif
++#endif
++
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	struct wake_q_node *node = &task->wake_q;
++
++	/*
++	 * Atomically grab the task, if ->wake_q is !nil already it means
++	 * its already queued (either by us or someone else) and will get the
++	 * wakeup due to that.
++	 *
++	 * In order to ensure that a pending wakeup will observe our pending
++	 * state, even in the failed case, an explicit smp_mb() must be used.
++	 */
++	smp_mb__before_atomic();
++	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
++		return false;
++
++	/*
++	 * The head is context local, there can be no concurrency.
++	 */
++	*head->lastp = node;
++	head->lastp = &node->next;
++	return true;
++}
++
++/**
++ * wake_q_add() - queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ */
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	if (__wake_q_add(head, task))
++		get_task_struct(task);
++}
++
++/**
++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ *
++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
++ * that already hold reference to @task can call the 'safe' version and trust
++ * wake_q to do the right thing depending whether or not the @task is already
++ * queued for wakeup.
++ */
++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
++{
++	if (!__wake_q_add(head, task))
++		put_task_struct(task);
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++	struct wake_q_node *node = head->first;
++
++	while (node != WAKE_Q_TAIL) {
++		struct task_struct *task;
++
++		task = container_of(node, struct task_struct, wake_q);
++		BUG_ON(!task);
++		/* Task can safely be re-inserted now */
++		node = node->next;
++		task->wake_q.next = NULL;
++
++		/*
++		 * wake_up_process() executes a full barrier, which pairs with
++		 * the queueing in wake_q_add() so as not to miss wakeups.
++		 */
++		wake_up_process(task);
++		put_task_struct(task);
++	}
++}
++
++static inline void smp_sched_reschedule(int cpu)
++{
++	if (likely(cpu_online(cpu)))
++		smp_send_reschedule(cpu);
++}
++
++/*
++ * resched_task - mark a task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_task(struct task_struct *p)
++{
++	int cpu;
++#ifdef CONFIG_LOCKDEP
++	/* Kernel threads call this when creating workqueues while still
++	 * inactive from __kthread_bind_mask, holding only the pi_lock */
++	if (!(p->flags & PF_KTHREAD)) {
++		struct rq *rq = task_rq(p);
++
++		lockdep_assert_held(rq->lock);
++	}
++#endif
++	if (test_tsk_need_resched(p))
++		return;
++
++	cpu = task_cpu(p);
++	if (cpu == smp_processor_id()) {
++		set_tsk_need_resched(p);
++		set_preempt_need_resched();
++		return;
++	}
++
++	if (set_nr_and_not_polling(p))
++		smp_sched_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++/*
++ * A task that is not running or queued will not have a node set.
++ * A task that is queued but not running will have a node set.
++ * A task that is currently running will have ->on_cpu set but no node set.
++ */
++static inline bool task_queued(struct task_struct *p)
++{
++	return !skiplist_node_empty(&p->node);
++}
++
++static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
++static inline void resched_if_idle(struct rq *rq);
++
++/* Dodgy workaround till we figure out where the softirqs are going */
++static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
++{
++	if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
++		do_softirq_own_stack();
++}
++
++static inline bool deadline_before(u64 deadline, u64 time)
++{
++	return (deadline < time);
++}
++
++/*
++ * Deadline is "now" in niffies + (offset by priority). Setting the deadline
++ * is the key to everything. It distributes cpu fairly amongst tasks of the
++ * same nice value, it proportions cpu according to nice level, it means the
++ * task that last woke up the longest ago has the earliest deadline, thus
++ * ensuring that interactive tasks get low latency on wake up. The CPU
++ * proportion works out to the square of the virtual deadline difference, so
++ * this equation will give nice 19 3% CPU compared to nice 0.
++ */
++static inline u64 prio_deadline_diff(int user_prio)
++{
++	return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
++}
++
++static inline u64 task_deadline_diff(struct task_struct *p)
++{
++	return prio_deadline_diff(TASK_USER_PRIO(p));
++}
++
++static inline u64 static_deadline_diff(int static_prio)
++{
++	return prio_deadline_diff(USER_PRIO(static_prio));
++}
++
++static inline int longest_deadline_diff(void)
++{
++	return prio_deadline_diff(39);
++}
++
++static inline int ms_longest_deadline_diff(void)
++{
++	return NS_TO_MS(longest_deadline_diff());
++}
++
++static inline bool rq_local(struct rq *rq);
++
++#ifndef SCHED_CAPACITY_SCALE
++#define SCHED_CAPACITY_SCALE 1024
++#endif
++
++static inline int rq_load(struct rq *rq)
++{
++	return rq->nr_running;
++}
++
++/*
++ * Update the load average for feeding into cpu frequency governors. Use a
++ * rough estimate of a rolling average with ~ time constant of 32ms.
++ * 80/128 ~ 0.63. * 80 / 32768 / 128 == * 5 / 262144
++ * Make sure a call to update_clocks has been made before calling this to get
++ * an updated rq->niffies.
++ */
++static void update_load_avg(struct rq *rq, unsigned int flags)
++{
++	long us_interval, load;
++	unsigned long curload;
++
++	us_interval = NS_TO_US(rq->niffies - rq->load_update);
++	if (unlikely(us_interval <= 0))
++		return;
++
++	curload = rq_load(rq);
++	load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
++	if (unlikely(load < 0))
++		load = 0;
++	load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
++	rq->load_avg = load;
++
++	rq->load_update = rq->niffies;
++	update_irq_load_avg(rq, 0);
++	if (likely(rq_local(rq)))
++		cpufreq_trigger(rq, flags);
++}
++
++#ifdef HAVE_SCHED_AVG_IRQ
++/*
++ * IRQ variant of update_load_avg below. delta is actually time in nanoseconds
++ * here so we scale curload to how long it's been since the last update.
++ */
++static void update_irq_load_avg(struct rq *rq, long delta)
++{
++	long us_interval, load;
++	unsigned long curload;
++
++	us_interval = NS_TO_US(rq->niffies - rq->irq_load_update);
++	if (unlikely(us_interval <= 0))
++		return;
++
++	curload = NS_TO_US(delta) / us_interval;
++	load = rq->irq_load_avg - (rq->irq_load_avg * us_interval * 5 / 262144);
++	if (unlikely(load < 0))
++		load = 0;
++	load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
++	rq->irq_load_avg = load;
++
++	rq->irq_load_update = rq->niffies;
++}
++#endif
++
++/*
++ * Removing from the runqueue. Enter with rq locked. Deleting a task
++ * from the skip list is done via the stored node reference in the task struct
++ * and does not require a full look up. Thus it occurs in O(k) time where k
++ * is the "level" of the list the task was stored at - usually < 4, max 8.
++ */
++static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
++{
++	skiplist_delete(rq->sl, &p->node);
++	rq->best_key = rq->node->next[0]->key;
++	update_clocks(rq);
++
++	if (!(flags & DEQUEUE_SAVE)) {
++		sched_info_dequeued(rq, p);
++		psi_dequeue(p, flags & DEQUEUE_SLEEP);
++	}
++	rq->nr_running--;
++	if (rt_task(p))
++		rq->rt_nr_running--;
++	update_load_avg(rq, flags);
++}
++
++#ifdef CONFIG_PREEMPT_RCU
++static bool rcu_read_critical(struct task_struct *p)
++{
++	return p->rcu_read_unlock_special.b.blocked;
++}
++#else /* CONFIG_PREEMPT_RCU */
++#define rcu_read_critical(p) (false)
++#endif /* CONFIG_PREEMPT_RCU */
++
++/*
++ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as
++ * an idle task, we ensure none of the following conditions are met.
++ */
++static bool idleprio_suitable(struct task_struct *p)
++{
++	return (!(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)) &&
++		!signal_pending(p) && !rcu_read_critical(p) && !freezing(p));
++}
++
++/*
++ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check
++ * that the iso_refractory flag is not set.
++ */
++static inline bool isoprio_suitable(struct rq *rq)
++{
++	return !rq->iso_refractory;
++}
++
++/*
++ * Adding to the runqueue. Enter with rq locked.
++ */
++static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
++{
++	unsigned int randseed, cflags = 0;
++	u64 sl_id;
++
++	if (!rt_task(p)) {
++		/* Check it hasn't gotten rt from PI */
++		if ((idleprio_task(p) && idleprio_suitable(p)) ||
++		    (iso_task(p) && isoprio_suitable(rq)))
++			p->prio = p->normal_prio;
++		else
++			p->prio = NORMAL_PRIO;
++	} else
++		rq->rt_nr_running++;
++	/*
++	 * The sl_id key passed to the skiplist generates a sorted list.
++	 * Realtime and sched iso tasks run FIFO so they only need be sorted
++	 * according to priority. The skiplist will put tasks of the same
++	 * key inserted later in FIFO order. Tasks of sched normal, batch
++	 * and idleprio are sorted according to their deadlines. Idleprio
++	 * tasks are offset by an impossibly large deadline value ensuring
++	 * they get sorted into last positions, but still according to their
++	 * own deadlines. This creates a "landscape" of skiplists running
++	 * from priority 0 realtime in first place to the lowest priority
++	 * idleprio tasks last. Skiplist insertion is an O(log n) process.
++	 */
++	if (p->prio <= ISO_PRIO) {
++		sl_id = p->prio;
++	} else {
++		sl_id = p->deadline;
++		if (idleprio_task(p)) {
++			if (p->prio == IDLE_PRIO)
++				sl_id |= 0xF000000000000000;
++			else
++				sl_id += longest_deadline_diff();
++		}
++	}
++	/*
++	 * Some architectures don't have better than microsecond resolution
++	 * so mask out ~microseconds as the random seed for skiplist insertion.
++	 */
++	update_clocks(rq);
++	if (!(flags & ENQUEUE_RESTORE)) {
++		sched_info_queued(rq, p);
++		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
++	}
++
++	randseed = (rq->niffies >> 10) & 0xFFFFFFFF;
++	skiplist_insert(rq->sl, &p->node, sl_id, p, randseed);
++	rq->best_key = rq->node->next[0]->key;
++	if (p->in_iowait)
++		cflags |= SCHED_CPUFREQ_IOWAIT;
++	rq->nr_running++;
++	update_load_avg(rq, cflags);
++}
++
++/*
++ * Returns the relative length of deadline all compared to the shortest
++ * deadline which is that of nice -20.
++ */
++static inline int task_prio_ratio(struct task_struct *p)
++{
++	return prio_ratios[TASK_USER_PRIO(p)];
++}
++
++/*
++ * task_timeslice - all tasks of all priorities get the exact same timeslice
++ * length. CPU distribution is handled by giving different deadlines to
++ * tasks of different priorities. Use 128 as the base value for fast shifts.
++ */
++static inline int task_timeslice(struct task_struct *p)
++{
++	return (rr_interval * task_prio_ratio(p) / 128);
++}
++
++#ifdef CONFIG_SMP
++/* Entered with rq locked */
++static inline void resched_if_idle(struct rq *rq)
++{
++	if (rq_idle(rq))
++		resched_task(rq->curr);
++}
++
++static inline bool rq_local(struct rq *rq)
++{
++	return (rq->cpu == smp_processor_id());
++}
++#ifdef CONFIG_SMT_NICE
++static const cpumask_t *thread_cpumask(int cpu);
++
++/* Find the best real time priority running on any SMT siblings of cpu and if
++ * none are running, the static priority of the best deadline task running.
++ * The lookups to the other runqueues is done lockless as the occasional wrong
++ * value would be harmless. */
++static int best_smt_bias(struct rq *this_rq)
++{
++	int other_cpu, best_bias = 0;
++
++	for_each_cpu(other_cpu, &this_rq->thread_mask) {
++		struct rq *rq = cpu_rq(other_cpu);
++
++		if (rq_idle(rq))
++			continue;
++		if (unlikely(!rq->online))
++			continue;
++		if (!rq->rq_mm)
++			continue;
++		if (likely(rq->rq_smt_bias > best_bias))
++			best_bias = rq->rq_smt_bias;
++	}
++	return best_bias;
++}
++
++static int task_prio_bias(struct task_struct *p)
++{
++	if (rt_task(p))
++		return 1 << 30;
++	else if (task_running_iso(p))
++		return 1 << 29;
++	else if (task_running_idle(p))
++		return 0;
++	return MAX_PRIO - p->static_prio;
++}
++
++static bool smt_always_schedule(struct task_struct __maybe_unused *p, struct rq __maybe_unused *this_rq)
++{
++	return true;
++}
++
++static bool (*smt_schedule)(struct task_struct *p, struct rq *this_rq) = &smt_always_schedule;
++
++/* We've already decided p can run on CPU, now test if it shouldn't for SMT
++ * nice reasons. */
++static bool smt_should_schedule(struct task_struct *p, struct rq *this_rq)
++{
++	int best_bias, task_bias;
++
++	/* Kernel threads always run */
++	if (unlikely(!p->mm))
++		return true;
++	if (rt_task(p))
++		return true;
++	if (!idleprio_suitable(p))
++		return true;
++	best_bias = best_smt_bias(this_rq);
++	/* The smt siblings are all idle or running IDLEPRIO */
++	if (best_bias < 1)
++		return true;
++	task_bias = task_prio_bias(p);
++	if (task_bias < 1)
++		return false;
++	if (task_bias >= best_bias)
++		return true;
++	/* Dither 25% cpu of normal tasks regardless of nice difference */
++	if (best_bias % 4 == 1)
++		return true;
++	/* Sorry, you lose */
++	return false;
++}
++#else /* CONFIG_SMT_NICE */
++#define smt_schedule(p, this_rq) (true)
++#endif /* CONFIG_SMT_NICE */
++
++static inline void atomic_set_cpu(int cpu, cpumask_t *cpumask)
++{
++	set_bit(cpu, (volatile unsigned long *)cpumask);
++}
++
++/*
++ * The cpu_idle_map stores a bitmap of all the CPUs currently idle to
++ * allow easy lookup of whether any suitable idle CPUs are available.
++ * It's cheaper to maintain a binary yes/no if there are any idle CPUs on the
++ * idle_cpus variable than to do a full bitmask check when we are busy. The
++ * bits are set atomically but read locklessly as occasional false positive /
++ * negative is harmless.
++ */
++static inline void set_cpuidle_map(int cpu)
++{
++	if (likely(cpu_online(cpu)))
++		atomic_set_cpu(cpu, &cpu_idle_map);
++}
++
++static inline void atomic_clear_cpu(int cpu, cpumask_t *cpumask)
++{
++	clear_bit(cpu, (volatile unsigned long *)cpumask);
++}
++
++static inline void clear_cpuidle_map(int cpu)
++{
++	atomic_clear_cpu(cpu, &cpu_idle_map);
++}
++
++static bool suitable_idle_cpus(struct task_struct *p)
++{
++	return (cpumask_intersects(&p->cpus_allowed, &cpu_idle_map));
++}
++
++/*
++ * Resched current on rq. We don't know if rq is local to this CPU nor if it
++ * is locked so we do not use an intermediate variable for the task to avoid
++ * having it dereferenced.
++ */
++static void resched_curr(struct rq *rq)
++{
++	int cpu;
++
++	if (test_tsk_need_resched(rq->curr))
++		return;
++
++	rq->preempt = rq->curr;
++	cpu = rq->cpu;
++
++	/* We're doing this without holding the rq lock if it's not task_rq */
++
++	if (cpu == smp_processor_id()) {
++		set_tsk_need_resched(rq->curr);
++		set_preempt_need_resched();
++		return;
++	}
++
++	if (set_nr_and_not_polling(rq->curr))
++		smp_sched_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++#define CPUIDLE_DIFF_THREAD	(1)
++#define CPUIDLE_DIFF_CORE	(2)
++#define CPUIDLE_CACHE_BUSY	(4)
++#define CPUIDLE_DIFF_CPU	(8)
++#define CPUIDLE_THREAD_BUSY	(16)
++#define CPUIDLE_DIFF_NODE	(32)
++
++/*
++ * The best idle CPU is chosen according to the CPUIDLE ranking above where the
++ * lowest value would give the most suitable CPU to schedule p onto next. The
++ * order works out to be the following:
++ *
++ * Same thread, idle or busy cache, idle or busy threads
++ * Other core, same cache, idle or busy cache, idle threads.
++ * Same node, other CPU, idle cache, idle threads.
++ * Same node, other CPU, busy cache, idle threads.
++ * Other core, same cache, busy threads.
++ * Same node, other CPU, busy threads.
++ * Other node, other CPU, idle cache, idle threads.
++ * Other node, other CPU, busy cache, idle threads.
++ * Other node, other CPU, busy threads.
++ */
++static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
++{
++	int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY |
++		CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE |
++		CPUIDLE_DIFF_THREAD;
++	int cpu_tmp;
++
++	if (cpumask_test_cpu(best_cpu, tmpmask))
++		goto out;
++
++	for_each_cpu(cpu_tmp, tmpmask) {
++		int ranking, locality;
++		struct rq *tmp_rq;
++
++		ranking = 0;
++		tmp_rq = cpu_rq(cpu_tmp);
++
++		locality = rq->cpu_locality[cpu_tmp];
++#ifdef CONFIG_NUMA
++		if (locality > 3)
++			ranking |= CPUIDLE_DIFF_NODE;
++		else
++#endif
++		if (locality > 2)
++			ranking |= CPUIDLE_DIFF_CPU;
++#ifdef CONFIG_SCHED_MC
++		else if (locality == 2)
++			ranking |= CPUIDLE_DIFF_CORE;
++		else if (!(tmp_rq->cache_idle(tmp_rq)))
++			ranking |= CPUIDLE_CACHE_BUSY;
++#endif
++#ifdef CONFIG_SCHED_SMT
++		if (locality == 1)
++			ranking |= CPUIDLE_DIFF_THREAD;
++		if (!(tmp_rq->siblings_idle(tmp_rq)))
++			ranking |= CPUIDLE_THREAD_BUSY;
++#endif
++		if (ranking < best_ranking) {
++			best_cpu = cpu_tmp;
++			best_ranking = ranking;
++		}
++	}
++out:
++	return best_cpu;
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++	struct rq *this_rq = cpu_rq(this_cpu);
++
++	return (this_rq->cpu_locality[that_cpu] < 3);
++}
++
++/* As per resched_curr but only will resched idle task */
++static inline void resched_idle(struct rq *rq)
++{
++	if (test_tsk_need_resched(rq->idle))
++		return;
++
++	rq->preempt = rq->idle;
++
++	set_tsk_need_resched(rq->idle);
++
++	if (rq_local(rq)) {
++		set_preempt_need_resched();
++		return;
++	}
++
++	smp_sched_reschedule(rq->cpu);
++}
++
++static struct rq *resched_best_idle(struct task_struct *p, int cpu)
++{
++	cpumask_t tmpmask;
++	struct rq *rq;
++	int best_cpu;
++
++	cpumask_and(&tmpmask, &p->cpus_allowed, &cpu_idle_map);
++	best_cpu = best_mask_cpu(cpu, task_rq(p), &tmpmask);
++	rq = cpu_rq(best_cpu);
++	if (!smt_schedule(p, rq))
++		return NULL;
++	rq->preempt = p;
++	resched_idle(rq);
++	return rq;
++}
++
++static inline void resched_suitable_idle(struct task_struct *p)
++{
++	if (suitable_idle_cpus(p))
++		resched_best_idle(p, task_cpu(p));
++}
++
++static inline struct rq *rq_order(struct rq *rq, int cpu)
++{
++	return rq->rq_order[cpu];
++}
++#else /* CONFIG_SMP */
++static inline void set_cpuidle_map(int cpu)
++{
++}
++
++static inline void clear_cpuidle_map(int cpu)
++{
++}
++
++static inline bool suitable_idle_cpus(struct task_struct *p)
++{
++	return uprq->curr == uprq->idle;
++}
++
++static inline void resched_suitable_idle(struct task_struct *p)
++{
++}
++
++static inline void resched_curr(struct rq *rq)
++{
++	resched_task(rq->curr);
++}
++
++static inline void resched_if_idle(struct rq *rq)
++{
++}
++
++static inline bool rq_local(struct rq *rq)
++{
++	return true;
++}
++
++static inline struct rq *rq_order(struct rq *rq, int cpu)
++{
++	return rq;
++}
++
++static inline bool smt_schedule(struct task_struct *p, struct rq *rq)
++{
++	return true;
++}
++#endif /* CONFIG_SMP */
++
++static inline int normal_prio(struct task_struct *p)
++{
++	if (has_rt_policy(p))
++		return MAX_RT_PRIO - 1 - p->rt_priority;
++	if (idleprio_task(p))
++		return IDLE_PRIO;
++	if (iso_task(p))
++		return ISO_PRIO;
++	return NORMAL_PRIO;
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks as it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++	p->normal_prio = normal_prio(p);
++	/*
++	 * If we are RT tasks or we were boosted to RT priority,
++	 * keep the priority unchanged. Otherwise, update priority
++	 * to the normal priority:
++	 */
++	if (!rt_prio(p->prio))
++		return p->normal_prio;
++	return p->prio;
++}
++
++/*
++ * activate_task - move a task to the runqueue. Enter with rq locked.
++ */
++static void activate_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	resched_if_idle(rq);
++
++	/*
++	 * Sleep time is in units of nanosecs, so shift by 20 to get a
++	 * milliseconds-range estimation of the amount of time that the task
++	 * spent sleeping:
++	 */
++	if (unlikely(prof_on == SLEEP_PROFILING)) {
++		if (p->state == TASK_UNINTERRUPTIBLE)
++			profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
++				     (rq->niffies - p->last_ran) >> 20);
++	}
++
++	p->prio = effective_prio(p);
++	if (task_contributes_to_load(p))
++		rq->nr_uninterruptible--;
++
++	enqueue_task(rq, p, flags);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++}
++
++/*
++ * deactivate_task - If it's running, it's not on the runqueue and we can just
++ * decrement the nr_running. Enter with rq locked.
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	if (task_contributes_to_load(p))
++		rq->nr_uninterruptible++;
++
++	p->on_rq = 0;
++	if (!(flags & DEQUEUE_SAVE)) {
++		sched_info_dequeued(rq, p);
++		psi_dequeue(p, flags & DEQUEUE_SLEEP);
++	}
++}
++
++#ifdef CONFIG_SMP
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++	struct rq *rq;
++
++	if (task_cpu(p) == new_cpu)
++		return;
++
++	/* Do NOT call set_task_cpu on a currently queued task as we will not
++	 * be reliably holding the rq lock after changing CPU. */
++	BUG_ON(task_queued(p));
++	rq = task_rq(p);
++
++#ifdef CONFIG_LOCKDEP
++	/*
++	 * The caller should hold either p->pi_lock or rq->lock, when changing
++	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++	 *
++	 * Furthermore, all task_rq users should acquire both locks, see
++	 * task_rq_lock().
++	 */
++	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++				      lockdep_is_held(rq->lock)));
++#endif
++
++	trace_sched_migrate_task(p, new_cpu);
++	rseq_migrate(p);
++	perf_event_task_migrate(p);
++
++	/*
++	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
++	 * successfully executed on another CPU. We must ensure that updates of
++	 * per-task data have been completed by this moment.
++	 */
++	smp_wmb();
++
++	p->wake_cpu = new_cpu;
++
++	if (task_running(rq, p)) {
++		/*
++		 * We should only be calling this on a running task if we're
++		 * holding rq lock.
++		 */
++		lockdep_assert_held(rq->lock);
++
++		/*
++		 * We can't change the task_thread_info CPU on a running task
++		 * as p will still be protected by the rq lock of the CPU it
++		 * is still running on so we only set the wake_cpu for it to be
++		 * lazily updated once off the CPU.
++		 */
++		return;
++	}
++
++#ifdef CONFIG_THREAD_INFO_IN_TASK
++	WRITE_ONCE(p->cpu, new_cpu);
++#else
++	WRITE_ONCE(task_thread_info(p)->cpu, new_cpu);
++#endif
++	/* We're no longer protecting p after this point since we're holding
++	 * the wrong runqueue lock. */
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * Move a task off the runqueue and take it to a cpu for it will
++ * become the running task.
++ */
++static inline void take_task(struct rq *rq, int cpu, struct task_struct *p)
++{
++	struct rq *p_rq = task_rq(p);
++
++	dequeue_task(p_rq, p, DEQUEUE_SAVE);
++	if (p_rq != rq) {
++		sched_info_dequeued(p_rq, p);
++		sched_info_queued(rq, p);
++	}
++	set_task_cpu(p, cpu);
++}
++
++/*
++ * Returns a descheduling task to the runqueue unless it is being
++ * deactivated.
++ */
++static inline void return_task(struct task_struct *p, struct rq *rq,
++			       int cpu, bool deactivate)
++{
++	if (deactivate)
++		deactivate_task(p, rq, DEQUEUE_SLEEP);
++	else {
++#ifdef CONFIG_SMP
++		/*
++		 * set_task_cpu was called on the running task that doesn't
++		 * want to deactivate so it has to be enqueued to a different
++		 * CPU and we need its lock. Tag it to be moved with as the
++		 * lock is dropped in finish_lock_switch.
++		 */
++		if (unlikely(p->wake_cpu != cpu))
++			WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
++		else
++#endif
++			enqueue_task(rq, p, ENQUEUE_RESTORE);
++	}
++}
++
++/* Enter with rq lock held. We know p is on the local cpu */
++static inline void __set_tsk_resched(struct task_struct *p)
++{
++	set_tsk_need_resched(p);
++	set_preempt_need_resched();
++}
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++	return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * If @match_state is nonzero, it's the @p->state value just checked and
++ * not expected to change. If it changes, i.e. @p might have woken up,
++ * then return zero. When we succeed in waiting for @p to be off its CPU,
++ * we return a positive number (its total switch count). If a second call
++ * a short while later returns the same number, the caller can be sure that
++ * @p has remained unscheduled the whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, long match_state)
++{
++	int running, queued;
++	struct rq_flags rf;
++	unsigned long ncsw;
++	struct rq *rq;
++
++	for (;;) {
++		rq = task_rq(p);
++
++		/*
++		 * If the task is actively running on another CPU
++		 * still, just relax and busy-wait without holding
++		 * any locks.
++		 *
++		 * NOTE! Since we don't hold any locks, it's not
++		 * even sure that "rq" stays as the right runqueue!
++		 * But we don't care, since this will return false
++		 * if the runqueue has changed and p is actually now
++		 * running somewhere else!
++		 */
++		while (task_running(rq, p)) {
++			if (match_state && unlikely(p->state != match_state))
++				return 0;
++			cpu_relax();
++		}
++
++		/*
++		 * Ok, time to look more closely! We need the rq
++		 * lock now, to be *sure*. If we're wrong, we'll
++		 * just go back and repeat.
++		 */
++		rq = task_rq_lock(p, &rf);
++		trace_sched_wait_task(p);
++		running = task_running(rq, p);
++		queued = task_on_rq_queued(p);
++		ncsw = 0;
++		if (!match_state || p->state == match_state)
++			ncsw = p->nvcsw | LONG_MIN;	/* sets MSB */
++		task_rq_unlock(rq, p, &rf);
++
++		/*
++		 * If it changed from the expected state, bail out now.
++ */ ++ if (unlikely(!ncsw)) ++ break; ++ ++ /* ++ * Was it really running after all now that we ++ * checked with the proper locks actually held? ++ * ++ * Oops. Go back and try again.. ++ */ ++ if (unlikely(running)) { ++ cpu_relax(); ++ continue; ++ } ++ ++ /* ++ * It's not enough that it's not actively running, ++ * it must be off the runqueue _entirely_, and not ++ * preempted! ++ * ++ * So if it was still runnable (but just not actively ++ * running right now), it's preempted, and we should ++ * yield - it could be a while. ++ */ ++ if (unlikely(queued)) { ++ ktime_t to = NSEC_PER_SEC / HZ; ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_hrtimeout(&to, HRTIMER_MODE_REL); ++ continue; ++ } ++ ++ /* ++ * Ahh, all good. It wasn't running, and it wasn't ++ * runnable, which means that it will never become ++ * running in the future either. We're all done! ++ */ ++ break; ++ } ++ ++ return ncsw; ++} ++ ++/*** ++ * kick_process - kick a running thread to enter/exit the kernel ++ * @p: the to-be-kicked thread ++ * ++ * Cause a process which is running on another CPU to enter ++ * kernel-mode, without any delay. (to get signals handled.) ++ * ++ * NOTE: this function doesn't have to take the runqueue lock, ++ * because all it wants to ensure is that the remote task enters ++ * the kernel. If the IPI races and the task has been migrated ++ * to another CPU then no harm is done and the purpose has been ++ * achieved as well. ++ */ ++void kick_process(struct task_struct *p) ++{ ++ int cpu; ++ ++ preempt_disable(); ++ cpu = task_cpu(p); ++ if ((cpu != smp_processor_id()) && task_curr(p)) ++ smp_sched_reschedule(cpu); ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(kick_process); ++#endif ++ ++/* ++ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the ++ * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or ++ * between themselves, they cooperatively multitask. An idle rq scores as ++ * prio PRIO_LIMIT so it is always preempted. ++ */ ++static inline bool ++can_preempt(struct task_struct *p, int prio, u64 deadline) ++{ ++ /* Better static priority RT task or better policy preemption */ ++ if (p->prio < prio) ++ return true; ++ if (p->prio > prio) ++ return false; ++ if (p->policy == SCHED_BATCH) ++ return false; ++ /* SCHED_NORMAL and ISO will preempt based on deadline */ ++ if (!deadline_before(p->deadline, deadline)) ++ return false; ++ return true; ++} ++ ++#ifdef CONFIG_SMP ++ ++static inline bool is_per_cpu_kthread(struct task_struct *p) ++{ ++ if (!(p->flags & PF_KTHREAD)) ++ return false; ++ ++ if (p->nr_cpus_allowed != 1) ++ return false; ++ ++ return true; ++} ++ ++/* ++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see ++ * __set_cpus_allowed_ptr(). ++ */ ++static inline bool is_cpu_allowed(struct task_struct *p, int cpu) ++{ ++ if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) ++ return false; ++ ++ if (is_per_cpu_kthread(p)) ++ return cpu_online(cpu); ++ ++ return cpu_active(cpu); ++} ++ ++/* ++ * Check to see if p can run on cpu, and if not, whether there are any online ++ * CPUs it can run on instead. This only happens with the hotplug threads that ++ * bring up the CPUs. 
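++ * For example, a per-CPU hotplug kthread bound to a CPU that is still
++ * being brought up has a single allowed CPU that may not be online yet,
++ * in which case there is nowhere better to move it.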
++ */ ++static inline bool sched_other_cpu(struct task_struct *p, int cpu) ++{ ++ if (likely(cpumask_test_cpu(cpu, &p->cpus_allowed))) ++ return false; ++ if (p->nr_cpus_allowed == 1) { ++ cpumask_t valid_mask; ++ ++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_online_mask); ++ if (unlikely(cpumask_empty(&valid_mask))) ++ return false; ++ } ++ return true; ++} ++ ++static inline bool needs_other_cpu(struct task_struct *p, int cpu) ++{ ++ if (cpumask_test_cpu(cpu, &p->cpus_allowed)) ++ return false; ++ return true; ++} ++ ++#define cpu_online_map (*(cpumask_t *)cpu_online_mask) ++ ++static void try_preempt(struct task_struct *p, struct rq *this_rq) ++{ ++ int i, this_entries = rq_load(this_rq); ++ cpumask_t tmp; ++ ++ if (suitable_idle_cpus(p) && resched_best_idle(p, task_cpu(p))) ++ return; ++ ++ /* IDLEPRIO tasks never preempt anything but idle */ ++ if (p->policy == SCHED_IDLEPRIO) ++ return; ++ ++ cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed); ++ ++ for (i = 0; i < num_possible_cpus(); i++) { ++ struct rq *rq = this_rq->cpu_order[i]; ++ ++ if (!cpumask_test_cpu(rq->cpu, &tmp)) ++ continue; ++ ++ if (!sched_interactive && rq != this_rq && rq_load(rq) <= this_entries) ++ continue; ++ if (smt_schedule(p, rq) && can_preempt(p, rq->rq_prio, rq->rq_deadline)) { ++ /* We set rq->preempting lockless, it's a hint only */ ++ rq->preempting = p; ++ resched_curr(rq); ++ return; ++ } ++ } ++} ++ ++static int __set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, bool check); ++#else /* CONFIG_SMP */ ++static inline bool needs_other_cpu(struct task_struct *p, int cpu) ++{ ++ return false; ++} ++ ++static void try_preempt(struct task_struct *p, struct rq *this_rq) ++{ ++ if (p->policy == SCHED_IDLEPRIO) ++ return; ++ if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline)) ++ resched_curr(uprq); ++} ++ ++static inline int __set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, bool check) ++{ ++ return set_cpus_allowed_ptr(p, new_mask); ++} ++#endif /* CONFIG_SMP */ ++ ++/* ++ * wake flags ++ */ ++#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ ++#define WF_FORK 0x02 /* child wakeup after fork */ ++#define WF_MIGRATED 0x04 /* internal use, task got migrated */ ++ ++static void ++ttwu_stat(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq; ++ ++ if (!schedstat_enabled()) ++ return; ++ ++ rq = this_rq(); ++ ++#ifdef CONFIG_SMP ++ if (cpu == rq->cpu) { ++ __schedstat_inc(rq->ttwu_local); ++ } else { ++ struct sched_domain *sd; ++ ++ rcu_read_lock(); ++ for_each_domain(rq->cpu, sd) { ++ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { ++ __schedstat_inc(sd->ttwu_wake_remote); ++ break; ++ } ++ } ++ rcu_read_unlock(); ++ } ++ ++#endif /* CONFIG_SMP */ ++ ++ __schedstat_inc(rq->ttwu_count); ++} ++ ++static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) ++{ ++ activate_task(p, rq, en_flags); ++ ++ /* if a worker is waking up, notify the workqueue */ ++ if (p->flags & PF_WQ_WORKER) ++ wq_worker_waking_up(p, cpu_of(rq)); ++} ++ ++/* ++ * Mark the task runnable and perform wakeup-preemption. ++ */ ++static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) ++{ ++ /* ++ * Sync wakeups (i.e. those types of wakeups where the waker ++ * has indicated that it will leave the CPU in short order) ++ * don't trigger a preemption if there are no idle cpus, ++ * instead waiting for current to deschedule. 
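++ * A typical sync wakeup source is wake_up_interruptible_sync(), e.g. a
++ * pipe writer waking its reader just before blocking itself.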
++ */ ++ if (wake_flags & WF_SYNC) ++ resched_suitable_idle(p); ++ else ++ try_preempt(p, rq); ++ p->state = TASK_RUNNING; ++ trace_sched_wakeup(p); ++} ++ ++static void ++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) ++{ ++ int en_flags = ENQUEUE_WAKEUP; ++ ++ lockdep_assert_held(rq->lock); ++ ++#ifdef CONFIG_SMP ++ if (p->sched_contributes_to_load) ++ rq->nr_uninterruptible--; ++ ++ if (wake_flags & WF_MIGRATED) ++ en_flags |= ENQUEUE_MIGRATED; ++#endif ++ ++ ttwu_activate(rq, p, en_flags); ++ ttwu_do_wakeup(rq, p, wake_flags); ++} ++ ++/* ++ * Called in case the task @p isn't fully descheduled from its runqueue, ++ * in this case we must do a remote wakeup. Its a 'light' wakeup though, ++ * since all we need to do is flip p->state to TASK_RUNNING, since ++ * the task is still ->on_rq. ++ */ ++static int ttwu_remote(struct task_struct *p, int wake_flags) ++{ ++ struct rq *rq; ++ int ret = 0; ++ ++ rq = __task_rq_lock(p, NULL); ++ if (likely(task_on_rq_queued(p))) { ++ ttwu_do_wakeup(rq, p, wake_flags); ++ ret = 1; ++ } ++ __task_rq_unlock(rq, NULL); ++ ++ return ret; ++} ++ ++#ifdef CONFIG_SMP ++void sched_ttwu_pending(void) ++{ ++ struct rq *rq = this_rq(); ++ struct llist_node *llist = llist_del_all(&rq->wake_list); ++ struct task_struct *p, *t; ++ struct rq_flags rf; ++ ++ if (!llist) ++ return; ++ ++ rq_lock_irqsave(rq, &rf); ++ ++ llist_for_each_entry_safe(p, t, llist, wake_entry) ++ ttwu_do_activate(rq, p, 0); ++ ++ rq_unlock_irqrestore(rq, &rf); ++} ++ ++void scheduler_ipi(void) ++{ ++ /* ++ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting ++ * TIF_NEED_RESCHED remotely (for the first time) will also send ++ * this IPI. ++ */ ++ preempt_fold_need_resched(); ++ ++ if (llist_empty(&this_rq()->wake_list) && (!idle_cpu(smp_processor_id()) || need_resched())) ++ return; ++ ++ /* ++ * Not all reschedule IPI handlers call irq_enter/irq_exit, since ++ * traditionally all their work was done from the interrupt return ++ * path. Now that we actually do some work, we need to make sure ++ * we do call them. ++ * ++ * Some archs already do call them, luckily irq_enter/exit nest ++ * properly. ++ * ++ * Arguably we should visit all archs and update all handlers, ++ * however a fair share of IPIs are still resched only so this would ++ * somewhat pessimize the simple resched case. 
++ */ ++ irq_enter(); ++ sched_ttwu_pending(); ++ irq_exit(); ++} ++ ++static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) { ++ if (!set_nr_if_polling(rq->idle)) ++ smp_sched_reschedule(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++ } ++} ++ ++void wake_up_if_idle(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ struct rq_flags rf; ++ ++ rcu_read_lock(); ++ ++ if (!is_idle_task(rcu_dereference(rq->curr))) ++ goto out; ++ ++ if (set_nr_if_polling(rq->idle)) { ++ trace_sched_wake_idle_without_ipi(cpu); ++ } else { ++ rq_lock_irqsave(rq, &rf); ++ if (likely(is_idle_task(rq->curr))) ++ smp_sched_reschedule(cpu); ++ /* Else cpu is not in idle, do nothing here */ ++ rq_unlock_irqrestore(rq, &rf); ++ } ++ ++out: ++ rcu_read_unlock(); ++} ++ ++static int valid_task_cpu(struct task_struct *p) ++{ ++ cpumask_t valid_mask; ++ ++ if (p->flags & PF_KTHREAD) ++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_all_mask); ++ else ++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_active_mask); ++ ++ if (unlikely(!cpumask_weight(&valid_mask))) { ++ /* We shouldn't be hitting this any more */ ++ printk(KERN_WARNING "SCHED: No cpumask for %s/%d weight %d\n", p->comm, ++ p->pid, cpumask_weight(&p->cpus_allowed)); ++ return cpumask_any(&p->cpus_allowed); ++ } ++ return cpumask_any(&valid_mask); ++} ++ ++/* ++ * For a task that's just being woken up we have a valuable balancing ++ * opportunity so choose the nearest cache most lightly loaded runqueue. ++ * Entered with rq locked and returns with the chosen runqueue locked. ++ */ ++static inline int select_best_cpu(struct task_struct *p) ++{ ++ unsigned int idlest = ~0U; ++ struct rq *rq = NULL; ++ int i; ++ ++ if (suitable_idle_cpus(p)) { ++ int cpu = task_cpu(p); ++ ++ if (unlikely(needs_other_cpu(p, cpu))) ++ cpu = valid_task_cpu(p); ++ rq = resched_best_idle(p, cpu); ++ if (likely(rq)) ++ return rq->cpu; ++ } ++ ++ for (i = 0; i < num_possible_cpus(); i++) { ++ struct rq *other_rq = task_rq(p)->cpu_order[i]; ++ int entries; ++ ++ if (!other_rq->online) ++ continue; ++ if (needs_other_cpu(p, other_rq->cpu)) ++ continue; ++ entries = rq_load(other_rq); ++ if (entries >= idlest) ++ continue; ++ idlest = entries; ++ rq = other_rq; ++ } ++ if (unlikely(!rq)) ++ return task_cpu(p); ++ return rq->cpu; ++} ++#else /* CONFIG_SMP */ ++static int valid_task_cpu(struct task_struct *p) ++{ ++ return 0; ++} ++ ++static inline int select_best_cpu(struct task_struct *p) ++{ ++ return 0; ++} ++ ++static struct rq *resched_best_idle(struct task_struct *p, int cpu) ++{ ++ return NULL; ++} ++#endif /* CONFIG_SMP */ ++ ++static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++#if defined(CONFIG_SMP) ++ if (!cpus_share_cache(smp_processor_id(), cpu)) { ++ sched_clock_cpu(cpu); /* Sync clocks across CPUs */ ++ ttwu_queue_remote(p, cpu, wake_flags); ++ return; ++ } ++#endif ++ rq_lock(rq); ++ ttwu_do_activate(rq, p, wake_flags); ++ rq_unlock(rq); ++} ++ ++/*** ++ * try_to_wake_up - wake up a thread ++ * @p: the thread to be awakened ++ * @state: the mask of task states that can be woken ++ * @wake_flags: wake modifier flags (WF_*) ++ * ++ * Put it on the run-queue if it's not already there. 
The "current" ++ * thread is always on the run-queue (except when the actual ++ * re-schedule is in progress), and as such you're allowed to do ++ * the simpler "current->state = TASK_RUNNING" to mark yourself ++ * runnable without the overhead of this. ++ * ++ * Return: %true if @p was woken up, %false if it was already running. ++ * or @state didn't match @p's state. ++ */ ++static int ++try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) ++{ ++ unsigned long flags; ++ int cpu, success = 0; ++ ++ /* ++ * If we are going to wake up a thread waiting for CONDITION we ++ * need to ensure that CONDITION=1 done by the caller can not be ++ * reordered with p->state check below. This pairs with mb() in ++ * set_current_state() the waiting thread does. ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ smp_mb__after_spinlock(); ++ /* state is a volatile long, どうして、分からない */ ++ if (!((unsigned int)p->state & state)) ++ goto out; ++ ++ trace_sched_waking(p); ++ ++ /* We're going to change ->state: */ ++ success = 1; ++ cpu = task_cpu(p); ++ ++ /* ++ * Ensure we load p->on_rq _after_ p->state, otherwise it would ++ * be possible to, falsely, observe p->on_rq == 0 and get stuck ++ * in smp_cond_load_acquire() below. ++ * ++ * sched_ttwu_pending() try_to_wake_up() ++ * STORE p->on_rq = 1 LOAD p->state ++ * UNLOCK rq->lock ++ * ++ * __schedule() (switch to task 'p') ++ * LOCK rq->lock smp_rmb(); ++ * smp_mb__after_spinlock(); ++ * UNLOCK rq->lock ++ * ++ * [task p] ++ * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq ++ * ++ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in ++ * __schedule(). See the comment for smp_mb__after_spinlock(). ++ */ ++ smp_rmb(); ++ if (p->on_rq && ttwu_remote(p, wake_flags)) ++ goto stat; ++ ++#ifdef CONFIG_SMP ++ /* ++ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be ++ * possible to, falsely, observe p->on_cpu == 0. ++ * ++ * One must be running (->on_cpu == 1) in order to remove oneself ++ * from the runqueue. ++ * ++ * __schedule() (switch to task 'p') try_to_wake_up() ++ * STORE p->on_cpu = 1 LOAD p->on_rq ++ * UNLOCK rq->lock ++ * ++ * __schedule() (put 'p' to sleep) ++ * LOCK rq->lock smp_rmb(); ++ * smp_mb__after_spinlock(); ++ * STORE p->on_rq = 0 LOAD p->on_cpu ++ * ++ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in ++ * __schedule(). See the comment for smp_mb__after_spinlock(). ++ */ ++ smp_rmb(); ++ ++ /* ++ * If the owning (remote) CPU is still in the middle of schedule() with ++ * this task as prev, wait until its done referencing the task. ++ * ++ * Pairs with the smp_store_release() in finish_task(). ++ * ++ * This ensures that tasks getting woken will be fully ordered against ++ * their previous state and preserve Program Order. 
++ */ ++ smp_cond_load_acquire(&p->on_cpu, !VAL); ++ ++ p->sched_contributes_to_load = !!task_contributes_to_load(p); ++ p->state = TASK_WAKING; ++ ++ if (p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&task_rq(p)->nr_iowait); ++ } ++ ++ cpu = select_best_cpu(p); ++ if (task_cpu(p) != cpu) { ++ wake_flags |= WF_MIGRATED; ++ psi_ttwu_dequeue(p); ++ set_task_cpu(p, cpu); ++ } ++ ++#else /* CONFIG_SMP */ ++ ++ if (p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&task_rq(p)->nr_iowait); ++ } ++ ++#endif /* CONFIG_SMP */ ++ ++ ttwu_queue(p, cpu, wake_flags); ++stat: ++ ttwu_stat(p, cpu, wake_flags); ++out: ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++ return success; ++} ++ ++/** ++ * try_to_wake_up_local - try to wake up a local task with rq lock held ++ * @p: the thread to be awakened ++ * ++ * Put @p on the run-queue if it's not already there. The caller must ++ * ensure that rq is locked and, @p is not the current task. ++ * rq stays locked over invocation. ++ */ ++static void try_to_wake_up_local(struct task_struct *p) ++{ ++ struct rq *rq = task_rq(p); ++ ++ if (WARN_ON_ONCE(rq != this_rq()) || ++ WARN_ON_ONCE(p == current)) ++ return; ++ ++ lockdep_assert_held(rq->lock); ++ ++ if (!raw_spin_trylock(&p->pi_lock)) { ++ /* ++ * This is OK, because current is on_cpu, which avoids it being ++ * picked for load-balance and preemption/IRQs are still ++ * disabled avoiding further scheduler activity on it and we've ++ * not yet picked a replacement task. ++ */ ++ rq_unlock(rq); ++ raw_spin_lock(&p->pi_lock); ++ rq_lock(rq); ++ } ++ ++ if (!(p->state & TASK_NORMAL)) ++ goto out; ++ ++ trace_sched_waking(p); ++ ++ if (!task_on_rq_queued(p)) { ++ if (p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&rq->nr_iowait); ++ } ++ ttwu_activate(rq, p, ENQUEUE_WAKEUP); ++ } ++ ++ ttwu_do_wakeup(rq, p, 0); ++ ttwu_stat(p, smp_processor_id(), 0); ++out: ++ raw_spin_unlock(&p->pi_lock); ++} ++ ++/** ++ * wake_up_process - Wake up a specific process ++ * @p: The process to be woken up. ++ * ++ * Attempt to wake up the nominated process and move it to the set of runnable ++ * processes. ++ * ++ * Return: 1 if the process was woken up, 0 if it was already running. ++ * ++ * This function executes a full memory barrier before accessing the task state. ++ */ ++int wake_up_process(struct task_struct *p) ++{ ++ return try_to_wake_up(p, TASK_NORMAL, 0); ++} ++EXPORT_SYMBOL(wake_up_process); ++ ++int wake_up_state(struct task_struct *p, unsigned int state) ++{ ++ return try_to_wake_up(p, state, 0); ++} ++ ++static void time_slice_expired(struct task_struct *p, struct rq *rq); ++ ++/* ++ * Perform scheduler related setup for a newly forked process p. ++ * p is forked by current. ++ */ ++int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p) ++{ ++ unsigned long flags; ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ INIT_HLIST_HEAD(&p->preempt_notifiers); ++#endif ++ ++#ifdef CONFIG_COMPACTION ++ p->capture_control = NULL; ++#endif ++ ++ /* ++ * We mark the process as NEW here. This guarantees that ++ * nobody will actually run it, and a signal or other external ++ * event cannot wake it up and insert it on the runqueue either. ++ */ ++ p->state = TASK_NEW; ++ ++ /* ++ * The process state is set to the same value of the process executing ++ * do_fork() code. That is running. This guarantees that nobody will ++ * actually run it, and a signal or other external event cannot wake ++ * it up and insert it on the runqueue either. 
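++ * (With TASK_NEW set above, that older wording still holds: nothing can
++ * wake the task before wake_up_new_task() runs.)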
++ */ ++ ++ /* Should be reset in fork.c but done here for ease of MuQSS patching */ ++ p->on_cpu = ++ p->on_rq = ++ p->utime = ++ p->stime = ++ p->sched_time = ++ p->stime_ns = ++ p->utime_ns = 0; ++ skiplist_node_init(&p->node); ++ ++ /* ++ * Revert to default priority/policy on fork if requested. ++ */ ++ if (unlikely(p->sched_reset_on_fork)) { ++ if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) { ++ p->policy = SCHED_NORMAL; ++ p->normal_prio = normal_prio(p); ++ } ++ ++ if (PRIO_TO_NICE(p->static_prio) < 0) { ++ p->static_prio = NICE_TO_PRIO(0); ++ p->normal_prio = p->static_prio; ++ } ++ ++ /* ++ * We don't need the reset flag anymore after the fork. It has ++ * fulfilled its duty: ++ */ ++ p->sched_reset_on_fork = 0; ++ } ++ ++ /* ++ * Silence PROVE_RCU. ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ set_task_cpu(p, smp_processor_id()); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++#ifdef CONFIG_SCHED_INFO ++ if (unlikely(sched_info_on())) ++ memset(&p->sched_info, 0, sizeof(p->sched_info)); ++#endif ++ init_task_preempt_count(p); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_SCHEDSTATS ++ ++DEFINE_STATIC_KEY_FALSE(sched_schedstats); ++static bool __initdata __sched_schedstats = false; ++ ++static void set_schedstats(bool enabled) ++{ ++ if (enabled) ++ static_branch_enable(&sched_schedstats); ++ else ++ static_branch_disable(&sched_schedstats); ++} ++ ++void force_schedstat_enabled(void) ++{ ++ if (!schedstat_enabled()) { ++ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); ++ static_branch_enable(&sched_schedstats); ++ } ++} ++ ++static int __init setup_schedstats(char *str) ++{ ++ int ret = 0; ++ if (!str) ++ goto out; ++ ++ /* ++ * This code is called before jump labels have been set up, so we can't ++ * change the static branch directly just yet. Instead set a temporary ++ * variable so init_schedstats() can do it later. 
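++ *
++ * For example, booting with "schedstats=enable" on the kernel command
++ * line sets __sched_schedstats here, and init_schedstats() flips the
++ * static branch once jump labels are functional.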
++ */ ++ if (!strcmp(str, "enable")) { ++ __sched_schedstats = true; ++ ret = 1; ++ } else if (!strcmp(str, "disable")) { ++ __sched_schedstats = false; ++ ret = 1; ++ } ++out: ++ if (!ret) ++ pr_warn("Unable to parse schedstats=\n"); ++ ++ return ret; ++} ++__setup("schedstats=", setup_schedstats); ++ ++static void __init init_schedstats(void) ++{ ++ set_schedstats(__sched_schedstats); ++} ++ ++#ifdef CONFIG_PROC_SYSCTL ++int sysctl_schedstats(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ struct ctl_table t; ++ int err; ++ int state = static_branch_likely(&sched_schedstats); ++ ++ if (write && !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ t = *table; ++ t.data = &state; ++ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); ++ if (err < 0) ++ return err; ++ if (write) ++ set_schedstats(state); ++ return err; ++} ++#endif /* CONFIG_PROC_SYSCTL */ ++#else /* !CONFIG_SCHEDSTATS */ ++static inline void init_schedstats(void) {} ++#endif /* CONFIG_SCHEDSTATS */ ++ ++static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p); ++ ++static void account_task_cpu(struct rq *rq, struct task_struct *p) ++{ ++ update_clocks(rq); ++ /* This isn't really a context switch but accounting is the same */ ++ update_cpu_clock_switch(rq, p); ++ p->last_ran = rq->niffies; ++} ++ ++bool sched_smp_initialized __read_mostly; ++ ++static inline int hrexpiry_enabled(struct rq *rq) ++{ ++ if (unlikely(!cpu_active(cpu_of(rq)) || !sched_smp_initialized)) ++ return 0; ++ return hrtimer_is_hres_active(&rq->hrexpiry_timer); ++} ++ ++/* ++ * Use HR-timers to deliver accurate preemption points. ++ */ ++static inline void hrexpiry_clear(struct rq *rq) ++{ ++ if (!hrexpiry_enabled(rq)) ++ return; ++ if (hrtimer_active(&rq->hrexpiry_timer)) ++ hrtimer_cancel(&rq->hrexpiry_timer); ++} ++ ++/* ++ * High-resolution time_slice expiry. ++ * Runs from hardirq context with interrupts disabled. ++ */ ++static enum hrtimer_restart hrexpiry(struct hrtimer *timer) ++{ ++ struct rq *rq = container_of(timer, struct rq, hrexpiry_timer); ++ struct task_struct *p; ++ ++ /* This can happen during CPU hotplug / resume */ ++ if (unlikely(cpu_of(rq) != smp_processor_id())) ++ goto out; ++ ++ /* ++ * We're doing this without the runqueue lock but this should always ++ * be run on the local CPU. Time slice should run out in __schedule ++ * but we set it to zero here in case niffies is slightly less. ++ */ ++ p = rq->curr; ++ p->time_slice = 0; ++ __set_tsk_resched(p); ++out: ++ return HRTIMER_NORESTART; ++} ++ ++/* ++ * Called to set the hrexpiry timer state. ++ * ++ * called with irqs disabled from the local CPU only ++ */ ++static void hrexpiry_start(struct rq *rq, u64 delay) ++{ ++ if (!hrexpiry_enabled(rq)) ++ return; ++ ++ hrtimer_start(&rq->hrexpiry_timer, ns_to_ktime(delay), ++ HRTIMER_MODE_REL_PINNED); ++} ++ ++static void init_rq_hrexpiry(struct rq *rq) ++{ ++ hrtimer_init(&rq->hrexpiry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ rq->hrexpiry_timer.function = hrexpiry; ++} ++ ++static inline int rq_dither(struct rq *rq) ++{ ++ if (!hrexpiry_enabled(rq)) ++ return HALF_JIFFY_US; ++ return 0; ++} ++ ++/* ++ * wake_up_new_task - wake up a newly created task for the first time. ++ * ++ * This function will do some initial scheduler statistics housekeeping ++ * that must be done for every newly created context, then puts the task ++ * on the runqueue and wakes it. 
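++ *
++ * As a worked example of the timeslice split below: a parent with
++ * 6000us of slice left keeps 3000us and the child inherits the other
++ * 3000us; only if the parent's remainder falls under RESCHED_US is it
++ * rescheduled and the child started on a fresh slice and deadline.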
++ */ ++void wake_up_new_task(struct task_struct *p) ++{ ++ struct task_struct *parent, *rq_curr; ++ struct rq *rq, *new_rq; ++ unsigned long flags; ++ ++ parent = p->parent; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ p->state = TASK_RUNNING; ++ /* Task_rq can't change yet on a new task */ ++ new_rq = rq = task_rq(p); ++ if (unlikely(needs_other_cpu(p, task_cpu(p)))) { ++ set_task_cpu(p, valid_task_cpu(p)); ++ new_rq = task_rq(p); ++ } ++ ++ double_rq_lock(rq, new_rq); ++ rq_curr = rq->curr; ++ ++ /* ++ * Make sure we do not leak PI boosting priority to the child. ++ */ ++ p->prio = rq_curr->normal_prio; ++ ++ trace_sched_wakeup_new(p); ++ ++ /* ++ * Share the timeslice between parent and child, thus the ++ * total amount of pending timeslices in the system doesn't change, ++ * resulting in more scheduling fairness. If it's negative, it won't ++ * matter since that's the same as being 0. rq->rq_deadline is only ++ * modified within schedule() so it is always equal to ++ * current->deadline. ++ */ ++ account_task_cpu(rq, rq_curr); ++ p->last_ran = rq_curr->last_ran; ++ if (likely(rq_curr->policy != SCHED_FIFO)) { ++ rq_curr->time_slice /= 2; ++ if (rq_curr->time_slice < RESCHED_US) { ++ /* ++ * Forking task has run out of timeslice. Reschedule it and ++ * start its child with a new time slice and deadline. The ++ * child will end up running first because its deadline will ++ * be slightly earlier. ++ */ ++ __set_tsk_resched(rq_curr); ++ time_slice_expired(p, new_rq); ++ if (suitable_idle_cpus(p)) ++ resched_best_idle(p, task_cpu(p)); ++ else if (unlikely(rq != new_rq)) ++ try_preempt(p, new_rq); ++ } else { ++ p->time_slice = rq_curr->time_slice; ++ if (rq_curr == parent && rq == new_rq && !suitable_idle_cpus(p)) { ++ /* ++ * The VM isn't cloned, so we're in a good position to ++ * do child-runs-first in anticipation of an exec. This ++ * usually avoids a lot of COW overhead. ++ */ ++ __set_tsk_resched(rq_curr); ++ } else { ++ /* ++ * Adjust the hrexpiry since rq_curr will keep ++ * running and its timeslice has been shortened. ++ */ ++ hrexpiry_start(rq, US_TO_NS(rq_curr->time_slice)); ++ try_preempt(p, new_rq); ++ } ++ } ++ } else { ++ time_slice_expired(p, new_rq); ++ try_preempt(p, new_rq); ++ } ++ activate_task(p, new_rq, 0); ++ double_rq_unlock(rq, new_rq); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++} ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ ++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); ++ ++void preempt_notifier_inc(void) ++{ ++ static_branch_inc(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_inc); ++ ++void preempt_notifier_dec(void) ++{ ++ static_branch_dec(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_dec); ++ ++/** ++ * preempt_notifier_register - tell me when current is being preempted & rescheduled ++ * @notifier: notifier struct to register ++ */ ++void preempt_notifier_register(struct preempt_notifier *notifier) ++{ ++ if (!static_branch_unlikely(&preempt_notifier_key)) ++ WARN(1, "registering preempt_notifier while notifiers disabled\n"); ++ ++ hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_register); ++ ++/** ++ * preempt_notifier_unregister - no longer interested in preemption notifications ++ * @notifier: notifier struct to unregister ++ * ++ * This is *not* safe to call from within a preemption notifier. 
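++ *
++ * A registration sketch, mirroring the in-tree KVM usage (my_ops and
++ * the callbacks are illustrative names only):
++ *
++ * static struct preempt_ops my_ops = {
++ * .sched_in = my_sched_in,
++ * .sched_out = my_sched_out,
++ * };
++ *
++ * preempt_notifier_inc();
++ * preempt_notifier_init(&n, &my_ops);
++ * preempt_notifier_register(&n);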
++ */ ++void preempt_notifier_unregister(struct preempt_notifier *notifier) ++{ ++ hlist_del(¬ifier->link); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_unregister); ++ ++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_in(notifier, raw_smp_processor_id()); ++} ++ ++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ if (static_branch_unlikely(&preempt_notifier_key)) ++ __fire_sched_in_preempt_notifiers(curr); ++} ++ ++static void ++__fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_out(notifier, next); ++} ++ ++static __always_inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ if (static_branch_unlikely(&preempt_notifier_key)) ++ __fire_sched_out_preempt_notifiers(curr, next); ++} ++ ++#else /* !CONFIG_PREEMPT_NOTIFIERS */ ++ ++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++} ++ ++static inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++} ++ ++#endif /* CONFIG_PREEMPT_NOTIFIERS */ ++ ++static inline void prepare_task(struct task_struct *next) ++{ ++ /* ++ * Claim the task as running, we do this before switching to it ++ * such that any running task will have this set. ++ */ ++ next->on_cpu = 1; ++} ++ ++static inline void finish_task(struct task_struct *prev) ++{ ++#ifdef CONFIG_SMP ++ /* ++ * After ->on_cpu is cleared, the task can be moved to a different CPU. ++ * We must ensure this doesn't happen until the switch is completely ++ * finished. ++ * ++ * In particular, the load of prev->state in finish_task_switch() must ++ * happen before this. ++ * ++ * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). ++ */ ++ smp_store_release(&prev->on_cpu, 0); ++#endif ++} ++ ++static inline void ++prepare_lock_switch(struct rq *rq, struct task_struct *next) ++{ ++ /* ++ * Since the runqueue lock will be released by the next ++ * task (which is an invalid locking op but in the case ++ * of the scheduler it's an obvious special-case), so we ++ * do an early lockdep release here: ++ */ ++ spin_release(&rq->lock->dep_map, 1, _THIS_IP_); ++#ifdef CONFIG_DEBUG_SPINLOCK ++ /* this is a valid case when another task releases the spinlock */ ++ rq->lock->owner = next; ++#endif ++} ++ ++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) ++{ ++ /* ++ * If we are tracking spinlock dependencies then we have to ++ * fix up the runqueue lock - which gets 'carried over' from ++ * prev into current: ++ */ ++ spin_acquire(&rq->lock->dep_map, 0, 0, _THIS_IP_); ++ ++#ifdef CONFIG_SMP ++ /* ++ * If prev was marked as migrating to another CPU in return_task, drop ++ * the local runqueue lock but leave interrupts disabled and grab the ++ * remote lock we're migrating it to before enabling them. ++ */ ++ if (unlikely(task_on_rq_migrating(prev))) { ++ sched_info_dequeued(rq, prev); ++ /* ++ * We move the ownership of prev to the new cpu now. ttwu can't ++ * activate prev to the wrong cpu since it has to grab this ++ * runqueue in ttwu_remote. 
++ */ ++#ifdef CONFIG_THREAD_INFO_IN_TASK ++ prev->cpu = prev->wake_cpu; ++#else ++ task_thread_info(prev)->cpu = prev->wake_cpu; ++#endif ++ raw_spin_unlock(rq->lock); ++ ++ raw_spin_lock(&prev->pi_lock); ++ rq = __task_rq_lock(prev, NULL); ++ /* Check that someone else hasn't already queued prev */ ++ if (likely(!task_queued(prev))) { ++ enqueue_task(rq, prev, 0); ++ prev->on_rq = TASK_ON_RQ_QUEUED; ++ /* Wake up the CPU if it's not already running */ ++ resched_if_idle(rq); ++ } ++ raw_spin_unlock(&prev->pi_lock); ++ } ++#endif ++ rq_unlock(rq); ++ ++ do_pending_softirq(rq, current); ++ ++ local_irq_enable(); ++} ++ ++#ifndef prepare_arch_switch ++# define prepare_arch_switch(next) do { } while (0) ++#endif ++#ifndef finish_arch_switch ++# define finish_arch_switch(prev) do { } while (0) ++#endif ++#ifndef finish_arch_post_lock_switch ++# define finish_arch_post_lock_switch() do { } while (0) ++#endif ++ ++/** ++ * prepare_task_switch - prepare to switch tasks ++ * @rq: the runqueue preparing to switch ++ * @next: the task we are going to switch to. ++ * ++ * This is called with the rq lock held and interrupts off. It must ++ * be paired with a subsequent finish_task_switch after the context ++ * switch. ++ * ++ * prepare_task_switch sets up locking and calls architecture specific ++ * hooks. ++ */ ++static inline void ++prepare_task_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ kcov_prepare_switch(prev); ++ sched_info_switch(rq, prev, next); ++ perf_event_task_sched_out(prev, next); ++ rseq_preempt(prev); ++ fire_sched_out_preempt_notifiers(prev, next); ++ prepare_task(next); ++ prepare_arch_switch(next); ++} ++ ++/** ++ * finish_task_switch - clean up after a task-switch ++ * @rq: runqueue associated with task-switch ++ * @prev: the thread we just switched away from. ++ * ++ * finish_task_switch must be called after the context switch, paired ++ * with a prepare_task_switch call before the context switch. ++ * finish_task_switch will reconcile locking set up by prepare_task_switch, ++ * and do any other architecture-specific cleanup actions. ++ * ++ * Note that we may have delayed dropping an mm in context_switch(). If ++ * so, we finish that here outside of the runqueue lock. (Doing it ++ * with the lock held can cause deadlocks; see schedule() for ++ * details.) ++ * ++ * The context switch have flipped the stack from under us and restored the ++ * local variables which were saved when this task called schedule() in the ++ * past. prev == current is still correct but we need to recalculate this_rq ++ * because prev may have moved to another CPU. ++ */ ++static void finish_task_switch(struct task_struct *prev) ++ __releases(rq->lock) ++{ ++ struct rq *rq = this_rq(); ++ struct mm_struct *mm = rq->prev_mm; ++ long prev_state; ++ ++ /* ++ * The previous task will have left us with a preempt_count of 2 ++ * because it left us after: ++ * ++ * schedule() ++ * preempt_disable(); // 1 ++ * __schedule() ++ * raw_spin_lock_irq(rq->lock) // 2 ++ * ++ * Also, see FORK_PREEMPT_COUNT. ++ */ ++ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, ++ "corrupted preempt_count: %s/%d/0x%x\n", ++ current->comm, current->pid, preempt_count())) ++ preempt_count_set(FORK_PREEMPT_COUNT); ++ ++ rq->prev_mm = NULL; ++ ++ /* ++ * A task struct has one reference for the use as "current". ++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls ++ * schedule one last time. 
The schedule call will never return, and
++ * the scheduled task must drop that reference.
++ *
++ * We must observe prev->state before clearing prev->on_cpu (in
++ * finish_task), otherwise a concurrent wakeup can get prev
++ * running on another CPU and we could race with its RUNNING -> DEAD
++ * transition, resulting in a double drop.
++ */
++ prev_state = prev->state;
++ vtime_task_switch(prev);
++ perf_event_task_sched_in(prev, current);
++ finish_task(prev);
++ finish_lock_switch(rq, prev);
++ finish_arch_post_lock_switch();
++ kcov_finish_switch(current);
++
++ fire_sched_in_preempt_notifiers(current);
++ /*
++ * When switching through a kernel thread, the loop in
++ * membarrier_{private,global}_expedited() may have observed that
++ * kernel thread and not issued an IPI. It is therefore possible to
++ * schedule between user->kernel->user threads without passing through
++ * switch_mm(). Membarrier requires a barrier after storing to
++ * rq->curr, before returning to userspace, so provide them here:
++ *
++ * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++ * provided by mmdrop(),
++ * - a sync_core for SYNC_CORE.
++ */
++ if (mm) {
++ membarrier_mm_sync_core_before_usermode(mm);
++ mmdrop(mm);
++ }
++ if (unlikely(prev_state == TASK_DEAD)) {
++ /*
++ * Remove function-return probe instances associated with this
++ * task and put them back on the free list.
++ */
++ kprobe_flush_task(prev);
++
++ /* Task is done with its stack. */
++ put_task_stack(prev);
++
++ put_task_struct(prev);
++ }
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++{
++ /*
++ * New tasks start with FORK_PREEMPT_COUNT, see there and
++ * finish_task_switch() for details.
++ *
++ * finish_task_switch() will drop rq->lock() and lower preempt_count
++ * and the preempt_enable() will end up enabling preemption (on
++ * PREEMPT_COUNT kernels).
++ */
++
++ finish_task_switch(prev);
++ preempt_enable();
++
++ if (current->set_child_tid)
++ put_user(task_pid_vnr(current), current->set_child_tid);
++
++ calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline void
++context_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ struct mm_struct *mm, *oldmm;
++
++ prepare_task_switch(rq, prev, next);
++
++ mm = next->mm;
++ oldmm = prev->active_mm;
++ /*
++ * For paravirt, this is coupled with an exit in switch_to to
++ * combine the page table reload and the switch backend into
++ * one hypercall.
++ */
++ arch_start_context_switch(prev);
++
++ /*
++ * If mm is non-NULL, we pass through switch_mm(). If mm is
++ * NULL, we will pass through mmdrop() in finish_task_switch().
++ * Both of these contain the full memory barrier required by
++ * membarrier after storing to rq->curr, before returning to
++ * user-space.
++ */
++ if (!mm) {
++ next->active_mm = oldmm;
++ mmgrab(oldmm);
++ enter_lazy_tlb(oldmm, next);
++ } else
++ switch_mm_irqs_off(oldmm, mm, next);
++
++ if (!prev->mm) {
++ prev->active_mm = NULL;
++ rq->prev_mm = oldmm;
++ }
++ prepare_lock_switch(rq, next);
++
++ /* Here we just switch the register state and the stack.
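++ * switch_to() resumes on the next task's stack, with prev set to the
++ * task we actually switched away from; the barrier() keeps the
++ * compiler from reusing values cached from before the switch.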
*/
++ switch_to(prev, next, prev);
++ barrier();
++
++ finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned long nr_running(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_running;
++
++ return sum;
++}
++
++static unsigned long nr_uninterruptible(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_uninterruptible;
++
++ return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race. The caller is responsible for using it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++ struct rq *rq = cpu_rq(smp_processor_id());
++
++ if (rq_load(rq) == 1)
++ return true;
++ else
++ return false;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches(void)
++{
++ int i;
++ unsigned long long sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += cpu_rq(i)->nr_switches;
++
++ return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpufreq menu
++ * governor, end up using nonsensical data: they boost the frequency of a
++ * CPU that has IO-wait charged to it even though the woken task might not
++ * run on that CPU when it does become runnable.
++ */
++
++unsigned long nr_iowait_cpu(int cpu)
++{
++ return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we
++ * could have spent running if it were not for IO. That is, if we were to
++ * improve the storage performance, we'd have a proportional reduction in
++ * IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU: only that
++ * one CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, had the storage been faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means that, when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, by reason of under accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU, it can wake on another CPU than the
++ * one it blocked on. This means the per-CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
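++ *
++ * Worked example: tasks A and B both block on I/O after last running on
++ * CPU0. All of CPU0's idle time is charged as IO-wait while CPU1 idles
++ * as plain idle, even though faster storage could have kept both CPUs
++ * busy running A and B concurrently.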
++ */ ++ ++unsigned long nr_iowait(void) ++{ ++ unsigned long i, sum = 0; ++ ++ for_each_possible_cpu(i) ++ sum += nr_iowait_cpu(i); ++ ++ return sum; ++} ++ ++unsigned long nr_active(void) ++{ ++ return nr_running() + nr_uninterruptible(); ++} ++ ++/* Variables and functions for calc_load */ ++static unsigned long calc_load_update; ++unsigned long avenrun[3]; ++EXPORT_SYMBOL(avenrun); ++ ++/** ++ * get_avenrun - get the load average array ++ * @loads: pointer to dest load array ++ * @offset: offset to add ++ * @shift: shift count to shift the result left ++ * ++ * These values are estimates at best, so no need for locking. ++ */ ++void get_avenrun(unsigned long *loads, unsigned long offset, int shift) ++{ ++ loads[0] = (avenrun[0] + offset) << shift; ++ loads[1] = (avenrun[1] + offset) << shift; ++ loads[2] = (avenrun[2] + offset) << shift; ++} ++ ++/* ++ * calc_load - update the avenrun load estimates every LOAD_FREQ seconds. ++ */ ++void calc_global_load(unsigned long ticks) ++{ ++ long active; ++ ++ if (time_before(jiffies, READ_ONCE(calc_load_update))) ++ return; ++ active = nr_active() * FIXED_1; ++ ++ avenrun[0] = calc_load(avenrun[0], EXP_1, active); ++ avenrun[1] = calc_load(avenrun[1], EXP_5, active); ++ avenrun[2] = calc_load(avenrun[2], EXP_15, active); ++ ++ calc_load_update = jiffies + LOAD_FREQ; ++} ++ ++/** ++ * fixed_power_int - compute: x^n, in O(log n) time ++ * ++ * @x: base of the power ++ * @frac_bits: fractional bits of @x ++ * @n: power to raise @x to. ++ * ++ * By exploiting the relation between the definition of the natural power ++ * function: x^n := x*x*...*x (x multiplied by itself for n times), and ++ * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i, ++ * (where: n_i \elem {0, 1}, the binary vector representing n), ++ * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is ++ * of course trivially computable in O(log_2 n), the length of our binary ++ * vector. ++ */ ++static unsigned long ++fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) ++{ ++ unsigned long result = 1UL << frac_bits; ++ ++ if (n) { ++ for (;;) { ++ if (n & 1) { ++ result *= x; ++ result += 1UL << (frac_bits - 1); ++ result >>= frac_bits; ++ } ++ n >>= 1; ++ if (!n) ++ break; ++ x *= x; ++ x += 1UL << (frac_bits - 1); ++ x >>= frac_bits; ++ } ++ } ++ ++ return result; ++} ++ ++/* ++ * a1 = a0 * e + a * (1 - e) ++ * ++ * a2 = a1 * e + a * (1 - e) ++ * = (a0 * e + a * (1 - e)) * e + a * (1 - e) ++ * = a0 * e^2 + a * (1 - e) * (1 + e) ++ * ++ * a3 = a2 * e + a * (1 - e) ++ * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e) ++ * = a0 * e^3 + a * (1 - e) * (1 + e + e^2) ++ * ++ * ... ++ * ++ * an = a0 * e^n + a * (1 - e) * (1 + e + ... 
+ e^n-1) [1] ++ * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e) ++ * = a0 * e^n + a * (1 - e^n) ++ * ++ * [1] application of the geometric series: ++ * ++ * n 1 - x^(n+1) ++ * S_n := \Sum x^i = ------------- ++ * i=0 1 - x ++ */ ++unsigned long ++calc_load_n(unsigned long load, unsigned long exp, ++ unsigned long active, unsigned int n) ++{ ++ return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); ++} ++ ++DEFINE_PER_CPU(struct kernel_stat, kstat); ++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); ++ ++EXPORT_PER_CPU_SYMBOL(kstat); ++EXPORT_PER_CPU_SYMBOL(kernel_cpustat); ++ ++#ifdef CONFIG_PARAVIRT ++static inline u64 steal_ticks(u64 steal) ++{ ++ if (unlikely(steal > NSEC_PER_SEC)) ++ return div_u64(steal, TICK_NSEC); ++ ++ return __iter_div_u64_rem(steal, TICK_NSEC, &steal); ++} ++#endif ++ ++#ifndef nsecs_to_cputime ++# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) ++#endif ++ ++/* ++ * On each tick, add the number of nanoseconds to the unbanked variables and ++ * once one tick's worth has accumulated, account it allowing for accurate ++ * sub-tick accounting and totals. Use the TICK_APPROX_NS to match the way we ++ * deduct nanoseconds. ++ */ ++static void pc_idle_time(struct rq *rq, struct task_struct *idle, unsigned long ns) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ unsigned long ticks; ++ ++ if (atomic_read(&rq->nr_iowait) > 0) { ++ rq->iowait_ns += ns; ++ if (rq->iowait_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->iowait_ns); ++ cpustat[CPUTIME_IOWAIT] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->iowait_ns %= JIFFY_NS; ++ } ++ } else { ++ rq->idle_ns += ns; ++ if (rq->idle_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->idle_ns); ++ cpustat[CPUTIME_IDLE] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->idle_ns %= JIFFY_NS; ++ } ++ } ++ acct_update_integrals(idle); ++} ++ ++static void pc_system_time(struct rq *rq, struct task_struct *p, ++ int hardirq_offset, unsigned long ns) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ unsigned long ticks; ++ ++ p->stime_ns += ns; ++ if (p->stime_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(p->stime_ns); ++ p->stime_ns %= JIFFY_NS; ++ p->stime += (__force u64)TICK_APPROX_NS * ticks; ++ account_group_system_time(p, TICK_APPROX_NS * ticks); ++ } ++ p->sched_time += ns; ++ account_group_exec_runtime(p, ns); ++ ++ if (hardirq_count() - hardirq_offset) { ++ rq->irq_ns += ns; ++ if (rq->irq_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->irq_ns); ++ cpustat[CPUTIME_IRQ] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->irq_ns %= JIFFY_NS; ++ } ++ } else if (in_serving_softirq()) { ++ rq->softirq_ns += ns; ++ if (rq->softirq_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->softirq_ns); ++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->softirq_ns %= JIFFY_NS; ++ } ++ } else { ++ rq->system_ns += ns; ++ if (rq->system_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->system_ns); ++ cpustat[CPUTIME_SYSTEM] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->system_ns %= JIFFY_NS; ++ } ++ } ++ acct_update_integrals(p); ++} ++ ++static void pc_user_time(struct rq *rq, struct task_struct *p, unsigned long ns) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ unsigned long ticks; ++ ++ p->utime_ns += ns; ++ if (p->utime_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(p->utime_ns); ++ p->utime_ns %= JIFFY_NS; ++ p->utime += (__force u64)TICK_APPROX_NS * ticks; ++ account_group_user_time(p, TICK_APPROX_NS * ticks); ++ } ++ p->sched_time += ns; ++ account_group_exec_runtime(p, ns); ++ ++ if 
(this_cpu_ksoftirqd() == p) { ++ /* ++ * ksoftirqd time do not get accounted in cpu_softirq_time. ++ * So, we have to handle it separately here. ++ */ ++ rq->softirq_ns += ns; ++ if (rq->softirq_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->softirq_ns); ++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->softirq_ns %= JIFFY_NS; ++ } ++ } ++ ++ if (task_nice(p) > 0 || idleprio_task(p)) { ++ rq->nice_ns += ns; ++ if (rq->nice_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->nice_ns); ++ cpustat[CPUTIME_NICE] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->nice_ns %= JIFFY_NS; ++ } ++ } else { ++ rq->user_ns += ns; ++ if (rq->user_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->user_ns); ++ cpustat[CPUTIME_USER] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->user_ns %= JIFFY_NS; ++ } ++ } ++ acct_update_integrals(p); ++} ++ ++/* ++ * This is called on clock ticks. ++ * Bank in p->sched_time the ns elapsed since the last tick or switch. ++ * CPU scheduler quota accounting is also performed here in microseconds. ++ */ ++static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p) ++{ ++ s64 account_ns = rq->niffies - p->last_ran; ++ struct task_struct *idle = rq->idle; ++ ++ /* Accurate tick timekeeping */ ++ if (user_mode(get_irq_regs())) ++ pc_user_time(rq, p, account_ns); ++ else if (p != idle || (irq_count() != HARDIRQ_OFFSET)) { ++ pc_system_time(rq, p, HARDIRQ_OFFSET, account_ns); ++ } else ++ pc_idle_time(rq, idle, account_ns); ++ ++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */ ++ if (p->policy != SCHED_FIFO && p != idle) ++ p->time_slice -= NS_TO_US(account_ns); ++ ++ p->last_ran = rq->niffies; ++} ++ ++/* ++ * This is called on context switches. ++ * Bank in p->sched_time the ns elapsed since the last tick or switch. ++ * CPU scheduler quota accounting is also performed here in microseconds. ++ */ ++static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p) ++{ ++ s64 account_ns = rq->niffies - p->last_ran; ++ struct task_struct *idle = rq->idle; ++ ++ /* Accurate subtick timekeeping */ ++ if (p != idle) ++ pc_user_time(rq, p, account_ns); ++ else ++ pc_idle_time(rq, idle, account_ns); ++ ++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */ ++ if (p->policy != SCHED_FIFO && p != idle) ++ p->time_slice -= NS_TO_US(account_ns); ++} ++ ++/* ++ * Return any ns on the sched_clock that have not yet been accounted in ++ * @p in case that task is currently running. ++ * ++ * Called with task_rq_lock(p) held. ++ */ ++static inline u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) ++{ ++ u64 ns = 0; ++ ++ /* ++ * Must be ->curr _and_ ->on_rq. If dequeued, we would ++ * project cycles that may never be accounted to this ++ * thread, breaking clock_gettime(). ++ */ ++ if (p == rq->curr && task_on_rq_queued(p)) { ++ update_clocks(rq); ++ ns = rq->niffies - p->last_ran; ++ } ++ ++ return ns; ++} ++ ++/* ++ * Return accounted runtime for the task. ++ * Return separately the current's pending runtime that have not been ++ * accounted yet. ++ * ++ */ ++unsigned long long task_sched_runtime(struct task_struct *p) ++{ ++ struct rq_flags rf; ++ struct rq *rq; ++ u64 ns; ++ ++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) ++ /* ++ * 64-bit doesn't need locks to atomically read a 64-bit value. ++ * So we have a optimisation chance when the task's delta_exec is 0. ++ * Reading ->on_cpu is racy, but this is ok. ++ * ++ * If we race with it leaving CPU, we'll take a lock. So we're correct. 
++ * If we race with it entering CPU, unaccounted time is 0. This is
++ * indistinguishable from the read occurring a few cycles earlier.
++ * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++ * been accounted, so we're correct here as well.
++ */
++ if (!p->on_cpu || !task_on_rq_queued(p))
++ return tsk_seruntime(p);
++#endif
++
++ rq = task_rq_lock(p, &rf);
++ ns = p->sched_time + do_task_delta_exec(p, rq);
++ task_rq_unlock(rq, p, &rf);
++
++ return ns;
++}
++
++/*
++ * Functions to test for when SCHED_ISO tasks have used their allocated
++ * quota as real time scheduling and convert them back to SCHED_NORMAL. All
++ * data is modified only by the local runqueue during scheduler_tick with
++ * interrupts disabled.
++ */
++
++/*
++ * Test if SCHED_ISO tasks have run longer than their allotted period as RT
++ * tasks and set the refractory flag if necessary. There is 10% hysteresis
++ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
++ * slow division.
++ */
++static inline void iso_tick(struct rq *rq)
++{
++ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - 1) / ISO_PERIOD;
++ rq->iso_ticks += 100;
++ if (rq->iso_ticks > ISO_PERIOD * sched_iso_cpu) {
++ rq->iso_refractory = true;
++ if (unlikely(rq->iso_ticks > ISO_PERIOD * 100))
++ rq->iso_ticks = ISO_PERIOD * 100;
++ }
++}
++
++/* No SCHED_ISO task was running so decrease rq->iso_ticks */
++static inline void no_iso_tick(struct rq *rq, int ticks)
++{
++ if (rq->iso_ticks > 0 || rq->iso_refractory) {
++ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - ticks) / ISO_PERIOD;
++ if (rq->iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) {
++ rq->iso_refractory = false;
++ if (unlikely(rq->iso_ticks < 0))
++ rq->iso_ticks = 0;
++ }
++ }
++}
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static void task_running_tick(struct rq *rq)
++{
++ struct task_struct *p = rq->curr;
++
++ /*
++ * If a SCHED_ISO task is running we increment the iso_ticks. In
++ * order to prevent SCHED_ISO tasks from causing starvation in the
++ * presence of true RT tasks we account those as iso_ticks as well.
++ */
++ if (rt_task(p) || task_running_iso(p))
++ iso_tick(rq);
++ else
++ no_iso_tick(rq, 1);
++
++ /* SCHED_FIFO tasks never run out of timeslice. */
++ if (p->policy == SCHED_FIFO)
++ return;
++
++ if (iso_task(p)) {
++ if (task_running_iso(p)) {
++ if (rq->iso_refractory) {
++ /*
++ * SCHED_ISO task is running as RT and the limit
++ * has been hit. Force it to reschedule as
++ * SCHED_NORMAL by zeroing its time_slice.
++ */
++ p->time_slice = 0;
++ }
++ } else if (!rq->iso_refractory) {
++ /* Can now run as ISO again. Reschedule to pick up prio */
++ goto out_resched;
++ }
++ }
++
++ /*
++ * Tasks that were scheduled in the first half of a tick are not
++ * allowed to run into the 2nd half of the next tick if they will
++ * run out of time slice in the interim. Otherwise, if they have
++ * less than RESCHED_US μs of time slice left they will be rescheduled.
++ * Dither is used as a backup for when hrexpiry is disabled or high-res
++ * timers are not configured in.
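++ * For example, with hrexpiry disabled rq->dither is HALF_JIFFY_US, so a
++ * task is rescheduled here once its remaining slice drops below half a
++ * jiffy plus RESCHED_US instead of being left to run into the next tick.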
++ */ ++ if (p->time_slice - rq->dither >= RESCHED_US) ++ return; ++out_resched: ++ rq_lock(rq); ++ __set_tsk_resched(p); ++ rq_unlock(rq); ++} ++ ++static inline void task_tick(struct rq *rq) ++{ ++ if (!rq_idle(rq)) ++ task_running_tick(rq); ++ else if (rq->last_jiffy > rq->last_scheduler_tick) ++ no_iso_tick(rq, rq->last_jiffy - rq->last_scheduler_tick); ++} ++ ++#ifdef CONFIG_NO_HZ_FULL ++/* ++ * We can stop the timer tick any time highres timers are active since ++ * we rely entirely on highres timeouts for task expiry rescheduling. ++ */ ++static void sched_stop_tick(struct rq *rq, int cpu) ++{ ++ if (!hrexpiry_enabled(rq)) ++ return; ++ if (!tick_nohz_full_enabled()) ++ return; ++ if (!tick_nohz_full_cpu(cpu)) ++ return; ++ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); ++} ++ ++static inline void sched_start_tick(struct rq *rq, int cpu) ++{ ++ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); ++} ++ ++struct tick_work { ++ int cpu; ++ struct delayed_work work; ++}; ++ ++static struct tick_work __percpu *tick_work_cpu; ++ ++static void sched_tick_remote(struct work_struct *work) ++{ ++ struct delayed_work *dwork = to_delayed_work(work); ++ struct tick_work *twork = container_of(dwork, struct tick_work, work); ++ int cpu = twork->cpu; ++ struct rq *rq = cpu_rq(cpu); ++ struct task_struct *curr; ++ u64 delta; ++ ++ /* ++ * Handle the tick only if it appears the remote CPU is running in full ++ * dynticks mode. The check is racy by nature, but missing a tick or ++ * having one too much is no big deal because the scheduler tick updates ++ * statistics and checks timeslices in a time-independent way, regardless ++ * of when exactly it is running. ++ */ ++ if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu)) ++ goto out_requeue; ++ ++ rq_lock_irq(rq); ++ curr = rq->curr; ++ if (is_idle_task(curr)) ++ goto out_unlock; ++ ++ update_rq_clock(rq); ++ delta = rq_clock_task(rq) - curr->last_ran; ++ ++ /* ++ * Make sure the next tick runs within a reasonable ++ * amount of time. ++ */ ++ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); ++ task_tick(rq); ++ ++out_unlock: ++ rq_unlock_irq(rq, NULL); ++ ++out_requeue: ++ /* ++ * Run the remote tick once per second (1Hz). This arbitrary ++ * frequency is large enough to avoid overload but short enough ++ * to keep scheduler internal stats reasonably up to date. 
++ */ ++ queue_delayed_work(system_unbound_wq, dwork, HZ); ++} ++ ++static void sched_tick_start(int cpu) ++{ ++ struct tick_work *twork; ++ ++ if (housekeeping_cpu(cpu, HK_FLAG_TICK)) ++ return; ++ ++ WARN_ON_ONCE(!tick_work_cpu); ++ ++ twork = per_cpu_ptr(tick_work_cpu, cpu); ++ twork->cpu = cpu; ++ INIT_DELAYED_WORK(&twork->work, sched_tick_remote); ++ queue_delayed_work(system_unbound_wq, &twork->work, HZ); ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++static void sched_tick_stop(int cpu) ++{ ++ struct tick_work *twork; ++ ++ if (housekeeping_cpu(cpu, HK_FLAG_TICK)) ++ return; ++ ++ WARN_ON_ONCE(!tick_work_cpu); ++ ++ twork = per_cpu_ptr(tick_work_cpu, cpu); ++ cancel_delayed_work_sync(&twork->work); ++} ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++int __init sched_tick_offload_init(void) ++{ ++ tick_work_cpu = alloc_percpu(struct tick_work); ++ BUG_ON(!tick_work_cpu); ++ ++ return 0; ++} ++ ++#else /* !CONFIG_NO_HZ_FULL */ ++static inline void sched_stop_tick(struct rq *rq, int cpu) {} ++static inline void sched_start_tick(struct rq *rq, int cpu) {} ++static inline void sched_tick_start(int cpu) { } ++static inline void sched_tick_stop(int cpu) { } ++#endif ++ ++/* ++ * This function gets called by the timer code, with HZ frequency. ++ * We call it with interrupts disabled. ++ */ ++void scheduler_tick(void) ++{ ++ int cpu __maybe_unused = smp_processor_id(); ++ struct rq *rq = cpu_rq(cpu); ++ ++ sched_clock_tick(); ++ update_clocks(rq); ++ update_load_avg(rq, 0); ++ update_cpu_clock_tick(rq, rq->curr); ++ task_tick(rq); ++ rq->last_scheduler_tick = rq->last_jiffy; ++ rq->last_tick = rq->clock; ++ psi_task_tick(rq); ++ perf_event_task_tick(); ++ sched_stop_tick(rq, cpu); ++} ++ ++#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ ++ defined(CONFIG_TRACE_PREEMPT_TOGGLE)) ++/* ++ * If the value passed in is equal to the current preempt count ++ * then we just disabled preemption. Start timing the latency. ++ */ ++static inline void preempt_latency_start(int val) ++{ ++ if (preempt_count() == val) { ++ unsigned long ip = get_lock_parent_ip(); ++#ifdef CONFIG_DEBUG_PREEMPT ++ current->preempt_disable_ip = ip; ++#endif ++ trace_preempt_off(CALLER_ADDR0, ip); ++ } ++} ++ ++void preempt_count_add(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) ++ return; ++#endif ++ __preempt_count_add(val); ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Spinlock count overflowing soon? ++ */ ++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= ++ PREEMPT_MASK - 10); ++#endif ++ preempt_latency_start(val); ++} ++EXPORT_SYMBOL(preempt_count_add); ++NOKPROBE_SYMBOL(preempt_count_add); ++ ++/* ++ * If the value passed in equals to the current preempt count ++ * then we just enabled preemption. Stop timing the latency. ++ */ ++static inline void preempt_latency_stop(int val) ++{ ++ if (preempt_count() == val) ++ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); ++} ++ ++void preempt_count_sub(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) ++ return; ++ /* ++ * Is the spinlock portion underflowing? 
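++ * (Preemption disables occupy the low PREEMPT_MASK bits of the count,
++ * so subtracting a small val while those bits are already zero would
++ * wrap them below zero, which is what the check below catches.)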
++ */
++	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++			!(preempt_count() & PREEMPT_MASK)))
++		return;
++#endif
++
++	preempt_latency_stop(val);
++	__preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	return p->preempt_disable_ip;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * The time_slice is only refilled when it is empty and that is when we set a
++ * new deadline. Make sure update_clocks has been called recently to update
++ * rq->niffies.
++ */
++static void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = timeslice();
++	p->deadline = rq->niffies + task_deadline_diff(p);
++#ifdef CONFIG_SMT_NICE
++	if (!p->mm)
++		p->smt_bias = 0;
++	else if (rt_task(p))
++		p->smt_bias = 1 << 30;
++	else if (task_running_iso(p))
++		p->smt_bias = 1 << 29;
++	else if (idleprio_task(p)) {
++		if (task_running_idle(p))
++			p->smt_bias = 0;
++		else
++			p->smt_bias = 1;
++	} else if (--p->smt_bias < 1)
++		p->smt_bias = MAX_PRIO - p->static_prio;
++#endif
++}
++
++/*
++ * Timeslices below RESCHED_US are considered as good as expired as there's no
++ * point rescheduling when there's so little time left. SCHED_BATCH tasks
++ * have been flagged as not latency sensitive and likely to be fully CPU
++ * bound so every time they're rescheduled they have their time_slice
++ * refilled, but get a new later deadline to have little effect on
++ * SCHED_NORMAL tasks.
++ */
++static inline void check_deadline(struct task_struct *p, struct rq *rq)
++{
++	if (p->time_slice < RESCHED_US || batch_task(p))
++		time_slice_expired(p, rq);
++}
++
++/*
++ * Task selection with skiplists is a simple matter of picking off the first
++ * task in the sorted list, an O(1) operation. The lookup is amortised O(1)
++ * being bound to the number of processors.
++ *
++ * Runqueues are selectively locked based on their unlocked data and then
++ * unlocked if not needed. At most 3 locks will be held at any time and are
++ * released as soon as they're no longer needed. All balancing between CPUs
++ * is thus done here in an extremely simple first come best fit manner.
++ *
++ * This iterates over runqueues in cache locality order. In interactive mode
++ * it iterates over all CPUs and finds the task with the best key/deadline.
++ * In non-interactive mode it will only take a task if it's from the current
++ * runqueue or a runqueue with more tasks than the current one with a better
++ * key/deadline.
++ */
++#ifdef CONFIG_SMP
++static inline struct task_struct
++*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
++{
++	struct rq *locked = NULL, *chosen = NULL;
++	struct task_struct *edt = idle;
++	int i, best_entries = 0;
++	u64 best_key = ~0ULL;
++
++	for (i = 0; i < total_runqueues; i++) {
++		struct rq *other_rq = rq_order(rq, i);
++		skiplist_node *next;
++		int entries;
++
++		entries = other_rq->sl->entries;
++		/*
++		 * Check for queued entries lockless first. The local runqueue
++		 * is locked so entries will always be accurate.
++		 */
++		if (!sched_interactive) {
++			/*
++			 * Don't reschedule balance across nodes unless the CPU
++			 * is idle.
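This scan peeks at other_rq->sl->entries (and, as the hunk continues below, other_rq->best_key) without taking the remote lock, trylocks only once a candidate looks better, and then re-checks entries under the lock. A compressed pthread sketch of that peek/trylock/revalidate pattern (purely illustrative, not the patch's code):

    #include <pthread.h>

    struct toy_rq {
            pthread_mutex_t lock;
            int entries;            /* racy hint when read unlocked */
    };

    /* Returns 1 with q->lock held iff q still looks worth taking from. */
    static int toy_try_candidate(struct toy_rq *q, int best_entries)
    {
            if (q->entries <= best_entries)      /* lockless peek */
                    return 0;
            if (pthread_mutex_trylock(&q->lock)) /* never wait on it */
                    return 0;
            if (q->entries <= best_entries) {    /* re-check under lock */
                    pthread_mutex_unlock(&q->lock);
                    return 0;
            }
            return 1;
    }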
++ */ ++ if (edt != idle && rq->cpu_locality[other_rq->cpu] > 3) ++ break; ++ if (entries <= best_entries) ++ continue; ++ } else if (!entries) ++ continue; ++ ++ /* if (i) implies other_rq != rq */ ++ if (i) { ++ /* Check for best id queued lockless first */ ++ if (other_rq->best_key >= best_key) ++ continue; ++ ++ if (unlikely(!trylock_rq(rq, other_rq))) ++ continue; ++ ++ /* Need to reevaluate entries after locking */ ++ entries = other_rq->sl->entries; ++ if (unlikely(!entries)) { ++ unlock_rq(other_rq); ++ continue; ++ } ++ } ++ ++ next = other_rq->node; ++ /* ++ * In interactive mode we check beyond the best entry on other ++ * runqueues if we can't get the best for smt or affinity ++ * reasons. ++ */ ++ while ((next = next->next[0]) != other_rq->node) { ++ struct task_struct *p; ++ u64 key = next->key; ++ ++ /* Reevaluate key after locking */ ++ if (key >= best_key) ++ break; ++ ++ p = next->value; ++ if (!smt_schedule(p, rq)) { ++ if (i && !sched_interactive) ++ break; ++ continue; ++ } ++ ++ if (sched_other_cpu(p, cpu)) { ++ if (sched_interactive || !i) ++ continue; ++ break; ++ } ++ /* Make sure affinity is ok */ ++ if (i) { ++ /* From this point on p is the best so far */ ++ if (locked) ++ unlock_rq(locked); ++ chosen = locked = other_rq; ++ } ++ best_entries = entries; ++ best_key = key; ++ edt = p; ++ break; ++ } ++ /* rq->preempting is a hint only as the state may have changed ++ * since it was set with the resched call but if we have met ++ * the condition we can break out here. */ ++ if (edt == rq->preempting) ++ break; ++ if (i && other_rq != chosen) ++ unlock_rq(other_rq); ++ } ++ ++ if (likely(edt != idle)) ++ take_task(rq, cpu, edt); ++ ++ if (locked) ++ unlock_rq(locked); ++ ++ rq->preempting = NULL; ++ ++ return edt; ++} ++#else /* CONFIG_SMP */ ++static inline struct task_struct ++*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle) ++{ ++ struct task_struct *edt; ++ ++ if (unlikely(!rq->sl->entries)) ++ return idle; ++ edt = rq->node->next[0]->value; ++ take_task(rq, cpu, edt); ++ return edt; ++} ++#endif /* CONFIG_SMP */ ++ ++/* ++ * Print scheduling while atomic bug: ++ */ ++static noinline void __schedule_bug(struct task_struct *prev) ++{ ++ /* Save this before calling printk(), since that will clobber it */ ++ unsigned long preempt_disable_ip = get_preempt_disable_ip(current); ++ ++ if (oops_in_progress) ++ return; ++ ++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", ++ prev->comm, prev->pid, preempt_count()); ++ ++ debug_show_held_locks(prev); ++ print_modules(); ++ if (irqs_disabled()) ++ print_irqtrace_events(prev); ++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) ++ && in_atomic_preempt_off()) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(preempt_disable_ip); ++ pr_cont("\n"); ++ } ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++ ++/* ++ * Various schedule()-time debugging checks and statistics: ++ */ ++static inline void schedule_debug(struct task_struct *prev) ++{ ++#ifdef CONFIG_SCHED_STACK_END_CHECK ++ if (task_stack_end_corrupted(prev)) ++ panic("corrupted stack end detected inside scheduler\n"); ++#endif ++ ++ if (unlikely(in_atomic_preempt_off())) { ++ __schedule_bug(prev); ++ preempt_count_set(PREEMPT_DISABLED); ++ } ++ rcu_sleep_check(); ++ ++ profile_hit(SCHED_PROFILING, __builtin_return_address(0)); ++ ++ schedstat_inc(this_rq()->sched_count); ++} ++ ++/* ++ * The currently running task's information is all stored in rq local data ++ * which is only modified by the local CPU. 
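set_rq_task() below hands the remaining timeslice (p->time_slice, in microseconds) to hrexpiry_start(), which is backed by a high-resolution timer. A module-style sketch of arming such a slice-expiry hrtimer (demo_* names are invented; hrtimer_init/hrtimer_start are the real APIs):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer demo_slice_timer;

    static enum hrtimer_restart demo_slice_fn(struct hrtimer *t)
    {
            /* a scheduler would flag a resched on the running task here */
            return HRTIMER_NORESTART;
    }

    static void demo_slice_arm(u64 slice_us)
    {
            hrtimer_init(&demo_slice_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            demo_slice_timer.function = demo_slice_fn;
            hrtimer_start(&demo_slice_timer,
                          ns_to_ktime(slice_us * NSEC_PER_USEC),
                          HRTIMER_MODE_REL);
    }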
++ */ ++static inline void set_rq_task(struct rq *rq, struct task_struct *p) ++{ ++ if (p == rq->idle || p->policy == SCHED_FIFO) ++ hrexpiry_clear(rq); ++ else ++ hrexpiry_start(rq, US_TO_NS(p->time_slice)); ++ if (rq->clock - rq->last_tick > HALF_JIFFY_NS) ++ rq->dither = 0; ++ else ++ rq->dither = rq_dither(rq); ++ ++ rq->rq_deadline = p->deadline; ++ rq->rq_prio = p->prio; ++#ifdef CONFIG_SMT_NICE ++ rq->rq_mm = p->mm; ++ rq->rq_smt_bias = p->smt_bias; ++#endif ++} ++ ++#ifdef CONFIG_SMT_NICE ++static void check_no_siblings(struct rq __maybe_unused *this_rq) {} ++static void wake_no_siblings(struct rq __maybe_unused *this_rq) {} ++static void (*check_siblings)(struct rq *this_rq) = &check_no_siblings; ++static void (*wake_siblings)(struct rq *this_rq) = &wake_no_siblings; ++ ++/* Iterate over smt siblings when we've scheduled a process on cpu and decide ++ * whether they should continue running or be descheduled. */ ++static void check_smt_siblings(struct rq *this_rq) ++{ ++ int other_cpu; ++ ++ for_each_cpu(other_cpu, &this_rq->thread_mask) { ++ struct task_struct *p; ++ struct rq *rq; ++ ++ rq = cpu_rq(other_cpu); ++ if (rq_idle(rq)) ++ continue; ++ p = rq->curr; ++ if (!smt_schedule(p, this_rq)) ++ resched_curr(rq); ++ } ++} ++ ++static void wake_smt_siblings(struct rq *this_rq) ++{ ++ int other_cpu; ++ ++ for_each_cpu(other_cpu, &this_rq->thread_mask) { ++ struct rq *rq; ++ ++ rq = cpu_rq(other_cpu); ++ if (rq_idle(rq)) ++ resched_idle(rq); ++ } ++} ++#else ++static void check_siblings(struct rq __maybe_unused *this_rq) {} ++static void wake_siblings(struct rq __maybe_unused *this_rq) {} ++#endif ++ ++/* ++ * schedule() is the main scheduler function. ++ * ++ * The main means of driving the scheduler and thus entering this function are: ++ * ++ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. ++ * ++ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return ++ * paths. For example, see arch/x86/entry_64.S. ++ * ++ * To drive preemption between tasks, the scheduler sets the flag in timer ++ * interrupt handler scheduler_tick(). ++ * ++ * 3. Wakeups don't really cause entry into schedule(). They add a ++ * task to the run-queue and that's it. ++ * ++ * Now, if the new task added to the run-queue preempts the current ++ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets ++ * called on the nearest possible occasion: ++ * ++ * - If the kernel is preemptible (CONFIG_PREEMPT=y): ++ * ++ * - in syscall or exception context, at the next outmost ++ * preempt_enable(). (this might be as soon as the wake_up()'s ++ * spin_unlock()!) ++ * ++ * - in IRQ context, return from interrupt-handler to ++ * preemptible context ++ * ++ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) ++ * then at the next: ++ * ++ * - cond_resched() call ++ * - explicit schedule() call ++ * - return from syscall or exception to user-space ++ * - return from interrupt-handler to user-space ++ * ++ * WARNING: must be called with preemption disabled! 
++ */ ++static void __sched notrace __schedule(bool preempt) ++{ ++ struct task_struct *prev, *next, *idle; ++ unsigned long *switch_count; ++ bool deactivate = false; ++ struct rq *rq; ++ u64 niffies; ++ int cpu; ++ ++ cpu = smp_processor_id(); ++ rq = cpu_rq(cpu); ++ prev = rq->curr; ++ idle = rq->idle; ++ ++ schedule_debug(prev); ++ ++ local_irq_disable(); ++ rcu_note_context_switch(preempt); ++ ++ /* ++ * Make sure that signal_pending_state()->signal_pending() below ++ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) ++ * done by the caller to avoid the race with signal_wake_up(). ++ * ++ * The membarrier system call requires a full memory barrier ++ * after coming from user-space, before storing to rq->curr. ++ */ ++ rq_lock(rq); ++ smp_mb__after_spinlock(); ++#ifdef CONFIG_SMP ++ if (rq->preempt) { ++ /* ++ * Make sure resched_curr hasn't triggered a preemption ++ * locklessly on a task that has since scheduled away. Spurious ++ * wakeup of idle is okay though. ++ */ ++ if (unlikely(preempt && prev != idle && !test_tsk_need_resched(prev))) { ++ rq->preempt = NULL; ++ clear_preempt_need_resched(); ++ rq_unlock_irq(rq, NULL); ++ return; ++ } ++ rq->preempt = NULL; ++ } ++#endif ++ ++ switch_count = &prev->nivcsw; ++ if (!preempt && prev->state) { ++ if (signal_pending_state(prev->state, prev)) { ++ prev->state = TASK_RUNNING; ++ } else { ++ deactivate = true; ++ prev->on_rq = 0; ++ ++ if (prev->in_iowait) { ++ atomic_inc(&rq->nr_iowait); ++ delayacct_blkio_start(); ++ } ++ ++ /* ++ * If a worker is going to sleep, notify and ++ * ask workqueue whether it wants to wake up a ++ * task to maintain concurrency. If so, wake ++ * up the task. ++ */ ++ if (prev->flags & PF_WQ_WORKER) { ++ struct task_struct *to_wakeup; ++ ++ to_wakeup = wq_worker_sleeping(prev); ++ if (to_wakeup) ++ try_to_wake_up_local(to_wakeup); ++ } ++ } ++ switch_count = &prev->nvcsw; ++ } ++ ++ /* ++ * Store the niffy value here for use by the next task's last_ran ++ * below to avoid losing niffies due to update_clocks being called ++ * again after this point. ++ */ ++ update_clocks(rq); ++ niffies = rq->niffies; ++ update_cpu_clock_switch(rq, prev); ++ ++ clear_tsk_need_resched(prev); ++ clear_preempt_need_resched(); ++ ++ if (idle != prev) { ++ check_deadline(prev, rq); ++ return_task(prev, rq, cpu, deactivate); ++ } ++ ++ next = earliest_deadline_task(rq, cpu, idle); ++ if (likely(next->prio != PRIO_LIMIT)) ++ clear_cpuidle_map(cpu); ++ else { ++ set_cpuidle_map(cpu); ++ update_load_avg(rq, 0); ++ } ++ ++ set_rq_task(rq, next); ++ next->last_ran = niffies; ++ ++ if (likely(prev != next)) { ++ /* ++ * Don't reschedule an idle task or deactivated tasks ++ */ ++ if (prev == idle) { ++ rq->nr_running++; ++ if (rt_task(next)) ++ rq->rt_nr_running++; ++ } else if (!deactivate) ++ resched_suitable_idle(prev); ++ if (unlikely(next == idle)) { ++ rq->nr_running--; ++ if (rt_task(prev)) ++ rq->rt_nr_running--; ++ wake_siblings(rq); ++ } else ++ check_siblings(rq); ++ rq->nr_switches++; ++ rq->curr = next; ++ /* ++ * The membarrier system call requires each architecture ++ * to have a full memory barrier after updating ++ * rq->curr, before returning to user-space. ++ * ++ * Here are the schemes providing that barrier on the ++ * various architectures: ++ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. ++ * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 
++ * - finish_lock_switch() for weakly-ordered ++ * architectures where spin_unlock is a full barrier, ++ * - switch_to() for arm64 (weakly-ordered, spin_unlock ++ * is a RELEASE barrier), ++ */ ++ ++*switch_count; ++ ++ trace_sched_switch(preempt, prev, next); ++ context_switch(rq, prev, next); /* unlocks the rq */ ++ } else { ++ check_siblings(rq); ++ rq_unlock(rq); ++ do_pending_softirq(rq, next); ++ local_irq_enable(); ++ } ++} ++ ++void __noreturn do_task_dead(void) ++{ ++ /* Causes final put_task_struct in finish_task_switch(). */ ++ set_special_state(TASK_DEAD); ++ ++ /* Tell freezer to ignore us: */ ++ current->flags |= PF_NOFREEZE; ++ __schedule(false); ++ BUG(); ++ ++ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ ++ for (;;) ++ cpu_relax(); ++} ++ ++static inline void sched_submit_work(struct task_struct *tsk) ++{ ++ if (!tsk->state || tsk_is_pi_blocked(tsk) || ++ preempt_count() || ++ signal_pending_state(tsk->state, tsk)) ++ return; ++ ++ /* ++ * If we are going to sleep and we have plugged IO queued, ++ * make sure to submit it to avoid deadlocks. ++ */ ++ if (blk_needs_flush_plug(tsk)) ++ blk_schedule_flush_plug(tsk); ++} ++ ++asmlinkage __visible void __sched schedule(void) ++{ ++ struct task_struct *tsk = current; ++ ++ sched_submit_work(tsk); ++ do { ++ preempt_disable(); ++ __schedule(false); ++ sched_preempt_enable_no_resched(); ++ } while (need_resched()); ++} ++ ++EXPORT_SYMBOL(schedule); ++ ++/* ++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted ++ * state (have scheduled out non-voluntarily) by making sure that all ++ * tasks have either left the run queue or have gone into user space. ++ * As idle tasks do not do either, they must not ever be preempted ++ * (schedule out non-voluntarily). ++ * ++ * schedule_idle() is similar to schedule_preempt_disable() except that it ++ * never enables preemption because it does not call sched_submit_work(). ++ */ ++void __sched schedule_idle(void) ++{ ++ /* ++ * As this skips calling sched_submit_work(), which the idle task does ++ * regardless because that function is a nop when the task is in a ++ * TASK_RUNNING state, make sure this isn't used someplace that the ++ * current task can be in any other state. Note, idle is always in the ++ * TASK_RUNNING state. ++ */ ++ WARN_ON_ONCE(current->state); ++ do { ++ __schedule(false); ++ } while (need_resched()); ++} ++ ++#ifdef CONFIG_CONTEXT_TRACKING ++asmlinkage __visible void __sched schedule_user(void) ++{ ++ /* ++ * If we come here after a random call to set_need_resched(), ++ * or we have been woken up remotely but the IPI has not yet arrived, ++ * we haven't yet exited the RCU idle mode. Do it here manually until ++ * we find a better solution. ++ * ++ * NB: There are buggy callers of this function. Ideally we ++ * should warn if prev_state != IN_USER, but that will trigger ++ * too frequently to make sense yet. ++ */ ++ enum ctx_state prev_state = exception_enter(); ++ schedule(); ++ exception_exit(prev_state); ++} ++#endif ++ ++/** ++ * schedule_preempt_disabled - called with preemption disabled ++ * ++ * Returns with preemption disabled. 
Note: preempt_count must be 1 ++ */ ++void __sched schedule_preempt_disabled(void) ++{ ++ sched_preempt_enable_no_resched(); ++ schedule(); ++ preempt_disable(); ++} ++ ++static void __sched notrace preempt_schedule_common(void) ++{ ++ do { ++ /* ++ * Because the function tracer can trace preempt_count_sub() ++ * and it also uses preempt_enable/disable_notrace(), if ++ * NEED_RESCHED is set, the preempt_enable_notrace() called ++ * by the function tracer will call this function again and ++ * cause infinite recursion. ++ * ++ * Preemption must be disabled here before the function ++ * tracer can trace. Break up preempt_disable() into two ++ * calls. One to disable preemption without fear of being ++ * traced. The other to still record the preemption latency, ++ * which can also be traced by the function tracer. ++ */ ++ preempt_disable_notrace(); ++ preempt_latency_start(1); ++ __schedule(true); ++ preempt_latency_stop(1); ++ preempt_enable_no_resched_notrace(); ++ ++ /* ++ * Check again in case we missed a preemption opportunity ++ * between schedule and now. ++ */ ++ } while (need_resched()); ++} ++ ++#ifdef CONFIG_PREEMPT ++/* ++ * this is the entry point to schedule() from in-kernel preemption ++ * off of preempt_enable. Kernel preemptions off return from interrupt ++ * occur there and call schedule directly. ++ */ ++asmlinkage __visible void __sched notrace preempt_schedule(void) ++{ ++ /* ++ * If there is a non-zero preempt_count or interrupts are disabled, ++ * we do not want to preempt the current task. Just return.. ++ */ ++ if (likely(!preemptible())) ++ return; ++ ++ preempt_schedule_common(); ++} ++NOKPROBE_SYMBOL(preempt_schedule); ++EXPORT_SYMBOL(preempt_schedule); ++ ++/** ++ * preempt_schedule_notrace - preempt_schedule called by tracing ++ * ++ * The tracing infrastructure uses preempt_enable_notrace to prevent ++ * recursion and tracing preempt enabling caused by the tracing ++ * infrastructure itself. But as tracing can happen in areas coming ++ * from userspace or just about to enter userspace, a preempt enable ++ * can occur before user_exit() is called. This will cause the scheduler ++ * to be called when the system is still in usermode. ++ * ++ * To prevent this, the preempt_enable_notrace will use this function ++ * instead of preempt_schedule() to exit user context if needed before ++ * calling the scheduler. ++ */ ++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) ++{ ++ enum ctx_state prev_ctx; ++ ++ if (likely(!preemptible())) ++ return; ++ ++ do { ++ /* ++ * Because the function tracer can trace preempt_count_sub() ++ * and it also uses preempt_enable/disable_notrace(), if ++ * NEED_RESCHED is set, the preempt_enable_notrace() called ++ * by the function tracer will call this function again and ++ * cause infinite recursion. ++ * ++ * Preemption must be disabled here before the function ++ * tracer can trace. Break up preempt_disable() into two ++ * calls. One to disable preemption without fear of being ++ * traced. The other to still record the preemption latency, ++ * which can also be traced by the function tracer. ++ */ ++ preempt_disable_notrace(); ++ preempt_latency_start(1); ++ /* ++ * Needs preempt disabled in case user_exit() is traced ++ * and the tracer calls preempt_enable_notrace() causing ++ * an infinite recursion. 
++ */ ++ prev_ctx = exception_enter(); ++ __schedule(true); ++ exception_exit(prev_ctx); ++ ++ preempt_latency_stop(1); ++ preempt_enable_no_resched_notrace(); ++ } while (need_resched()); ++} ++EXPORT_SYMBOL_GPL(preempt_schedule_notrace); ++ ++#endif /* CONFIG_PREEMPT */ ++ ++/* ++ * this is the entry point to schedule() from kernel preemption ++ * off of irq context. ++ * Note, that this is called and return with irqs disabled. This will ++ * protect us against recursive calling from irq. ++ */ ++asmlinkage __visible void __sched preempt_schedule_irq(void) ++{ ++ enum ctx_state prev_state; ++ ++ /* Catch callers which need to be fixed */ ++ BUG_ON(preempt_count() || !irqs_disabled()); ++ ++ prev_state = exception_enter(); ++ ++ do { ++ preempt_disable(); ++ local_irq_enable(); ++ __schedule(true); ++ local_irq_disable(); ++ sched_preempt_enable_no_resched(); ++ } while (need_resched()); ++ ++ exception_exit(prev_state); ++} ++ ++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, ++ void *key) ++{ ++ return try_to_wake_up(curr->private, mode, wake_flags); ++} ++EXPORT_SYMBOL(default_wake_function); ++ ++#ifdef CONFIG_RT_MUTEXES ++ ++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) ++{ ++ if (pi_task) ++ prio = min(prio, pi_task->prio); ++ ++ return prio; ++} ++ ++static inline int rt_effective_prio(struct task_struct *p, int prio) ++{ ++ struct task_struct *pi_task = rt_mutex_get_top_task(p); ++ ++ return __rt_effective_prio(pi_task, prio); ++} ++ ++/* ++ * rt_mutex_setprio - set the current priority of a task ++ * @p: task to boost ++ * @pi_task: donor task ++ * ++ * This function changes the 'effective' priority of a task. It does ++ * not touch ->normal_prio like __setscheduler(). ++ * ++ * Used by the rt_mutex code to implement priority inheritance ++ * logic. Call site only calls if the priority of the task changed. ++ */ ++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) ++{ ++ int prio, oldprio; ++ struct rq *rq; ++ ++ /* XXX used to be waiter->prio, not waiter->task->prio */ ++ prio = __rt_effective_prio(pi_task, p->normal_prio); ++ ++ /* ++ * If nothing changed; bail early. ++ */ ++ if (p->pi_top_task == pi_task && prio == p->prio) ++ return; ++ ++ rq = __task_rq_lock(p, NULL); ++ update_rq_clock(rq); ++ /* ++ * Set under pi_lock && rq->lock, such that the value can be used under ++ * either lock. ++ * ++ * Note that there is loads of tricky to make this pointer cache work ++ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to ++ * ensure a task is de-boosted (pi_task is set to NULL) before the ++ * task is allowed to run again (and can exit). This ensures the pointer ++ * points to a blocked task -- which guaratees the task is present. ++ */ ++ p->pi_top_task = pi_task; ++ ++ /* ++ * For FIFO/RR we only need to set prio, if that matches we're done. ++ */ ++ if (prio == p->prio) ++ goto out_unlock; ++ ++ /* ++ * Idle task boosting is a nono in general. There is one ++ * exception, when PREEMPT_RT and NOHZ is active: ++ * ++ * The idle task calls get_next_timer_interrupt() and holds ++ * the timer wheel base->lock on the CPU and another CPU wants ++ * to access the timer (probably to cancel it). We can safely ++ * ignore the boosting request, as the idle CPU runs this code ++ * with interrupts disabled and will complete the lock ++ * protected section without being interrupted. So there is no ++ * real need to boost. 
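Locking aside, the boost computation earlier in this hunk reduces to a minimum: __rt_effective_prio() runs the task at min(its own priority, the top donor's priority), numerically smaller being more important. Restated as one freestanding helper (a sketch, not the kernel's code):

    /* Priority inheritance in one expression: inherit the donor's priority
     * when the donor outranks us (smaller value = higher priority). */
    static int toy_effective_prio(int own_prio, const int *top_donor_prio)
    {
            return top_donor_prio && *top_donor_prio < own_prio ?
                    *top_donor_prio : own_prio;
    }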
++ */ ++ if (unlikely(p == rq->idle)) { ++ WARN_ON(p != rq->curr); ++ WARN_ON(p->pi_blocked_on); ++ goto out_unlock; ++ } ++ ++ trace_sched_pi_setprio(p, pi_task); ++ oldprio = p->prio; ++ p->prio = prio; ++ if (task_running(rq, p)){ ++ if (prio > oldprio) ++ resched_task(p); ++ } else if (task_queued(p)) { ++ dequeue_task(rq, p, DEQUEUE_SAVE); ++ enqueue_task(rq, p, ENQUEUE_RESTORE); ++ if (prio < oldprio) ++ try_preempt(p, rq); ++ } ++out_unlock: ++ __task_rq_unlock(rq, NULL); ++} ++#else ++static inline int rt_effective_prio(struct task_struct *p, int prio) ++{ ++ return prio; ++} ++#endif ++ ++/* ++ * Adjust the deadline for when the priority is to change, before it's ++ * changed. ++ */ ++static inline void adjust_deadline(struct task_struct *p, int new_prio) ++{ ++ p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p); ++} ++ ++void set_user_nice(struct task_struct *p, long nice) ++{ ++ int new_static, old_static; ++ struct rq_flags rf; ++ struct rq *rq; ++ ++ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) ++ return; ++ new_static = NICE_TO_PRIO(nice); ++ /* ++ * We have to be careful, if called from sys_setpriority(), ++ * the task might be in the middle of scheduling on another CPU. ++ */ ++ rq = task_rq_lock(p, &rf); ++ update_rq_clock(rq); ++ ++ /* ++ * The RT priorities are set via sched_setscheduler(), but we still ++ * allow the 'normal' nice value to be set - but as expected ++ * it wont have any effect on scheduling until the task is ++ * not SCHED_NORMAL/SCHED_BATCH: ++ */ ++ if (has_rt_policy(p)) { ++ p->static_prio = new_static; ++ goto out_unlock; ++ } ++ ++ adjust_deadline(p, new_static); ++ old_static = p->static_prio; ++ p->static_prio = new_static; ++ p->prio = effective_prio(p); ++ ++ if (task_queued(p)) { ++ dequeue_task(rq, p, DEQUEUE_SAVE); ++ enqueue_task(rq, p, ENQUEUE_RESTORE); ++ if (new_static < old_static) ++ try_preempt(p, rq); ++ } else if (task_running(rq, p)) { ++ set_rq_task(rq, p); ++ if (old_static < new_static) ++ resched_task(p); ++ } ++out_unlock: ++ task_rq_unlock(rq, p, &rf); ++} ++EXPORT_SYMBOL(set_user_nice); ++ ++/* ++ * can_nice - check if a task can reduce its nice value ++ * @p: task ++ * @nice: nice value ++ */ ++int can_nice(const struct task_struct *p, const int nice) ++{ ++ /* Convert nice value [19,-20] to rlimit style value [1,40] */ ++ int nice_rlim = nice_to_rlimit(nice); ++ ++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || ++ capable(CAP_SYS_NICE)); ++} ++ ++#ifdef __ARCH_WANT_SYS_NICE ++ ++/* ++ * sys_nice - change the priority of the current process. ++ * @increment: priority increment ++ * ++ * sys_setpriority is a more generic, but much slower function that ++ * does similar things. ++ */ ++SYSCALL_DEFINE1(nice, int, increment) ++{ ++ long nice, retval; ++ ++ /* ++ * Setpriority might change our priority at the same moment. ++ * We don't have to worry. Conceptually one call occurs first ++ * and we have a single winner. ++ */ ++ ++ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); ++ nice = task_nice(current) + increment; ++ ++ nice = clamp_val(nice, MIN_NICE, MAX_NICE); ++ if (increment < 0 && !can_nice(current, nice)) ++ return -EPERM; ++ ++ retval = security_task_setnice(current, nice); ++ if (retval) ++ return retval; ++ ++ set_user_nice(current, nice); ++ return 0; ++} ++ ++#endif ++ ++/** ++ * task_prio - return the priority value of a given task. ++ * @p: the task in question. ++ * ++ * Return: The priority value as seen by users in /proc. ++ * RT tasks are offset by -100. 
Normal tasks are centered around 1, value goes ++ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO). ++ */ ++int task_prio(const struct task_struct *p) ++{ ++ int delta, prio = p->prio - MAX_RT_PRIO; ++ ++ /* rt tasks and iso tasks */ ++ if (prio <= 0) ++ goto out; ++ ++ /* Convert to ms to avoid overflows */ ++ delta = NS_TO_MS(p->deadline - task_rq(p)->niffies); ++ if (unlikely(delta < 0)) ++ delta = 0; ++ delta = delta * 40 / ms_longest_deadline_diff(); ++ if (delta <= 80) ++ prio += delta; ++ if (idleprio_task(p)) ++ prio += 40; ++out: ++ return prio; ++} ++ ++/** ++ * idle_cpu - is a given CPU idle currently? ++ * @cpu: the processor in question. ++ * ++ * Return: 1 if the CPU is currently idle. 0 otherwise. ++ */ ++int idle_cpu(int cpu) ++{ ++ return cpu_curr(cpu) == cpu_rq(cpu)->idle; ++} ++ ++/** ++ * available_idle_cpu - is a given CPU idle for enqueuing work. ++ * @cpu: the CPU in question. ++ * ++ * Return: 1 if the CPU is currently idle. 0 otherwise. ++ */ ++int available_idle_cpu(int cpu) ++{ ++ if (!idle_cpu(cpu)) ++ return 0; ++ ++ if (vcpu_is_preempted(cpu)) ++ return 0; ++ ++ return 1; ++} ++ ++/** ++ * idle_task - return the idle task for a given CPU. ++ * @cpu: the processor in question. ++ * ++ * Return: The idle task for the CPU @cpu. ++ */ ++struct task_struct *idle_task(int cpu) ++{ ++ return cpu_rq(cpu)->idle; ++} ++ ++/** ++ * find_process_by_pid - find a process with a matching PID value. ++ * @pid: the pid in question. ++ * ++ * The task of @pid, if found. %NULL otherwise. ++ */ ++static inline struct task_struct *find_process_by_pid(pid_t pid) ++{ ++ return pid ? find_task_by_vpid(pid) : current; ++} ++ ++/* Actually do priority change: must hold rq lock. */ ++static void __setscheduler(struct task_struct *p, struct rq *rq, int policy, ++ int prio, bool keep_boost) ++{ ++ int oldrtprio, oldprio; ++ ++ p->policy = policy; ++ oldrtprio = p->rt_priority; ++ p->rt_priority = prio; ++ p->normal_prio = normal_prio(p); ++ oldprio = p->prio; ++ /* ++ * Keep a potential priority boosting if called from ++ * sched_setscheduler(). 
++ */ ++ p->prio = normal_prio(p); ++ if (keep_boost) ++ p->prio = rt_effective_prio(p, p->prio); ++ ++ if (task_running(rq, p)) { ++ set_rq_task(rq, p); ++ resched_task(p); ++ } else if (task_queued(p)) { ++ dequeue_task(rq, p, DEQUEUE_SAVE); ++ enqueue_task(rq, p, ENQUEUE_RESTORE); ++ if (p->prio < oldprio || p->rt_priority > oldrtprio) ++ try_preempt(p, rq); ++ } ++} ++ ++/* ++ * Check the target process has a UID that matches the current process's ++ */ ++static bool check_same_owner(struct task_struct *p) ++{ ++ const struct cred *cred = current_cred(), *pcred; ++ bool match; ++ ++ rcu_read_lock(); ++ pcred = __task_cred(p); ++ match = (uid_eq(cred->euid, pcred->euid) || ++ uid_eq(cred->euid, pcred->uid)); ++ rcu_read_unlock(); ++ return match; ++} ++ ++static int __sched_setscheduler(struct task_struct *p, ++ const struct sched_attr *attr, ++ bool user, bool pi) ++{ ++ int retval, policy = attr->sched_policy, oldpolicy = -1, priority = attr->sched_priority; ++ unsigned long rlim_rtprio = 0; ++ struct rq_flags rf; ++ int reset_on_fork; ++ struct rq *rq; ++ ++ /* The pi code expects interrupts enabled */ ++ BUG_ON(pi && in_interrupt()); ++ ++ if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) { ++ unsigned long lflags; ++ ++ if (!lock_task_sighand(p, &lflags)) ++ return -ESRCH; ++ rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); ++ unlock_task_sighand(p, &lflags); ++ if (rlim_rtprio) ++ goto recheck; ++ /* ++ * If the caller requested an RT policy without having the ++ * necessary rights, we downgrade the policy to SCHED_ISO. ++ * We also set the parameter to zero to pass the checks. ++ */ ++ policy = SCHED_ISO; ++ priority = 0; ++ } ++recheck: ++ /* Double check policy once rq lock held */ ++ if (policy < 0) { ++ reset_on_fork = p->sched_reset_on_fork; ++ policy = oldpolicy = p->policy; ++ } else { ++ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK); ++ policy &= ~SCHED_RESET_ON_FORK; ++ ++ if (!SCHED_RANGE(policy)) ++ return -EINVAL; ++ } ++ ++ if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) ++ return -EINVAL; ++ ++ /* ++ * Valid priorities for SCHED_FIFO and SCHED_RR are ++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and ++ * SCHED_BATCH is 0. 
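The checks that follow encode that contract directly. As a single predicate (MAX_USER_RT_PRIO is taken as 100 here, its usual value; treat that as an assumption):

    #define TOY_MAX_USER_RT_PRIO 100

    /* Valid iff RT policies carry 1..99 and all other policies carry 0. */
    static int toy_prio_valid(int is_rt, int priority)
    {
            if (priority < 0 || priority > TOY_MAX_USER_RT_PRIO - 1)
                    return 0;
            return !!is_rt == (priority != 0);
    }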
++ */
++	if (priority < 0 ||
++	    (p->mm && priority > MAX_USER_RT_PRIO - 1) ||
++	    (!p->mm && priority > MAX_RT_PRIO - 1))
++		return -EINVAL;
++	if (is_rt_policy(policy) != (priority != 0))
++		return -EINVAL;
++
++	/*
++	 * Allow unprivileged RT tasks to decrease priority:
++	 */
++	if (user && !capable(CAP_SYS_NICE)) {
++		if (is_rt_policy(policy)) {
++			unsigned long rlim_rtprio =
++					task_rlimit(p, RLIMIT_RTPRIO);
++
++			/* Can't set/change the rt policy */
++			if (policy != p->policy && !rlim_rtprio)
++				return -EPERM;
++
++			/* Can't increase priority */
++			if (priority > p->rt_priority &&
++			    priority > rlim_rtprio)
++				return -EPERM;
++		} else {
++			switch (p->policy) {
++			/*
++			 * Can only downgrade policies but not back to
++			 * SCHED_NORMAL
++			 */
++			case SCHED_ISO:
++				if (policy == SCHED_ISO)
++					goto out;
++				if (policy != SCHED_NORMAL)
++					return -EPERM;
++				break;
++			case SCHED_BATCH:
++				if (policy == SCHED_BATCH)
++					goto out;
++				if (policy != SCHED_IDLEPRIO)
++					return -EPERM;
++				break;
++			case SCHED_IDLEPRIO:
++				if (policy == SCHED_IDLEPRIO)
++					goto out;
++				return -EPERM;
++			default:
++				break;
++			}
++		}
++
++		/* Can't change other user's priorities */
++		if (!check_same_owner(p))
++			return -EPERM;
++
++		/* Normal users shall not reset the sched_reset_on_fork flag: */
++		if (p->sched_reset_on_fork && !reset_on_fork)
++			return -EPERM;
++	}
++
++	if (user) {
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
++	}
++
++	/*
++	 * Make sure no PI-waiters arrive (or leave) while we are
++	 * changing the priority of the task:
++	 *
++	 * To be able to change p->policy safely, the runqueue lock must be
++	 * held.
++	 */
++	rq = task_rq_lock(p, &rf);
++	update_rq_clock(rq);
++
++	/*
++	 * Changing the policy of the stop threads is a very bad idea:
++	 */
++	if (p == rq->stop) {
++		task_rq_unlock(rq, p, &rf);
++		return -EINVAL;
++	}
++
++	/*
++	 * If not changing anything there's no need to proceed further:
++	 */
++	if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
++			priority == p->rt_priority))) {
++		task_rq_unlock(rq, p, &rf);
++		return 0;
++	}
++
++	/* Re-check policy now with rq lock held */
++	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++		policy = oldpolicy = -1;
++		task_rq_unlock(rq, p, &rf);
++		goto recheck;
++	}
++	p->sched_reset_on_fork = reset_on_fork;
++
++	__setscheduler(p, rq, policy, priority, pi);
++	task_rq_unlock(rq, p, &rf);
++
++	if (pi)
++		rt_mutex_adjust_pi(p);
++out:
++	return 0;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++			       const struct sched_param *param, bool check)
++{
++	struct sched_attr attr = {
++		.sched_policy = policy,
++		.sched_priority = param->sched_priority,
++		.sched_nice = PRIO_TO_NICE(p->static_prio),
++	};
++
++	return __sched_setscheduler(p, &attr, check, true);
++}
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may be already dead.
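Seen from userspace, the entry point below is reached through sched_setscheduler(2). A minimal caller requesting MuQSS's soft-realtime SCHED_ISO class (the value 4 is the slot mainline reserves for SCHED_ISO; verify it against the installed headers before relying on it):

    #include <sched.h>
    #include <stdio.h>

    #ifndef SCHED_ISO
    #define SCHED_ISO 4 /* assumption: MuQSS uses mainline's reserved slot */
    #endif

    int main(void)
    {
            struct sched_param sp = { .sched_priority = 0 }; /* must be 0 for non-RT */

            if (sched_setscheduler(0, SCHED_ISO, &sp) == -1) {
                    perror("sched_setscheduler");
                    return 1;
            }
            return 0;
    }

Note that, per the permission logic above, an unprivileged caller asking for an RT policy without the rlimit is silently downgraded to SCHED_ISO rather than rejected.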
++ */ ++int sched_setscheduler(struct task_struct *p, int policy, ++ const struct sched_param *param) ++{ ++ return _sched_setscheduler(p, policy, param, true); ++} ++ ++EXPORT_SYMBOL_GPL(sched_setscheduler); ++ ++int sched_setattr(struct task_struct *p, const struct sched_attr *attr) ++{ ++ return __sched_setscheduler(p, attr, true, true); ++} ++EXPORT_SYMBOL_GPL(sched_setattr); ++ ++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) ++{ ++ return __sched_setscheduler(p, attr, false, true); ++} ++ ++/** ++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. ++ * @p: the task in question. ++ * @policy: new policy. ++ * @param: structure containing the new RT priority. ++ * ++ * Just like sched_setscheduler, only don't bother checking if the ++ * current context has permission. For example, this is needed in ++ * stop_machine(): we create temporary high priority worker threads, ++ * but our caller might not have that capability. ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++int sched_setscheduler_nocheck(struct task_struct *p, int policy, ++ const struct sched_param *param) ++{ ++ return _sched_setscheduler(p, policy, param, false); ++} ++EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); ++ ++static int ++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) ++{ ++ struct sched_param lparam; ++ struct task_struct *p; ++ int retval; ++ ++ if (!param || pid < 0) ++ return -EINVAL; ++ if (copy_from_user(&lparam, param, sizeof(struct sched_param))) ++ return -EFAULT; ++ ++ rcu_read_lock(); ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (p != NULL) ++ retval = sched_setscheduler(p, policy, &lparam); ++ rcu_read_unlock(); ++ ++ return retval; ++} ++ ++/* ++ * Mimics kernel/events/core.c perf_copy_attr(). ++ */ ++static int sched_copy_attr(struct sched_attr __user *uattr, ++ struct sched_attr *attr) ++{ ++ u32 size; ++ int ret; ++ ++ if (!access_ok(uattr, SCHED_ATTR_SIZE_VER0)) ++ return -EFAULT; ++ ++ /* Zero the full structure, so that a short copy will be nice: */ ++ memset(attr, 0, sizeof(*attr)); ++ ++ ret = get_user(size, &uattr->size); ++ if (ret) ++ return ret; ++ ++ /* Bail out on silly large: */ ++ if (size > PAGE_SIZE) ++ goto err_size; ++ ++ /* ABI compatibility quirk: */ ++ if (!size) ++ size = SCHED_ATTR_SIZE_VER0; ++ ++ if (size < SCHED_ATTR_SIZE_VER0) ++ goto err_size; ++ ++ /* ++ * If we're handed a bigger struct than we know of, ++ * ensure all the unknown bits are 0 - i.e. new ++ * user-space does not rely on any kernel feature ++ * extensions we dont know about yet. ++ */ ++ if (size > sizeof(*attr)) { ++ unsigned char __user *addr; ++ unsigned char __user *end; ++ unsigned char val; ++ ++ addr = (void __user *)uattr + sizeof(*attr); ++ end = (void __user *)uattr + size; ++ ++ for (; addr < end; addr++) { ++ ret = get_user(val, addr); ++ if (ret) ++ return ret; ++ if (val) ++ goto err_size; ++ } ++ size = sizeof(*attr); ++ } ++ ++ ret = copy_from_user(attr, uattr, size); ++ if (ret) ++ return -EFAULT; ++ ++ /* ++ * XXX: Do we want to be lenient like existing syscalls; or do we want ++ * to be strict and return an error on out-of-bounds values? 
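Userspace reaches sched_copy_attr() through sched_setattr(2), passing the structure size in .size so old and new binaries interoperate; the trailing-zero walk above is what tolerates a caller struct larger than the kernel's. glibc ships no wrapper, so a raw syscall is needed. A sketch using the original VER0 field layout (demo_* names invented):

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    struct demo_sched_attr {      /* SCHED_ATTR_SIZE_VER0 layout */
            uint32_t size;
            uint32_t sched_policy;
            uint64_t sched_flags;
            int32_t  sched_nice;
            uint32_t sched_priority;
            uint64_t sched_runtime;
            uint64_t sched_deadline;
            uint64_t sched_period;
    };

    int demo_set_nice(int nice)
    {
            struct demo_sched_attr a;

            memset(&a, 0, sizeof(a));
            a.size = sizeof(a);
            a.sched_policy = 0;   /* SCHED_NORMAL */
            a.sched_nice = nice;  /* kernel clamps to [-20, 19], see below */
            return syscall(SYS_sched_setattr, 0, &a, 0);
    }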
++ */
++	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return 0;
++
++err_size:
++	put_user(sizeof(*attr), &uattr->size);
++	return -E2BIG;
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++			       unsigned int, flags)
++{
++	struct sched_attr attr;
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || flags)
++		return -EINVAL;
++
++	retval = sched_copy_attr(uattr, &attr);
++	if (retval)
++		return retval;
++
++	if ((int)attr.sched_policy < 0)
++		return -EINVAL;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (p != NULL)
++		retval = sched_setattr(p, &attr);
++	rcu_read_unlock();
++
++	return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		goto out_nounlock;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (p) {
++		retval = security_task_getscheduler(p);
++		if (!retval)
++			retval = p->policy;
++	}
++	rcu_read_unlock();
++
++out_nounlock:
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++	struct sched_param lp = { .sched_priority = 0 };
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (!param || pid < 0)
++		goto out_nounlock;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	if (has_rt_policy(p))
++		lp.sched_priority = p->rt_priority;
++	rcu_read_unlock();
++
++	/*
++	 * This one might sleep, we cannot do it with a spinlock held ...
++	 */
++	retval = copy_to_user(param, &lp, sizeof(*param)) ?
-EFAULT : 0; ++ ++out_nounlock: ++ return retval; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++static int sched_read_attr(struct sched_attr __user *uattr, ++ struct sched_attr *attr, ++ unsigned int usize) ++{ ++ int ret; ++ ++ if (!access_ok(uattr, usize)) ++ return -EFAULT; ++ ++ /* ++ * If we're handed a smaller struct than we know of, ++ * ensure all the unknown bits are 0 - i.e. old ++ * user-space does not get uncomplete information. ++ */ ++ if (usize < sizeof(*attr)) { ++ unsigned char *addr; ++ unsigned char *end; ++ ++ addr = (void *)attr + usize; ++ end = (void *)attr + sizeof(*attr); ++ ++ for (; addr < end; addr++) { ++ if (*addr) ++ return -EFBIG; ++ } ++ ++ attr->size = usize; ++ } ++ ++ ret = copy_to_user(uattr, attr, attr->size); ++ if (ret) ++ return -EFAULT; ++ ++ /* sched/core.c uses zero here but we already know ret is zero */ ++ return ret; ++} ++ ++/** ++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr ++ * @pid: the pid in question. ++ * @uattr: structure containing the extended parameters. ++ * @size: sizeof(attr) for fwd/bwd comp. ++ * @flags: for future extension. ++ */ ++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, ++ unsigned int, size, unsigned int, flags) ++{ ++ struct sched_attr attr = { ++ .size = sizeof(struct sched_attr), ++ }; ++ struct task_struct *p; ++ int retval; ++ ++ if (!uattr || pid < 0 || size > PAGE_SIZE || ++ size < SCHED_ATTR_SIZE_VER0 || flags) ++ return -EINVAL; ++ ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ retval = -ESRCH; ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ attr.sched_policy = p->policy; ++ if (rt_task(p)) ++ attr.sched_priority = p->rt_priority; ++ else ++ attr.sched_nice = task_nice(p); ++ ++ rcu_read_unlock(); ++ ++ retval = sched_read_attr(uattr, &attr, size); ++ return retval; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) ++{ ++ cpumask_var_t cpus_allowed, new_mask; ++ struct task_struct *p; ++ int retval; ++ ++ rcu_read_lock(); ++ ++ p = find_process_by_pid(pid); ++ if (!p) { ++ rcu_read_unlock(); ++ return -ESRCH; ++ } ++ ++ /* Prevent p going away */ ++ get_task_struct(p); ++ rcu_read_unlock(); ++ ++ if (p->flags & PF_NO_SETAFFINITY) { ++ retval = -EINVAL; ++ goto out_put_task; ++ } ++ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { ++ retval = -ENOMEM; ++ goto out_put_task; ++ } ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { ++ retval = -ENOMEM; ++ goto out_free_cpus_allowed; ++ } ++ retval = -EPERM; ++ if (!check_same_owner(p)) { ++ rcu_read_lock(); ++ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { ++ rcu_read_unlock(); ++ goto out_unlock; ++ } ++ rcu_read_unlock(); ++ } ++ ++ retval = security_task_setscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ cpuset_cpus_allowed(p, cpus_allowed); ++ cpumask_and(new_mask, in_mask, cpus_allowed); ++again: ++ retval = __set_cpus_allowed_ptr(p, new_mask, true); ++ ++ if (!retval) { ++ cpuset_cpus_allowed(p, cpus_allowed); ++ if (!cpumask_subset(new_mask, cpus_allowed)) { ++ /* ++ * We must have raced with a concurrent cpuset ++ * update. 
Just reset the cpus_allowed to the ++ * cpuset's cpus_allowed ++ */ ++ cpumask_copy(new_mask, cpus_allowed); ++ goto again; ++ } ++ } ++out_unlock: ++ free_cpumask_var(new_mask); ++out_free_cpus_allowed: ++ free_cpumask_var(cpus_allowed); ++out_put_task: ++ put_task_struct(p); ++ return retval; ++} ++ ++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, ++ cpumask_t *new_mask) ++{ ++ if (len < cpumask_size()) ++ cpumask_clear(new_mask); ++ else if (len > cpumask_size()) ++ len = cpumask_size(); ++ ++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; ++} ++ ++ ++/** ++ * sys_sched_setaffinity - set the CPU affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to the new CPU mask ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ cpumask_var_t new_mask; ++ int retval; ++ ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); ++ if (retval == 0) ++ retval = sched_setaffinity(pid, new_mask); ++ free_cpumask_var(new_mask); ++ return retval; ++} ++ ++long sched_getaffinity(pid_t pid, cpumask_t *mask) ++{ ++ struct task_struct *p; ++ unsigned long flags; ++ int retval; ++ ++ get_online_cpus(); ++ rcu_read_lock(); ++ ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++out_unlock: ++ rcu_read_unlock(); ++ put_online_cpus(); ++ ++ return retval; ++} ++ ++/** ++ * sys_sched_getaffinity - get the CPU affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to hold the current CPU mask ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ int ret; ++ cpumask_var_t mask; ++ ++ if ((len * BITS_PER_BYTE) < nr_cpu_ids) ++ return -EINVAL; ++ if (len & (sizeof(unsigned long)-1)) ++ return -EINVAL; ++ ++ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ ret = sched_getaffinity(pid, mask); ++ if (ret == 0) { ++ unsigned int retlen = min(len, cpumask_size()); ++ ++ if (copy_to_user(user_mask_ptr, mask, retlen)) ++ ret = -EFAULT; ++ else ++ ret = retlen; ++ } ++ free_cpumask_var(mask); ++ ++ return ret; ++} ++ ++/** ++ * sys_sched_yield - yield the current processor to other threads. ++ * ++ * This function yields the current CPU to other tasks. It does this by ++ * scheduling away the current task. If it still has the earliest deadline ++ * it will be scheduled again as the next task. ++ * ++ * Return: 0. 
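Earlier in this hunk, sys_sched_setaffinity() and sys_sched_getaffinity() were added; their libc wrappers make a userspace round-trip short. A minimal example that pins the calling thread to CPU 0 and reads the mask back:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t set;

            CPU_ZERO(&set);
            CPU_SET(0, &set);
            if (sched_setaffinity(0, sizeof(set), &set))
                    perror("sched_setaffinity");

            CPU_ZERO(&set);
            if (sched_getaffinity(0, sizeof(set), &set) == 0)
                    printf("pinned to CPU0: %d\n", CPU_ISSET(0, &set));
            return 0;
    }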
++ */ ++static void do_sched_yield(void) ++{ ++ struct rq *rq; ++ ++ if (!sched_yield_type) ++ return; ++ ++ local_irq_disable(); ++ rq = this_rq(); ++ rq_lock(rq); ++ ++ if (sched_yield_type > 1) ++ time_slice_expired(current, rq); ++ schedstat_inc(rq->yld_count); ++ ++ /* ++ * Since we are going to call schedule() anyway, there's ++ * no need to preempt or enable interrupts: ++ */ ++ preempt_disable(); ++ rq_unlock(rq); ++ sched_preempt_enable_no_resched(); ++ ++ schedule(); ++} ++ ++SYSCALL_DEFINE0(sched_yield) ++{ ++ do_sched_yield(); ++ return 0; ++} ++ ++#ifndef CONFIG_PREEMPT ++int __sched _cond_resched(void) ++{ ++ if (should_resched(0)) { ++ preempt_schedule_common(); ++ return 1; ++ } ++ rcu_all_qs(); ++ return 0; ++} ++EXPORT_SYMBOL(_cond_resched); ++#endif ++ ++/* ++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock, ++ * call schedule, and on return reacquire the lock. ++ * ++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level ++ * operations here to prevent schedule() from being called twice (once via ++ * spin_unlock(), once by hand). ++ */ ++int __cond_resched_lock(spinlock_t *lock) ++{ ++ int resched = should_resched(PREEMPT_LOCK_OFFSET); ++ int ret = 0; ++ ++ lockdep_assert_held(lock); ++ ++ if (spin_needbreak(lock) || resched) { ++ spin_unlock(lock); ++ if (resched) ++ preempt_schedule_common(); ++ else ++ cpu_relax(); ++ ret = 1; ++ spin_lock(lock); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(__cond_resched_lock); ++ ++/** ++ * yield - yield the current processor to other threads. ++ * ++ * Do not ever use this function, there's a 99% chance you're doing it wrong. ++ * ++ * The scheduler is at all times free to pick the calling task as the most ++ * eligible task to run, if removing the yield() call from your code breaks ++ * it, its already broken. ++ * ++ * Typical broken usage is: ++ * ++ * while (!event) ++ * yield(); ++ * ++ * where one assumes that yield() will let 'the other' process run that will ++ * make event true. If the current task is a SCHED_FIFO task that will never ++ * happen. Never use yield() as a progress guarantee!! ++ * ++ * If you want to use yield() to wait for something, use wait_event(). ++ * If you want to use yield() to be 'nice' for others, use cond_resched(). ++ * If you still want to use yield(), do not! ++ */ ++void __sched yield(void) ++{ ++ set_current_state(TASK_RUNNING); ++ do_sched_yield(); ++} ++EXPORT_SYMBOL(yield); ++ ++/** ++ * yield_to - yield the current processor to another thread in ++ * your thread group, or accelerate that thread toward the ++ * processor it's on. ++ * @p: target task ++ * @preempt: whether task preemption is allowed or not ++ * ++ * It's the caller's job to ensure that the target task struct ++ * can't go away on us before we can do any checks. ++ * ++ * Return: ++ * true (>0) if we indeed boosted the target task. ++ * false (0) if we failed to boost the target. ++ * -ESRCH if there's no task to yield to. ++ */ ++int __sched yield_to(struct task_struct *p, bool preempt) ++{ ++ struct task_struct *rq_p; ++ struct rq *rq, *p_rq; ++ unsigned long flags; ++ int yielded = 0; ++ ++ local_irq_save(flags); ++ rq = this_rq(); ++ ++again: ++ p_rq = task_rq(p); ++ /* ++ * If we're the only runnable task on the rq and target rq also ++ * has only one task, there's absolutely no point in yielding. 
++ */ ++ if (task_running(p_rq, p) || p->state) { ++ yielded = -ESRCH; ++ goto out_irq; ++ } ++ ++ double_rq_lock(rq, p_rq); ++ if (unlikely(task_rq(p) != p_rq)) { ++ double_rq_unlock(rq, p_rq); ++ goto again; ++ } ++ ++ yielded = 1; ++ schedstat_inc(rq->yld_count); ++ rq_p = rq->curr; ++ if (p->deadline > rq_p->deadline) ++ p->deadline = rq_p->deadline; ++ p->time_slice += rq_p->time_slice; ++ if (p->time_slice > timeslice()) ++ p->time_slice = timeslice(); ++ time_slice_expired(rq_p, rq); ++ if (preempt && rq != p_rq) ++ resched_task(p_rq->curr); ++ double_rq_unlock(rq, p_rq); ++out_irq: ++ local_irq_restore(flags); ++ ++ if (yielded > 0) ++ schedule(); ++ return yielded; ++} ++EXPORT_SYMBOL_GPL(yield_to); ++ ++int io_schedule_prepare(void) ++{ ++ int old_iowait = current->in_iowait; ++ ++ current->in_iowait = 1; ++ blk_schedule_flush_plug(current); ++ ++ return old_iowait; ++} ++ ++void io_schedule_finish(int token) ++{ ++ current->in_iowait = token; ++} ++ ++/* ++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so ++ * that process accounting knows that this is a task in IO wait state. ++ * ++ * But don't do that if it is a deliberate, throttling IO wait (this task ++ * has set its backing_dev_info: the queue against which it should throttle) ++ */ ++ ++long __sched io_schedule_timeout(long timeout) ++{ ++ int token; ++ long ret; ++ ++ token = io_schedule_prepare(); ++ ret = schedule_timeout(timeout); ++ io_schedule_finish(token); ++ ++ return ret; ++} ++EXPORT_SYMBOL(io_schedule_timeout); ++ ++void io_schedule(void) ++{ ++ int token; ++ ++ token = io_schedule_prepare(); ++ schedule(); ++ io_schedule_finish(token); ++} ++EXPORT_SYMBOL(io_schedule); ++ ++/** ++ * sys_sched_get_priority_max - return maximum RT priority. ++ * @policy: scheduling class. ++ * ++ * Return: On success, this syscall returns the maximum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_max, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = MAX_USER_RT_PRIO-1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_ISO: ++ case SCHED_IDLEPRIO: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * sys_sched_get_priority_min - return minimum RT priority. ++ * @policy: scheduling class. ++ * ++ * Return: On success, this syscall returns the minimum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_min, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = 1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_ISO: ++ case SCHED_IDLEPRIO: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) ++{ ++ struct task_struct *p; ++ unsigned int time_slice; ++ struct rq_flags rf; ++ struct rq *rq; ++ int retval; ++ ++ if (pid < 0) ++ return -EINVAL; ++ ++ retval = -ESRCH; ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ rq = task_rq_lock(p, &rf); ++ time_slice = p->policy == SCHED_FIFO ? 
0 : MS_TO_NS(task_timeslice(p)); ++ task_rq_unlock(rq, p, &rf); ++ ++ rcu_read_unlock(); ++ *t = ns_to_timespec64(time_slice); ++ return 0; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++/** ++ * sys_sched_rr_get_interval - return the default timeslice of a process. ++ * @pid: pid of the process. ++ * @interval: userspace pointer to the timeslice value. ++ * ++ * this syscall writes the default timeslice value of a given process ++ * into the user-space timespec buffer. A value of '0' means infinity. ++ * ++ * Return: On success, 0 and the timeslice is in @interval. Otherwise, ++ * an error code. ++ */ ++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, ++ struct __kernel_timespec __user *, interval) ++{ ++ struct timespec64 t; ++ int retval = sched_rr_get_interval(pid, &t); ++ ++ if (retval == 0) ++ retval = put_timespec64(&t, interval); ++ ++ return retval; ++} ++ ++#ifdef CONFIG_COMPAT_32BIT_TIME ++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, ++ struct old_timespec32 __user *, interval) ++{ ++ struct timespec64 t; ++ int retval = sched_rr_get_interval(pid, &t); ++ ++ if (retval == 0) ++ retval = put_old_timespec32(&t, interval); ++ return retval; ++} ++#endif ++ ++void sched_show_task(struct task_struct *p) ++{ ++ unsigned long free = 0; ++ int ppid; ++ ++ if (!try_get_task_stack(p)) ++ return; ++ ++ printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p)); ++ ++ if (p->state == TASK_RUNNING) ++ printk(KERN_CONT " running task "); ++#ifdef CONFIG_DEBUG_STACK_USAGE ++ free = stack_not_used(p); ++#endif ++ ppid = 0; ++ rcu_read_lock(); ++ if (pid_alive(p)) ++ ppid = task_pid_nr(rcu_dereference(p->real_parent)); ++ rcu_read_unlock(); ++ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, ++ task_pid_nr(p), ppid, ++ (unsigned long)task_thread_info(p)->flags); ++ ++ print_worker_info(KERN_INFO, p); ++ show_stack(p, NULL); ++ put_task_stack(p); ++} ++EXPORT_SYMBOL_GPL(sched_show_task); ++ ++static inline bool ++state_filter_match(unsigned long state_filter, struct task_struct *p) ++{ ++ /* no filter, everything matches */ ++ if (!state_filter) ++ return true; ++ ++ /* filter, but doesn't match */ ++ if (!(p->state & state_filter)) ++ return false; ++ ++ /* ++ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows ++ * TASK_KILLABLE). ++ */ ++ if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) ++ return false; ++ ++ return true; ++} ++ ++void show_state_filter(unsigned long state_filter) ++{ ++ struct task_struct *g, *p; ++ ++#if BITS_PER_LONG == 32 ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#else ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#endif ++ rcu_read_lock(); ++ for_each_process_thread(g, p) { ++ /* ++ * reset the NMI-timeout, listing all files on a slow ++ * console might take a lot of time: ++ * Also, reset softlockup watchdogs on all CPUs, because ++ * another CPU might be blocked waiting for us to process ++ * an IPI. 
++ */
++ touch_nmi_watchdog();
++ touch_all_softlockup_watchdogs();
++ if (state_filter_match(state_filter, p))
++ sched_show_task(p);
++ }
++
++ rcu_read_unlock();
++ /*
++ * Only show locks if all tasks are dumped:
++ */
++ if (!state_filter)
++ debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++ pr_info("Task dump for CPU %d:\n", cpu);
++ sched_show_task(cpu_curr(cpu));
++}
++
++#ifdef CONFIG_SMP
++void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
++{
++ cpumask_copy(&p->cpus_allowed, new_mask);
++ p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ struct rq *rq = task_rq(p);
++
++ lockdep_assert_held(&p->pi_lock);
++
++ cpumask_copy(&p->cpus_allowed, new_mask);
++
++ if (task_queued(p)) {
++ /*
++ * Because __kthread_bind() calls this on blocked tasks without
++ * holding rq->lock.
++ */
++ lockdep_assert_held(rq->lock);
++ }
++}
++
++/*
++ * do_set_cpus_allowed() is for callers outside the scheduler core; it must
++ * not be used on a running or queued task. We should be holding pi_lock.
++ */
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ __do_set_cpus_allowed(p, new_mask);
++ if (needs_other_cpu(p, task_cpu(p))) {
++ struct rq *rq;
++
++ rq = __task_rq_lock(p, NULL);
++ set_task_cpu(p, valid_task_cpu(p));
++ resched_task(p);
++ __task_rq_unlock(rq, NULL);
++ }
++}
++#endif
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: cpu the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void init_idle(struct task_struct *idle, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&idle->pi_lock, flags);
++ raw_spin_lock(rq->lock);
++ idle->last_ran = rq->niffies;
++ time_slice_expired(idle, rq);
++ idle->state = TASK_RUNNING;
++ /* Setting prio to illegal value shouldn't matter when never queued */
++ idle->prio = PRIO_LIMIT;
++
++ kasan_unpoison_task_stack(idle);
++
++#ifdef CONFIG_SMP
++ /*
++ * It's possible that init_idle() gets called multiple times on a task,
++ * in that case do_set_cpus_allowed() will not do the right thing.
++ *
++ * And since this is boot we can forgo the serialisation.
++ */
++ set_cpus_allowed_common(idle, cpumask_of(cpu));
++#ifdef CONFIG_SMT_NICE
++ idle->smt_bias = 0;
++#endif
++#endif
++ set_rq_task(rq, idle);
++
++ /* Silence PROVE_RCU */
++ rcu_read_lock();
++ set_task_cpu(idle, cpu);
++ rcu_read_unlock();
++
++ rq->curr = rq->idle = idle;
++ idle->on_rq = TASK_ON_RQ_QUEUED;
++ raw_spin_unlock(rq->lock);
++ raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++ /* Set the preempt count _outside_ the spinlocks! */
++ init_idle_preempt_count(idle, cpu);
++
++ ftrace_graph_init_idle_task(idle, cpu);
++ vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++ const struct cpumask __maybe_unused *trial)
++{
++ return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++ const struct cpumask *cs_cpus_allowed)
++{
++ int ret = 0;
++
++ /*
++ * Kthreads which disallow setaffinity shouldn't be moved
++ * to a new cpuset; we don't want to change their CPU
++ * affinity and isolating such threads by their set of
++ * allowed nodes is unnecessary.
Thus, cpusets are not
++ * applicable for such threads. This prevents checking for
++ * success of set_cpus_allowed_ptr() on all attached tasks
++ * before cpus_allowed may be changed.
++ */
++ if (p->flags & PF_NO_SETAFFINITY)
++ ret = -EINVAL;
++
++ return ret;
++}
++
++void resched_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ struct rq_flags rf;
++
++ rq_lock_irqsave(rq, &rf);
++ if (cpu_online(cpu) || cpu == smp_processor_id())
++ resched_curr(rq);
++ rq_unlock_irqrestore(rq, &rf);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu)
++{
++}
++
++void select_nohz_load_balancer(int stop_tick)
++{
++}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU. This is good for power-savings.
++ *
++ * We don't do a similar optimization for a completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++ int i, cpu = smp_processor_id();
++ struct sched_domain *sd;
++
++ if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
++ return cpu;
++
++ rcu_read_lock();
++ for_each_domain(cpu, sd) {
++ for_each_cpu(i, sched_domain_span(sd)) {
++ if (cpu == i)
++ continue;
++
++ if (!idle_cpu(i) && housekeeping_cpu(i, HK_FLAG_TIMER)) {
++ cpu = i;
++ goto unlock;
++ }
++ }
++ }
++
++ if (!housekeeping_cpu(cpu, HK_FLAG_TIMER))
++ cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
++unlock:
++ rcu_read_unlock();
++ return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++void wake_up_idle_cpu(int cpu)
++{
++ if (cpu == smp_processor_id())
++ return;
++
++ if (set_nr_and_not_polling(cpu_rq(cpu)->idle))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static bool wake_up_full_nohz_cpu(int cpu)
++{
++ /*
++ * We just need the target to call irq_exit() and re-evaluate
++ * the next tick. The nohz full kick at least implies that.
++ * If needed we can still optimize that later with an
++ * empty IRQ.
++ */
++ if (cpu_is_offline(cpu))
++ return true; /* Don't try to wake offline CPUs. */
++ if (tick_nohz_full_cpu(cpu)) {
++ if (cpu != smp_processor_id() ||
++ tick_nohz_tick_stopped())
++ tick_nohz_full_kick_cpu(cpu);
++ return true;
++ }
++
++ return false;
++}
++
++/*
++ * Wake up the specified CPU. If the CPU is going offline, it is the
++ * caller's responsibility to deal with the lost wakeup, for example,
++ * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
++ */
++void wake_up_nohz_cpu(int cpu)
++{
++ if (!wake_up_full_nohz_cpu(cpu))
++ wake_up_idle_cpu(cpu);
++}
++#endif /* CONFIG_NO_HZ_COMMON */
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
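++ *
++ * A minimal usage sketch (illustrative only; "worker" stands in for a
++ * hypothetical kthread owned by the caller):
++ *
++ *	cpumask_var_t mask;
++ *
++ *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
++ *		cpumask_clear(mask);
++ *		cpumask_set_cpu(2, mask);	/* allow the (assumed) CPU 2 only */
++ *		set_cpus_allowed_ptr(worker, mask);
++ *		free_cpumask_var(mask);
++ *	}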
++ * ++ * NOTE: the caller must have a valid reference to the task, the ++ * task must not exit() & deallocate itself prematurely. The ++ * call is not atomic; no spinlocks may be held. ++ */ ++static int __set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, bool check) ++{ ++ const struct cpumask *cpu_valid_mask = cpu_active_mask; ++ bool queued = false, running_wrong = false, kthread; ++ struct cpumask old_mask; ++ struct rq_flags rf; ++ int cpu, ret = 0; ++ struct rq *rq; ++ ++ rq = task_rq_lock(p, &rf); ++ update_rq_clock(rq); ++ ++ kthread = !!(p->flags & PF_KTHREAD); ++ if (kthread) { ++ /* ++ * Kernel threads are allowed on online && !active CPUs ++ */ ++ cpu_valid_mask = cpu_online_mask; ++ } ++ ++ /* ++ * Must re-check here, to close a race against __kthread_bind(), ++ * sched_setaffinity() is not guaranteed to observe the flag. ++ */ ++ if (check && (p->flags & PF_NO_SETAFFINITY)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ cpumask_copy(&old_mask, &p->cpus_allowed); ++ if (cpumask_equal(&old_mask, new_mask)) ++ goto out; ++ ++ if (!cpumask_intersects(new_mask, cpu_valid_mask)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ queued = task_queued(p); ++ __do_set_cpus_allowed(p, new_mask); ++ ++ if (kthread) { ++ /* ++ * For kernel threads that do indeed end up on online && ++ * !active we want to ensure they are strict per-CPU threads. ++ */ ++ WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) && ++ !cpumask_intersects(new_mask, cpu_active_mask) && ++ p->nr_cpus_allowed != 1); ++ } ++ ++ /* Can the task run on the task's current CPU? If so, we're done */ ++ if (cpumask_test_cpu(task_cpu(p), new_mask)) ++ goto out; ++ ++ if (task_running(rq, p)) { ++ /* Task is running on the wrong cpu now, reschedule it. */ ++ if (rq == this_rq()) { ++ cpu = cpumask_any_and(cpu_valid_mask, new_mask); ++ set_task_cpu(p, cpu); ++ set_tsk_need_resched(p); ++ running_wrong = true; ++ } else ++ resched_task(p); ++ } else { ++ cpu = cpumask_any_and(cpu_valid_mask, new_mask); ++ if (queued) { ++ /* ++ * Switch runqueue locks after dequeueing the task ++ * here while still holding the pi_lock to be holding ++ * the correct lock for enqueueing. ++ */ ++ dequeue_task(rq, p, 0); ++ rq_unlock(rq); ++ ++ rq = cpu_rq(cpu); ++ rq_lock(rq); ++ } ++ set_task_cpu(p, cpu); ++ if (queued) ++ enqueue_task(rq, p, 0); ++ } ++ if (queued) ++ try_preempt(p, rq); ++ if (running_wrong) ++ preempt_disable(); ++out: ++ task_rq_unlock(rq, p, &rf); ++ ++ if (running_wrong) { ++ __schedule(true); ++ preempt_enable(); ++ } ++ ++ return ret; ++} ++ ++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ return __set_cpus_allowed_ptr(p, new_mask, false); ++} ++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); ++ ++#ifdef CONFIG_HOTPLUG_CPU ++/* ++ * Run through task list and find tasks affined to the dead cpu, then remove ++ * that cpu from the list, enable cpu0 and set the zerobound flag. Must hold ++ * cpu 0 and src_cpu's runqueue locks. 
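++ *
++ * Illustrative sequence, assuming a task affined to CPU 3 alone while
++ * CPU 3 goes away: bind_zero(3) clears bit 3 in its cpus_allowed, sets
++ * bit 0 and flags the task zerobound; once CPU 3 returns, unbind_zero(3)
++ * restores bit 3 and drops the zerobound flag when the mask covers all
++ * possible CPUs again.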
++ */ ++static void bind_zero(int src_cpu) ++{ ++ struct task_struct *p, *t; ++ struct rq *rq0; ++ int bound = 0; ++ ++ if (src_cpu == 0) ++ return; ++ ++ rq0 = cpu_rq(0); ++ ++ do_each_thread(t, p) { ++ if (cpumask_test_cpu(src_cpu, &p->cpus_allowed)) { ++ bool local = (task_cpu(p) == src_cpu); ++ struct rq *rq = task_rq(p); ++ ++ /* task_running is the cpu stopper thread */ ++ if (local && task_running(rq, p)) ++ continue; ++ atomic_clear_cpu(src_cpu, &p->cpus_allowed); ++ atomic_set_cpu(0, &p->cpus_allowed); ++ p->zerobound = true; ++ bound++; ++ if (local) { ++ bool queued = task_queued(p); ++ ++ if (queued) ++ dequeue_task(rq, p, 0); ++ set_task_cpu(p, 0); ++ if (queued) ++ enqueue_task(rq0, p, 0); ++ } ++ } ++ } while_each_thread(t, p); ++ ++ if (bound) { ++ printk(KERN_INFO "Removed affinity for %d processes to cpu %d\n", ++ bound, src_cpu); ++ } ++} ++ ++/* Find processes with the zerobound flag and reenable their affinity for the ++ * CPU coming alive. */ ++static void unbind_zero(int src_cpu) ++{ ++ int unbound = 0, zerobound = 0; ++ struct task_struct *p, *t; ++ ++ if (src_cpu == 0) ++ return; ++ ++ do_each_thread(t, p) { ++ if (!p->mm) ++ p->zerobound = false; ++ if (p->zerobound) { ++ unbound++; ++ cpumask_set_cpu(src_cpu, &p->cpus_allowed); ++ /* Once every CPU affinity has been re-enabled, remove ++ * the zerobound flag */ ++ if (cpumask_subset(cpu_possible_mask, &p->cpus_allowed)) { ++ p->zerobound = false; ++ zerobound++; ++ } ++ } ++ } while_each_thread(t, p); ++ ++ if (unbound) { ++ printk(KERN_INFO "Added affinity for %d processes to cpu %d\n", ++ unbound, src_cpu); ++ } ++ if (zerobound) { ++ printk(KERN_INFO "Released forced binding to cpu0 for %d processes\n", ++ zerobound); ++ } ++} ++ ++/* ++ * Ensure that the idle task is using init_mm right before its cpu goes ++ * offline. ++ */ ++void idle_task_exit(void) ++{ ++ struct mm_struct *mm = current->active_mm; ++ ++ BUG_ON(cpu_online(smp_processor_id())); ++ ++ if (mm != &init_mm) { ++ switch_mm(mm, &init_mm, current); ++ current->active_mm = &init_mm; ++ finish_arch_post_lock_switch(); ++ } ++ mmdrop(mm); ++} ++#else /* CONFIG_HOTPLUG_CPU */ ++static void unbind_zero(int src_cpu) {} ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++void sched_set_stop_task(int cpu, struct task_struct *stop) ++{ ++ struct sched_param stop_param = { .sched_priority = STOP_PRIO }; ++ struct sched_param start_param = { .sched_priority = 0 }; ++ struct task_struct *old_stop = cpu_rq(cpu)->stop; ++ ++ if (stop) { ++ /* ++ * Make it appear like a SCHED_FIFO task, its something ++ * userspace knows about and won't get confused about. ++ * ++ * Also, it will make PI more or less work without too ++ * much confusion -- but then, stop work should not ++ * rely on PI working anyway. ++ */ ++ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param); ++ } ++ ++ cpu_rq(cpu)->stop = stop; ++ ++ if (old_stop) { ++ /* ++ * Reset it back to a normal scheduling policy so that ++ * it can die in pieces. 
++ */ ++ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param); ++ } ++} ++ ++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) ++ ++static struct ctl_table sd_ctl_dir[] = { ++ { ++ .procname = "sched_domain", ++ .mode = 0555, ++ }, ++ {} ++}; ++ ++static struct ctl_table sd_ctl_root[] = { ++ { ++ .procname = "kernel", ++ .mode = 0555, ++ .child = sd_ctl_dir, ++ }, ++ {} ++}; ++ ++static struct ctl_table *sd_alloc_ctl_entry(int n) ++{ ++ struct ctl_table *entry = ++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); ++ ++ return entry; ++} ++ ++static void sd_free_ctl_entry(struct ctl_table **tablep) ++{ ++ struct ctl_table *entry; ++ ++ /* ++ * In the intermediate directories, both the child directory and ++ * procname are dynamically allocated and could fail but the mode ++ * will always be set. In the lowest directory the names are ++ * static strings and all have proc handlers. ++ */ ++ for (entry = *tablep; entry->mode; entry++) { ++ if (entry->child) ++ sd_free_ctl_entry(&entry->child); ++ if (entry->proc_handler == NULL) ++ kfree(entry->procname); ++ } ++ ++ kfree(*tablep); ++ *tablep = NULL; ++} ++ ++#define CPU_LOAD_IDX_MAX 5 ++static int min_load_idx = 0; ++static int max_load_idx = CPU_LOAD_IDX_MAX-1; ++ ++static void ++set_table_entry(struct ctl_table *entry, ++ const char *procname, void *data, int maxlen, ++ umode_t mode, proc_handler *proc_handler, ++ bool load_idx) ++{ ++ entry->procname = procname; ++ entry->data = data; ++ entry->maxlen = maxlen; ++ entry->mode = mode; ++ entry->proc_handler = proc_handler; ++ ++ if (load_idx) { ++ entry->extra1 = &min_load_idx; ++ entry->extra2 = &max_load_idx; ++ } ++} ++ ++static struct ctl_table * ++sd_alloc_ctl_domain_table(struct sched_domain *sd) ++{ ++ struct ctl_table *table = sd_alloc_ctl_entry(14); ++ ++ if (table == NULL) ++ return NULL; ++ ++ set_table_entry(&table[0], "min_interval", &sd->min_interval, ++ sizeof(long), 0644, proc_doulongvec_minmax, false); ++ set_table_entry(&table[1], "max_interval", &sd->max_interval, ++ sizeof(long), 0644, proc_doulongvec_minmax, false); ++ set_table_entry(&table[2], "busy_idx", &sd->busy_idx, ++ sizeof(int), 0644, proc_dointvec_minmax, true); ++ set_table_entry(&table[3], "idle_idx", &sd->idle_idx, ++ sizeof(int), 0644, proc_dointvec_minmax, true); ++ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, ++ sizeof(int), 0644, proc_dointvec_minmax, true); ++ set_table_entry(&table[5], "wake_idx", &sd->wake_idx, ++ sizeof(int), 0644, proc_dointvec_minmax, true); ++ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, ++ sizeof(int), 0644, proc_dointvec_minmax, true); ++ set_table_entry(&table[7], "busy_factor", &sd->busy_factor, ++ sizeof(int), 0644, proc_dointvec_minmax, false); ++ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, ++ sizeof(int), 0644, proc_dointvec_minmax, false); ++ set_table_entry(&table[9], "cache_nice_tries", ++ &sd->cache_nice_tries, ++ sizeof(int), 0644, proc_dointvec_minmax, false); ++ set_table_entry(&table[10], "flags", &sd->flags, ++ sizeof(int), 0644, proc_dointvec_minmax, false); ++ set_table_entry(&table[11], "max_newidle_lb_cost", ++ &sd->max_newidle_lb_cost, ++ sizeof(long), 0644, proc_doulongvec_minmax, false); ++ set_table_entry(&table[12], "name", sd->name, ++ CORENAME_MAX_SIZE, 0444, proc_dostring, false); ++ /* &table[13] is terminator */ ++ ++ return table; ++} ++ ++static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) ++{ ++ struct ctl_table *entry, *table; ++ struct sched_domain *sd; ++ int 
domain_num = 0, i; ++ char buf[32]; ++ ++ for_each_domain(cpu, sd) ++ domain_num++; ++ entry = table = sd_alloc_ctl_entry(domain_num + 1); ++ if (table == NULL) ++ return NULL; ++ ++ i = 0; ++ for_each_domain(cpu, sd) { ++ snprintf(buf, 32, "domain%d", i); ++ entry->procname = kstrdup(buf, GFP_KERNEL); ++ entry->mode = 0555; ++ entry->child = sd_alloc_ctl_domain_table(sd); ++ entry++; ++ i++; ++ } ++ return table; ++} ++ ++static cpumask_var_t sd_sysctl_cpus; ++static struct ctl_table_header *sd_sysctl_header; ++ ++void register_sched_domain_sysctl(void) ++{ ++ static struct ctl_table *cpu_entries; ++ static struct ctl_table **cpu_idx; ++ char buf[32]; ++ int i; ++ ++ if (!cpu_entries) { ++ cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1); ++ if (!cpu_entries) ++ return; ++ ++ WARN_ON(sd_ctl_dir[0].child); ++ sd_ctl_dir[0].child = cpu_entries; ++ } ++ ++ if (!cpu_idx) { ++ struct ctl_table *e = cpu_entries; ++ ++ cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL); ++ if (!cpu_idx) ++ return; ++ ++ /* deal with sparse possible map */ ++ for_each_possible_cpu(i) { ++ cpu_idx[i] = e; ++ e++; ++ } ++ } ++ ++ if (!cpumask_available(sd_sysctl_cpus)) { ++ if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL)) ++ return; ++ ++ /* init to possible to not have holes in @cpu_entries */ ++ cpumask_copy(sd_sysctl_cpus, cpu_possible_mask); ++ } ++ ++ for_each_cpu(i, sd_sysctl_cpus) { ++ struct ctl_table *e = cpu_idx[i]; ++ ++ if (e->child) ++ sd_free_ctl_entry(&e->child); ++ ++ if (!e->procname) { ++ snprintf(buf, 32, "cpu%d", i); ++ e->procname = kstrdup(buf, GFP_KERNEL); ++ } ++ e->mode = 0555; ++ e->child = sd_alloc_ctl_cpu_table(i); ++ ++ __cpumask_clear_cpu(i, sd_sysctl_cpus); ++ } ++ ++ WARN_ON(sd_sysctl_header); ++ sd_sysctl_header = register_sysctl_table(sd_ctl_root); ++} ++ ++void dirty_sched_domain_sysctl(int cpu) ++{ ++ if (cpumask_available(sd_sysctl_cpus)) ++ __cpumask_set_cpu(cpu, sd_sysctl_cpus); ++} ++ ++/* may be called multiple times per register */ ++void unregister_sched_domain_sysctl(void) ++{ ++ unregister_sysctl_table(sd_sysctl_header); ++ sd_sysctl_header = NULL; ++} ++#endif /* CONFIG_SYSCTL */ ++ ++void set_rq_online(struct rq *rq) ++{ ++ if (!rq->online) { ++ cpumask_set_cpu(cpu_of(rq), rq->rd->online); ++ rq->online = true; ++ } ++} ++ ++void set_rq_offline(struct rq *rq) ++{ ++ if (rq->online) { ++ int cpu = cpu_of(rq); ++ ++ cpumask_clear_cpu(cpu, rq->rd->online); ++ rq->online = false; ++ clear_cpuidle_map(cpu); ++ } ++} ++ ++/* ++ * used to mark begin/end of suspend/resume: ++ */ ++static int num_cpus_frozen; ++ ++/* ++ * Update cpusets according to cpu_active mask. If cpusets are ++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper ++ * around partition_sched_domains(). ++ * ++ * If we come here as part of a suspend/resume, don't touch cpusets because we ++ * want to restore it back to its original state upon resume anyway. ++ */ ++static void cpuset_cpu_active(void) ++{ ++ if (cpuhp_tasks_frozen) { ++ /* ++ * num_cpus_frozen tracks how many CPUs are involved in suspend ++ * resume sequence. As long as this is not the last online ++ * operation in the resume sequence, just build a single sched ++ * domain, ignoring cpusets. ++ */ ++ partition_sched_domains(1, NULL, NULL); ++ if (--num_cpus_frozen) ++ return; ++ /* ++ * This is the last CPU online operation. So fall through and ++ * restore the original sched domains by considering the ++ * cpuset configurations. 
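++ *
++ * Worked example on a 4-CPU machine: suspend offlines CPUs 1-3, so
++ * cpuset_cpu_inactive() steps num_cpus_frozen up to 3; on resume each
++ * online operation lands here, and only the third call (num_cpus_frozen
++ * reaching 0) falls through to rebuild the cpuset domains.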
++ */ ++ cpuset_force_rebuild(); ++ } ++ ++ cpuset_update_active_cpus(); ++} ++ ++static int cpuset_cpu_inactive(unsigned int cpu) ++{ ++ if (!cpuhp_tasks_frozen) { ++ cpuset_update_active_cpus(); ++ } else { ++ num_cpus_frozen++; ++ partition_sched_domains(1, NULL, NULL); ++ } ++ return 0; ++} ++ ++int sched_cpu_activate(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ struct rq_flags rf; ++ ++ set_cpu_active(cpu, true); ++ ++ if (sched_smp_initialized) { ++ sched_domains_numa_masks_set(cpu); ++ cpuset_cpu_active(); ++ } ++ ++ /* ++ * Put the rq online, if not already. This happens: ++ * ++ * 1) In the early boot process, because we build the real domains ++ * after all CPUs have been brought up. ++ * ++ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the ++ * domains. ++ */ ++ rq_lock_irqsave(rq, &rf); ++ if (rq->rd) { ++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); ++ set_rq_online(rq); ++ } ++ unbind_zero(cpu); ++ rq_unlock_irqrestore(rq, &rf); ++ ++ return 0; ++} ++ ++int sched_cpu_deactivate(unsigned int cpu) ++{ ++ int ret; ++ ++ set_cpu_active(cpu, false); ++ /* ++ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU ++ * users of this state to go away such that all new such users will ++ * observe it. ++ * ++ * Do sync before park smpboot threads to take care the rcu boost case. ++ */ ++ synchronize_rcu(); ++ ++ if (!sched_smp_initialized) ++ return 0; ++ ++ ret = cpuset_cpu_inactive(cpu); ++ if (ret) { ++ set_cpu_active(cpu, true); ++ return ret; ++ } ++ sched_domains_numa_masks_clear(cpu); ++ return 0; ++} ++ ++int sched_cpu_starting(unsigned int cpu) ++{ ++ sched_tick_start(cpu); ++ return 0; ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++int sched_cpu_dying(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ /* Handle pending wakeups and then migrate everything off */ ++ sched_ttwu_pending(); ++ sched_tick_stop(cpu); ++ ++ local_irq_save(flags); ++ double_rq_lock(rq, cpu_rq(0)); ++ if (rq->rd) { ++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); ++ set_rq_offline(rq); ++ } ++ bind_zero(cpu); ++ double_rq_unlock(rq, cpu_rq(0)); ++ sched_start_tick(rq, cpu); ++ hrexpiry_clear(rq); ++ local_irq_restore(flags); ++ ++ return 0; ++} ++#endif ++ ++#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC) ++/* ++ * Cheaper version of the below functions in case support for SMT and MC is ++ * compiled in but CPUs have no siblings. 
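++ *
++ * For example, with CONFIG_SCHED_SMT built in but no hyperthreading
++ * present, cpumask_weight(thread_cpumask(cpu)) is 1, so sched_init_smp()
++ * below never installs siblings_cpu_idle() and the rq keeps the
++ * sole_cpu_idle() default assigned in sched_init().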
++ */ ++static bool sole_cpu_idle(struct rq *rq) ++{ ++ return rq_idle(rq); ++} ++#endif ++#ifdef CONFIG_SCHED_SMT ++static const cpumask_t *thread_cpumask(int cpu) ++{ ++ return topology_sibling_cpumask(cpu); ++} ++/* All this CPU's SMT siblings are idle */ ++static bool siblings_cpu_idle(struct rq *rq) ++{ ++ return cpumask_subset(&rq->thread_mask, &cpu_idle_map); ++} ++#endif ++#ifdef CONFIG_SCHED_MC ++static const cpumask_t *core_cpumask(int cpu) ++{ ++ return topology_core_cpumask(cpu); ++} ++/* All this CPU's shared cache siblings are idle */ ++static bool cache_cpu_idle(struct rq *rq) ++{ ++ return cpumask_subset(&rq->core_mask, &cpu_idle_map); ++} ++#endif ++ ++enum sched_domain_level { ++ SD_LV_NONE = 0, ++ SD_LV_SIBLING, ++ SD_LV_MC, ++ SD_LV_BOOK, ++ SD_LV_CPU, ++ SD_LV_NODE, ++ SD_LV_ALLNODES, ++ SD_LV_MAX ++}; ++ ++void __init sched_init_smp(void) ++{ ++ struct rq *rq, *other_rq, *leader = cpu_rq(0); ++ struct sched_domain *sd; ++ int cpu, other_cpu, i; ++#ifdef CONFIG_SCHED_SMT ++ bool smt_threads = false; ++#endif ++ sched_init_numa(); ++ ++ /* ++ * There's no userspace yet to cause hotplug operations; hence all the ++ * cpu masks are stable and all blatant races in the below code cannot ++ * happen. ++ */ ++ mutex_lock(&sched_domains_mutex); ++ sched_init_domains(cpu_active_mask); ++ mutex_unlock(&sched_domains_mutex); ++ ++ /* Move init over to a non-isolated CPU */ ++ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) ++ BUG(); ++ ++ local_irq_disable(); ++ mutex_lock(&sched_domains_mutex); ++ lock_all_rqs(); ++ /* ++ * Set up the relative cache distance of each online cpu from each ++ * other in a simple array for quick lookup. Locality is determined ++ * by the closest sched_domain that CPUs are separated by. CPUs with ++ * shared cache in SMT and MC are treated as local. Separate CPUs ++ * (within the same package or physically) within the same node are ++ * treated as not local. CPUs not even in the same domain (different ++ * nodes) are treated as very distant. ++ */ ++ for_each_online_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ ++ /* First check if this cpu is in the same node */ ++ for_each_domain(cpu, sd) { ++ if (sd->level > SD_LV_MC) ++ continue; ++ if (rqshare != RQSHARE_ALL) ++ leader = NULL; ++ /* Set locality to local node if not already found lower */ ++ for_each_cpu(other_cpu, sched_domain_span(sd)) { ++ if (rqshare >= RQSHARE_SMP) { ++ other_rq = cpu_rq(other_cpu); ++ ++ /* Set the smp_leader to the first CPU */ ++ if (!leader) ++ leader = rq; ++ other_rq->smp_leader = leader; ++ } ++ ++ if (rq->cpu_locality[other_cpu] > 3) ++ rq->cpu_locality[other_cpu] = 3; ++ } ++ } ++ ++ /* ++ * Each runqueue has its own function in case it doesn't have ++ * siblings of its own allowing mixed topologies. 
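++ *
++ * A sketch of the cpu_locality[] values produced above and below for a
++ * hypothetical single-node package with two cores of two SMT threads
++ * each (CPUs 0/1 sharing one core, CPUs 2/3 the other):
++ *
++ *	to:	0  1  2  3
++ *	CPU 0:	0  1  2  2
++ *	CPU 1:	1  0  2  2
++ *	CPU 2:	2  2  0  1
++ *	CPU 3:	2  2  1  0
++ *
++ * where 0 = self, 1 = SMT sibling, 2 = shared package cache, 3 = same
++ * node and 4 = distant (other node).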
++ */ ++#ifdef CONFIG_SCHED_MC ++ leader = NULL; ++ if (cpumask_weight(core_cpumask(cpu)) > 1) { ++ cpumask_copy(&rq->core_mask, core_cpumask(cpu)); ++ cpumask_clear_cpu(cpu, &rq->core_mask); ++ for_each_cpu(other_cpu, core_cpumask(cpu)) { ++ if (rqshare == RQSHARE_MC) { ++ other_rq = cpu_rq(other_cpu); ++ ++ /* Set the mc_leader to the first CPU */ ++ if (!leader) ++ leader = rq; ++ other_rq->mc_leader = leader; ++ } ++ if (rq->cpu_locality[other_cpu] > 2) ++ rq->cpu_locality[other_cpu] = 2; ++ } ++ rq->cache_idle = cache_cpu_idle; ++ } ++#endif ++#ifdef CONFIG_SCHED_SMT ++ leader = NULL; ++ if (cpumask_weight(thread_cpumask(cpu)) > 1) { ++ cpumask_copy(&rq->thread_mask, thread_cpumask(cpu)); ++ cpumask_clear_cpu(cpu, &rq->thread_mask); ++ for_each_cpu(other_cpu, thread_cpumask(cpu)) { ++ if (rqshare == RQSHARE_SMT) { ++ other_rq = cpu_rq(other_cpu); ++ ++ /* Set the smt_leader to the first CPU */ ++ if (!leader) ++ leader = rq; ++ other_rq->smt_leader = leader; ++ } ++ if (rq->cpu_locality[other_cpu] > 1) ++ rq->cpu_locality[other_cpu] = 1; ++ } ++ rq->siblings_idle = siblings_cpu_idle; ++ smt_threads = true; ++ } ++#endif ++ } ++ ++#ifdef CONFIG_SMT_NICE ++ if (smt_threads) { ++ check_siblings = &check_smt_siblings; ++ wake_siblings = &wake_smt_siblings; ++ smt_schedule = &smt_should_schedule; ++ } ++#endif ++ unlock_all_rqs(); ++ mutex_unlock(&sched_domains_mutex); ++ ++ for_each_online_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ ++ for_each_online_cpu(other_cpu) { ++ if (other_cpu <= cpu) ++ continue; ++ printk(KERN_DEBUG "MuQSS locality CPU %d to %d: %d\n", cpu, other_cpu, rq->cpu_locality[other_cpu]); ++ } ++ } ++ ++ for_each_online_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ leader = rq->smp_leader; ++ ++ rq_lock(rq); ++ if (leader && rq != leader) { ++ printk(KERN_INFO "Sharing SMP runqueue from CPU %d to CPU %d\n", ++ leader->cpu, rq->cpu); ++ kfree(rq->node); ++ kfree(rq->sl); ++ kfree(rq->lock); ++ rq->node = leader->node; ++ rq->sl = leader->sl; ++ rq->lock = leader->lock; ++ barrier(); ++ /* To make up for not unlocking the freed runlock */ ++ preempt_enable(); ++ } else ++ rq_unlock(rq); ++ } ++ ++#ifdef CONFIG_SCHED_MC ++ for_each_online_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ leader = rq->mc_leader; ++ ++ rq_lock(rq); ++ if (leader && rq != leader) { ++ printk(KERN_INFO "Sharing MC runqueue from CPU %d to CPU %d\n", ++ leader->cpu, rq->cpu); ++ kfree(rq->node); ++ kfree(rq->sl); ++ kfree(rq->lock); ++ rq->node = leader->node; ++ rq->sl = leader->sl; ++ rq->lock = leader->lock; ++ barrier(); ++ /* To make up for not unlocking the freed runlock */ ++ preempt_enable(); ++ } else ++ rq_unlock(rq); ++ } ++#endif /* CONFIG_SCHED_MC */ ++ ++#ifdef CONFIG_SCHED_SMT ++ for_each_online_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ ++ leader = rq->smt_leader; ++ ++ rq_lock(rq); ++ if (leader && rq != leader) { ++ printk(KERN_INFO "Sharing SMT runqueue from CPU %d to CPU %d\n", ++ leader->cpu, rq->cpu); ++ kfree(rq->node); ++ kfree(rq->sl); ++ kfree(rq->lock); ++ rq->node = leader->node; ++ rq->sl = leader->sl; ++ rq->lock = leader->lock; ++ barrier(); ++ /* To make up for not unlocking the freed runlock */ ++ preempt_enable(); ++ } else ++ rq_unlock(rq); ++ } ++#endif /* CONFIG_SCHED_SMT */ ++ ++ local_irq_enable(); ++ ++ total_runqueues = 0; ++ for_each_possible_cpu(cpu) { ++ int locality, total_rqs = 0, total_cpus = 0; ++ ++ rq = cpu_rq(cpu); ++ if ( ++#ifdef CONFIG_SCHED_MC ++ (rq->mc_leader == rq) && ++#endif ++#ifdef CONFIG_SCHED_SMT ++ (rq->smt_leader == rq) && ++#endif ++ (rq->smp_leader == rq)) ++ 
total_runqueues++; ++ ++ for (locality = 0; locality <= 4; locality++) { ++ int test_cpu; ++ ++ for_each_possible_cpu(test_cpu) { ++ /* Work from each CPU up instead of every rq ++ * starting at CPU 0. Orders are better matched ++ * if the top half CPUs count down instead. */ ++ if (cpu < num_possible_cpus() / 2) ++ other_cpu = cpu + test_cpu; ++ else ++ other_cpu = cpu - test_cpu; ++ if (other_cpu < 0) ++ other_cpu += num_possible_cpus(); ++ else ++ other_cpu %= num_possible_cpus(); ++ other_rq = cpu_rq(other_cpu); ++ ++ if (rq->cpu_locality[other_cpu] == locality) { ++ rq->cpu_order[total_cpus++] = other_rq; ++ if ( ++ ++#ifdef CONFIG_SCHED_MC ++ (other_rq->mc_leader == other_rq) && ++#endif ++#ifdef CONFIG_SCHED_SMT ++ (other_rq->smt_leader == other_rq) && ++#endif ++ (other_rq->smp_leader == other_rq)) ++ rq->rq_order[total_rqs++] = other_rq; ++ } ++ } ++ } ++ } ++ ++ for_each_possible_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ for (i = 0; i < total_runqueues; i++) { ++ printk(KERN_DEBUG "CPU %d RQ order %d RQ %d\n", cpu, i, ++ rq->rq_order[i]->cpu); ++ } ++ } ++ for_each_possible_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ for (i = 0; i < num_possible_cpus(); i++) { ++ printk(KERN_DEBUG "CPU %d CPU order %d RQ %d\n", cpu, i, ++ rq->cpu_order[i]->cpu); ++ } ++ } ++ switch (rqshare) { ++ case RQSHARE_ALL: ++ /* This should only ever read 1 */ ++ printk(KERN_INFO "MuQSS runqueue share type ALL total runqueues: %d\n", ++ total_runqueues); ++ break; ++ case RQSHARE_SMP: ++ printk(KERN_INFO "MuQSS runqueue share type SMP total runqueues: %d\n", ++ total_runqueues); ++ break; ++ case RQSHARE_MC: ++ printk(KERN_INFO "MuQSS runqueue share type MC total runqueues: %d\n", ++ total_runqueues); ++ break; ++ case RQSHARE_SMT: ++ printk(KERN_INFO "MuQSS runqueue share type SMT total runqueues: %d\n", ++ total_runqueues); ++ break; ++ case RQSHARE_NONE: ++ printk(KERN_INFO "MuQSS runqueue share type NONE total runqueues: %d\n", ++ total_runqueues); ++ break; ++ } ++ ++ sched_smp_initialized = true; ++} ++#else ++void __init sched_init_smp(void) ++{ ++ sched_smp_initialized = true; ++} ++#endif /* CONFIG_SMP */ ++ ++int in_sched_functions(unsigned long addr) ++{ ++ return in_lock_functions(addr) || ++ (addr >= (unsigned long)__sched_text_start ++ && addr < (unsigned long)__sched_text_end); ++} ++ ++#ifdef CONFIG_CGROUP_SCHED ++/* task group related information */ ++struct task_group { ++ struct cgroup_subsys_state css; ++ ++ struct rcu_head rcu; ++ struct list_head list; ++ ++ struct task_group *parent; ++ struct list_head siblings; ++ struct list_head children; ++}; ++ ++/* ++ * Default task group. ++ * Every task in system belongs to this group at bootup. 
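++ *
++ * (Note that under MuQSS the CPU controller is a skeleton: the cftype
++ * tables below are empty and the fork/attach callbacks do nothing, so
++ * task_group membership has no scheduling effect.)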
++ */ ++struct task_group root_task_group; ++LIST_HEAD(task_groups); ++ ++/* Cacheline aligned slab cache for task_group */ ++static struct kmem_cache *task_group_cache __read_mostly; ++#endif /* CONFIG_CGROUP_SCHED */ ++ ++void __init sched_init(void) ++{ ++#ifdef CONFIG_SMP ++ int cpu_ids; ++#endif ++ int i; ++ struct rq *rq; ++ ++ wait_bit_init(); ++ ++ prio_ratios[0] = 128; ++ for (i = 1 ; i < NICE_WIDTH ; i++) ++ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10; ++ ++ skiplist_node_init(&init_task.node); ++ ++#ifdef CONFIG_SMP ++ init_defrootdomain(); ++ cpumask_clear(&cpu_idle_map); ++#else ++ uprq = &per_cpu(runqueues, 0); ++#endif ++ ++#ifdef CONFIG_CGROUP_SCHED ++ task_group_cache = KMEM_CACHE(task_group, 0); ++ ++ list_add(&root_task_group.list, &task_groups); ++ INIT_LIST_HEAD(&root_task_group.children); ++ INIT_LIST_HEAD(&root_task_group.siblings); ++#endif /* CONFIG_CGROUP_SCHED */ ++ for_each_possible_cpu(i) { ++ rq = cpu_rq(i); ++ rq->node = kmalloc(sizeof(skiplist_node), GFP_ATOMIC); ++ skiplist_init(rq->node); ++ rq->sl = new_skiplist(rq->node); ++ rq->lock = kmalloc(sizeof(raw_spinlock_t), GFP_ATOMIC); ++ raw_spin_lock_init(rq->lock); ++ rq->nr_running = 0; ++ rq->nr_uninterruptible = 0; ++ rq->nr_switches = 0; ++ rq->clock = rq->old_clock = rq->last_niffy = rq->niffies = 0; ++ rq->last_jiffy = jiffies; ++ rq->user_ns = rq->nice_ns = rq->softirq_ns = rq->system_ns = ++ rq->iowait_ns = rq->idle_ns = 0; ++ rq->dither = 0; ++ set_rq_task(rq, &init_task); ++ rq->iso_ticks = 0; ++ rq->iso_refractory = false; ++#ifdef CONFIG_SMP ++ rq->smp_leader = rq; ++#ifdef CONFIG_SCHED_MC ++ rq->mc_leader = rq; ++#endif ++#ifdef CONFIG_SCHED_SMT ++ rq->smt_leader = rq; ++#endif ++ rq->sd = NULL; ++ rq->rd = NULL; ++ rq->online = false; ++ rq->cpu = i; ++ rq_attach_root(rq, &def_root_domain); ++#endif ++ init_rq_hrexpiry(rq); ++ atomic_set(&rq->nr_iowait, 0); ++ } ++ ++#ifdef CONFIG_SMP ++ cpu_ids = i; ++ /* ++ * Set the base locality for cpu cache distance calculation to ++ * "distant" (3). Make sure the distance from a CPU to itself is 0. ++ */ ++ for_each_possible_cpu(i) { ++ int j; ++ ++ rq = cpu_rq(i); ++#ifdef CONFIG_SCHED_SMT ++ rq->siblings_idle = sole_cpu_idle; ++#endif ++#ifdef CONFIG_SCHED_MC ++ rq->cache_idle = sole_cpu_idle; ++#endif ++ rq->cpu_locality = kmalloc(cpu_ids * sizeof(int *), GFP_ATOMIC); ++ for_each_possible_cpu(j) { ++ if (i == j) ++ rq->cpu_locality[j] = 0; ++ else ++ rq->cpu_locality[j] = 4; ++ } ++ rq->rq_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC); ++ rq->cpu_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC); ++ rq->rq_order[0] = rq->cpu_order[0] = rq; ++ for (j = 1; j < cpu_ids; j++) ++ rq->rq_order[j] = rq->cpu_order[j] = cpu_rq(j); ++ } ++#endif ++ ++ /* ++ * The boot idle thread does lazy MMU switching as well: ++ */ ++ mmgrab(&init_mm); ++ enter_lazy_tlb(&init_mm, current); ++ ++ /* ++ * Make us the idle thread. Technically, schedule() should not be ++ * called from this thread, however somewhere below it might be, ++ * but because we are the idle thread, we just pick up running again ++ * when this runqueue becomes "idle". 
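++ *
++ * (Aside, a worked example of the prio_ratios[] table filled in at the
++ * top of this function: each nice level costs roughly 10% CPU, so the
++ * series runs 128, 140, 154, 169, 185, 203, ... i.e. approximately
++ * 128 * 1.1^n, truncated to an integer at every step.)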
++ */ ++ init_idle(current, smp_processor_id()); ++ ++#ifdef CONFIG_SMP ++ idle_thread_set_boot_cpu(); ++#endif /* SMP */ ++ ++ init_schedstats(); ++ ++ psi_init(); ++} ++ ++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP ++static inline int preempt_count_equals(int preempt_offset) ++{ ++ int nested = preempt_count() + rcu_preempt_depth(); ++ ++ return (nested == preempt_offset); ++} ++ ++void __might_sleep(const char *file, int line, int preempt_offset) ++{ ++ /* ++ * Blocking primitives will set (and therefore destroy) current->state, ++ * since we will exit with TASK_RUNNING make sure we enter with it, ++ * otherwise we will destroy state. ++ */ ++ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, ++ "do not call blocking ops when !TASK_RUNNING; " ++ "state=%lx set at [<%p>] %pS\n", ++ current->state, ++ (void *)current->task_state_change, ++ (void *)current->task_state_change); ++ ++ ___might_sleep(file, line, preempt_offset); ++} ++EXPORT_SYMBOL(__might_sleep); ++ ++void __cant_sleep(const char *file, int line, int preempt_offset) ++{ ++ static unsigned long prev_jiffy; ++ ++ if (irqs_disabled()) ++ return; ++ ++ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) ++ return; ++ ++ if (preempt_count() > preempt_offset) ++ return; ++ ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); ++ printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), ++ current->pid, current->comm); ++ ++ debug_show_held_locks(current); ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++EXPORT_SYMBOL_GPL(__cant_sleep); ++ ++void ___might_sleep(const char *file, int line, int preempt_offset) ++{ ++ /* Ratelimiting timestamp: */ ++ static unsigned long prev_jiffy; ++ ++ unsigned long preempt_disable_ip; ++ ++ /* WARN_ON_ONCE() by default, no rate limit required: */ ++ rcu_sleep_check(); ++ ++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && ++ !is_idle_task(current)) || ++ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || ++ oops_in_progress) ++ return; ++ ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ /* Save this before calling printk(), since that will clobber it: */ ++ preempt_disable_ip = get_preempt_disable_ip(current); ++ ++ printk(KERN_ERR ++ "BUG: sleeping function called from invalid context at %s:%d\n", ++ file, line); ++ printk(KERN_ERR ++ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), ++ current->pid, current->comm); ++ ++ if (task_stack_end_corrupted(current)) ++ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); ++ ++ debug_show_held_locks(current); ++ if (irqs_disabled()) ++ print_irqtrace_events(current); ++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) ++ && !preempt_count_equals(preempt_offset)) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(preempt_disable_ip); ++ pr_cont("\n"); ++ } ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++EXPORT_SYMBOL(___might_sleep); ++#endif ++ ++#ifdef CONFIG_MAGIC_SYSRQ ++static inline void normalise_rt_tasks(void) ++{ ++ struct task_struct *g, *p; ++ struct rq_flags rf; ++ struct rq *rq; ++ ++ read_lock(&tasklist_lock); ++ for_each_process_thread(g, p) { ++ /* ++ * Only normalize user tasks: ++ */ ++ if (p->flags & PF_KTHREAD) ++ continue; ++ ++ if (!rt_task(p) && !iso_task(p)) ++ continue; ++ ++ rq = 
task_rq_lock(p, &rf);
++ __setscheduler(p, rq, SCHED_NORMAL, 0, false);
++ task_rq_unlock(rq, p, &rf);
++ }
++ read_unlock(&tasklist_lock);
++}
++
++void normalize_rt_tasks(void)
++{
++ normalise_rt_tasks();
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++ return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner. This function
++ * must be called with all CPUs synchronised, and interrupts disabled, and
++ * the caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++ cpu_curr(cpu) = p;
++}
++
++#endif
++
++void init_idle_bootup_task(struct task_struct *idle)
++{}
++
++#ifdef CONFIG_SCHED_DEBUG
++__read_mostly bool sched_debug_enabled;
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++ struct seq_file *m)
++{}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++ kmem_cache_free(task_group_cache, tg);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++ struct task_group *tg;
++
++ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++ if (!tg)
++ return ERR_PTR(-ENOMEM);
++
++ return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++ /* Now it should be safe to free those cfs_rqs */
++ sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++ /* Wait for possible concurrent references to cfs_rqs complete */
++ call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++void sched_offline_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++ return css ?
container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++ struct task_group *parent = css_tg(parent_css);
++ struct task_group *tg;
++
++ if (!parent) {
++ /* This is early initialization for the top cgroup */
++ return &root_task_group.css;
++ }
++
++ tg = sched_create_group(parent);
++ if (IS_ERR(tg))
++ return ERR_PTR(-ENOMEM);
++ return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++ struct task_group *parent = css_tg(css->parent);
++
++ if (parent)
++ sched_online_group(tg, parent);
++ return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ sched_offline_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ /*
++ * Relies on the RCU grace period between css_released() and this.
++ */
++ sched_free_group(tg);
++}
++
++static void cpu_cgroup_fork(struct task_struct *task)
++{
++}
++
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++ return 0;
++}
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++static struct cftype cpu_legacy_files[] = {
++ { } /* Terminate */
++};
++
++static struct cftype cpu_files[] = {
++ { } /* Terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++ struct cgroup_subsys_state *css)
++{
++ return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++ .css_alloc = cpu_cgroup_css_alloc,
++ .css_online = cpu_cgroup_css_online,
++ .css_released = cpu_cgroup_css_released,
++ .css_free = cpu_cgroup_css_free,
++ .css_extra_stat_show = cpu_extra_stat_show,
++ .fork = cpu_cgroup_fork,
++ .can_attach = cpu_cgroup_can_attach,
++ .attach = cpu_cgroup_attach,
++ .legacy_cftypes = cpu_legacy_files,
++ .dfl_cftypes = cpu_files,
++ .early_init = true,
++ .threaded = true,
++};
++#endif /* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
+diff -Nur a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
+--- a/kernel/sched/MuQSS.h 1970-01-01 01:00:00.000000000 +0100
++++ b/kernel/sched/MuQSS.h 2019-07-07 09:17:41.261241813 +0100
+@@ -0,0 +1,957 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef MUQSS_SCHED_H
++#define MUQSS_SCHED_H
++
++#include <linux/sched/clock.h>
++#include <linux/sched/cpufreq.h>
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/hotplug.h>
++#include <linux/sched/init.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/signal.h>
++#include <linux/sched/smt.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/task.h>
++#include <linux/sched/task_stack.h>
++#include <linux/sched/topology.h>
++#include <linux/sched/wake_q.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <linux/cgroup.h>
++#include <linux/cpufreq.h>
++#include <linux/cpuidle.h>
++#include <linux/cpuset.h>
++#include <linux/ctype.h>
++#include <linux/energy_model.h>
++#include <linux/freezer.h>
++#include <linux/interrupt.h>
++#include <linux/kernel_stat.h>
++#include <linux/kthread.h>
++#include <linux/membarrier.h>
++#include <linux/livepatch.h>
++#include <linux/proc_fs.h>
++#include <linux/psi.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/skip_list.h> ++#include <linux/stop_machine.h> ++#include <linux/suspend.h> ++#include <linux/swait.h> ++#include <linux/syscalls.h> ++#include <linux/tick.h> ++#include <linux/tsacct_kern.h> ++#include <linux/u64_stats_sync.h> ++ ++#ifdef CONFIG_PARAVIRT ++#include <asm/paravirt.h> ++#endif ++ ++#include "cpupri.h" ++ ++#ifdef CONFIG_SCHED_DEBUG ++# define SCHED_WARN_ON(x) WARN_ONCE(x, #x) ++#else ++# define SCHED_WARN_ON(x) ((void)(x)) ++#endif ++ ++/* task_struct::on_rq states: */ ++#define TASK_ON_RQ_QUEUED 1 ++#define TASK_ON_RQ_MIGRATING 2 ++ ++struct rq; ++ ++#ifdef CONFIG_SMP ++ ++static inline bool sched_asym_prefer(int a, int b) ++{ ++ return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); ++} ++ ++struct perf_domain { ++ struct em_perf_domain *em_pd; ++ struct perf_domain *next; ++ struct rcu_head rcu; ++}; ++ ++/* Scheduling group status flags */ ++#define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */ ++#define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */ ++ ++/* ++ * We add the notion of a root-domain which will be used to define per-domain ++ * variables. Each exclusive cpuset essentially defines an island domain by ++ * fully partitioning the member cpus from any other cpuset. Whenever a new ++ * exclusive cpuset is created, we also create and attach a new root-domain ++ * object. ++ * ++ */ ++struct root_domain { ++ atomic_t refcount; ++ atomic_t rto_count; ++ struct rcu_head rcu; ++ cpumask_var_t span; ++ cpumask_var_t online; ++ ++ /* ++ * Indicate pullable load on at least one CPU, e.g: ++ * - More than one runnable task ++ * - Running task is misfit ++ */ ++ int overload; ++ ++ /* Indicate one or more cpus over-utilized (tipping point) */ ++ int overutilized; ++ ++ /* ++ * The bit corresponding to a CPU gets set here if such CPU has more ++ * than one runnable -deadline task (as it is below for RT tasks). ++ */ ++ cpumask_var_t dlo_mask; ++ atomic_t dlo_count; ++ /* Replace unused CFS structures with void */ ++ //struct dl_bw dl_bw; ++ //struct cpudl cpudl; ++ void *dl_bw; ++ void *cpudl; ++ ++ /* ++ * The "RT overload" flag: it gets set if a CPU has more than ++ * one runnable RT task. ++ */ ++ cpumask_var_t rto_mask; ++ //struct cpupri cpupri; ++ void *cpupri; ++ ++ unsigned long max_cpu_capacity; ++ ++ /* ++ * NULL-terminated list of performance domains intersecting with the ++ * CPUs of the rd. Protected by RCU. ++ */ ++ struct perf_domain *pd; ++}; ++ ++extern struct root_domain def_root_domain; ++extern struct mutex sched_domains_mutex; ++ ++extern void init_defrootdomain(void); ++extern int sched_init_domains(const struct cpumask *cpu_map); ++extern void rq_attach_root(struct rq *rq, struct root_domain *rd); ++ ++static inline void cpupri_cleanup(void __maybe_unused *cpupri) ++{ ++} ++ ++static inline void cpudl_cleanup(void __maybe_unused *cpudl) ++{ ++} ++ ++static inline void init_dl_bw(void __maybe_unused *dl_bw) ++{ ++} ++ ++static inline int cpudl_init(void __maybe_unused *dl_bw) ++{ ++ return 0; ++} ++ ++static inline int cpupri_init(void __maybe_unused *cpupri) ++{ ++ return 0; ++} ++#endif /* CONFIG_SMP */ ++ ++/* ++ * This is the main, per-CPU runqueue data structure. ++ * This data should only be modified by the local cpu. ++ */ ++struct rq { ++ raw_spinlock_t *lock; ++ raw_spinlock_t *orig_lock; ++ ++ struct task_struct *curr, *idle, *stop; ++ struct mm_struct *prev_mm; ++ ++ unsigned int nr_running; ++ /* ++ * This is part of a global counter where only the total sum ++ * over all CPUs matters. 
A task can increase this counter on ++ * one CPU and if it got migrated afterwards it may decrease ++ * it on another CPU. Always updated under the runqueue lock: ++ */ ++ unsigned long nr_uninterruptible; ++ u64 nr_switches; ++ ++ /* Stored data about rq->curr to work outside rq lock */ ++ u64 rq_deadline; ++ int rq_prio; ++ ++ /* Best queued id for use outside lock */ ++ u64 best_key; ++ ++ unsigned long last_scheduler_tick; /* Last jiffy this RQ ticked */ ++ unsigned long last_jiffy; /* Last jiffy this RQ updated rq clock */ ++ u64 niffies; /* Last time this RQ updated rq clock */ ++ u64 last_niffy; /* Last niffies as updated by local clock */ ++ u64 last_jiffy_niffies; /* Niffies @ last_jiffy */ ++ ++ u64 load_update; /* When we last updated load */ ++ unsigned long load_avg; /* Rolling load average */ ++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ ++ u64 irq_load_update; /* When we last updated IRQ load */ ++ unsigned long irq_load_avg; /* Rolling IRQ load average */ ++#endif ++#ifdef CONFIG_SMT_NICE ++ struct mm_struct *rq_mm; ++ int rq_smt_bias; /* Policy/nice level bias across smt siblings */ ++#endif ++ /* Accurate timekeeping data */ ++ unsigned long user_ns, nice_ns, irq_ns, softirq_ns, system_ns, ++ iowait_ns, idle_ns; ++ atomic_t nr_iowait; ++ ++ skiplist_node *node; ++ skiplist *sl; ++#ifdef CONFIG_SMP ++ struct task_struct *preempt; /* Preempt triggered on this task */ ++ struct task_struct *preempting; /* Hint only, what task is preempting */ ++ ++ int cpu; /* cpu of this runqueue */ ++ bool online; ++ ++ struct root_domain *rd; ++ struct sched_domain *sd; ++ ++ unsigned long cpu_capacity_orig; ++ ++ int *cpu_locality; /* CPU relative cache distance */ ++ struct rq **rq_order; /* Shared RQs ordered by relative cache distance */ ++ struct rq **cpu_order; /* RQs of discrete CPUs ordered by distance */ ++ ++ struct rq *smp_leader; /* First physical CPU per node */ ++#ifdef CONFIG_SCHED_SMT ++ struct rq *smt_leader; /* First logical CPU in SMT siblings */ ++ cpumask_t thread_mask; ++ bool (*siblings_idle)(struct rq *rq); ++ /* See if all smt siblings are idle */ ++#endif /* CONFIG_SCHED_SMT */ ++#ifdef CONFIG_SCHED_MC ++ struct rq *mc_leader; /* First logical CPU in MC siblings */ ++ cpumask_t core_mask; ++ bool (*cache_idle)(struct rq *rq); ++ /* See if all cache siblings are idle */ ++#endif /* CONFIG_SCHED_MC */ ++#endif /* CONFIG_SMP */ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ u64 prev_irq_time; ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++#ifdef CONFIG_PARAVIRT ++ u64 prev_steal_time; ++#endif /* CONFIG_PARAVIRT */ ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING ++ u64 prev_steal_time_rq; ++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */ ++ ++ u64 clock, old_clock, last_tick; ++ /* Ensure that all clocks are in the same cache line */ ++ u64 clock_task ____cacheline_aligned; ++ int dither; ++ ++ int iso_ticks; ++ bool iso_refractory; ++ ++#ifdef CONFIG_HIGH_RES_TIMERS ++ struct hrtimer hrexpiry_timer; ++#endif ++ ++ int rt_nr_running; /* Number real time tasks running */ ++#ifdef CONFIG_SCHEDSTATS ++ ++ /* latency stats */ ++ struct sched_info rq_sched_info; ++ unsigned long long rq_cpu_time; ++ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? 
*/ ++ ++ /* sys_sched_yield() stats */ ++ unsigned int yld_count; ++ ++ /* schedule() stats */ ++ unsigned int sched_switch; ++ unsigned int sched_count; ++ unsigned int sched_goidle; ++ ++ /* try_to_wake_up() stats */ ++ unsigned int ttwu_count; ++ unsigned int ttwu_local; ++#endif /* CONFIG_SCHEDSTATS */ ++ ++#ifdef CONFIG_SMP ++ struct llist_head wake_list; ++#endif ++ ++#ifdef CONFIG_CPU_IDLE ++ /* Must be inspected within a rcu lock section */ ++ struct cpuidle_state *idle_state; ++#endif ++}; ++ ++struct rq_flags { ++ unsigned long flags; ++}; ++ ++#ifdef CONFIG_SMP ++struct rq *cpu_rq(int cpu); ++#endif ++ ++#ifndef CONFIG_SMP ++extern struct rq *uprq; ++#define cpu_rq(cpu) (uprq) ++#define this_rq() (uprq) ++#define raw_rq() (uprq) ++#define task_rq(p) (uprq) ++#define cpu_curr(cpu) ((uprq)->curr) ++#else /* CONFIG_SMP */ ++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); ++#define this_rq() this_cpu_ptr(&runqueues) ++#define raw_rq() raw_cpu_ptr(&runqueues) ++#define task_rq(p) cpu_rq(task_cpu(p)) ++#endif /* CONFIG_SMP */ ++ ++static inline int task_current(struct rq *rq, struct task_struct *p) ++{ ++ return rq->curr == p; ++} ++ ++static inline int task_running(struct rq *rq, struct task_struct *p) ++{ ++#ifdef CONFIG_SMP ++ return p->on_cpu; ++#else ++ return task_current(rq, p); ++#endif ++} ++ ++static inline int task_on_rq_queued(struct task_struct *p) ++{ ++ return p->on_rq == TASK_ON_RQ_QUEUED; ++} ++ ++static inline int task_on_rq_migrating(struct task_struct *p) ++{ ++ return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; ++} ++ ++static inline void rq_lock(struct rq *rq) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock(rq->lock); ++} ++ ++static inline void rq_unlock(struct rq *rq) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock(rq->lock); ++} ++ ++static inline void rq_lock_irq(struct rq *rq) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock_irq(rq->lock); ++} ++ ++static inline void rq_unlock_irq(struct rq *rq, struct rq_flags __always_unused *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock_irq(rq->lock); ++} ++ ++static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock_irqsave(rq->lock, rf->flags); ++} ++ ++static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock_irqrestore(rq->lock, rf->flags); ++} ++ ++static inline struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(p->pi_lock) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ while (42) { ++ raw_spin_lock_irqsave(&p->pi_lock, rf->flags); ++ rq = task_rq(p); ++ raw_spin_lock(rq->lock); ++ if (likely(rq == task_rq(p))) ++ break; ++ raw_spin_unlock(rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); ++ } ++ return rq; ++} ++ ++static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) ++ __releases(rq->lock) ++ __releases(p->pi_lock) ++{ ++ rq_unlock(rq); ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); ++} ++ ++static inline struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags __always_unused *rf) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ lockdep_assert_held(&p->pi_lock); ++ ++ while (42) { ++ rq = task_rq(p); ++ raw_spin_lock(rq->lock); ++ if (likely(rq == task_rq(p))) ++ break; ++ raw_spin_unlock(rq->lock); ++ } ++ return rq; ++} ++ ++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags __always_unused *rf) ++{ ++ rq_unlock(rq); ++} ++ ++static inline struct rq * 
++this_rq_lock_irq(struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ local_irq_disable(); ++ rq = this_rq(); ++ rq_lock(rq); ++ return rq; ++} ++ ++/* ++ * {de,en}queue flags: Most not used on MuQSS. ++ * ++ * DEQUEUE_SLEEP - task is no longer runnable ++ * ENQUEUE_WAKEUP - task just became runnable ++ * ++ * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks ++ * are in a known state which allows modification. Such pairs ++ * should preserve as much state as possible. ++ * ++ * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location ++ * in the runqueue. ++ * ++ * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) ++ * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) ++ * ENQUEUE_MIGRATED - the task was migrated during wakeup ++ * ++ */ ++ ++#define DEQUEUE_SLEEP 0x01 ++#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */ ++ ++#define ENQUEUE_WAKEUP 0x01 ++#define ENQUEUE_RESTORE 0x02 ++ ++#ifdef CONFIG_SMP ++#define ENQUEUE_MIGRATED 0x40 ++#else ++#define ENQUEUE_MIGRATED 0x00 ++#endif ++ ++static inline u64 __rq_clock_broken(struct rq *rq) ++{ ++ return READ_ONCE(rq->clock); ++} ++ ++static inline u64 rq_clock(struct rq *rq) ++{ ++ lockdep_assert_held(rq->lock); ++ ++ return rq->clock; ++} ++ ++static inline u64 rq_clock_task(struct rq *rq) ++{ ++ lockdep_assert_held(rq->lock); ++ ++ return rq->clock_task; ++} ++ ++#ifdef CONFIG_NUMA ++enum numa_topology_type { ++ NUMA_DIRECT, ++ NUMA_GLUELESS_MESH, ++ NUMA_BACKPLANE, ++}; ++extern enum numa_topology_type sched_numa_topology_type; ++extern int sched_max_numa_distance; ++extern bool find_numa_distance(int distance); ++ ++extern void sched_init_numa(void); ++extern void sched_domains_numa_masks_set(unsigned int cpu); ++extern void sched_domains_numa_masks_clear(unsigned int cpu); ++#else ++static inline void sched_init_numa(void) { } ++static inline void sched_domains_numa_masks_set(unsigned int cpu) { } ++static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } ++#endif ++ ++extern struct mutex sched_domains_mutex; ++extern struct static_key_false sched_schedstats; ++ ++#define rcu_dereference_check_sched_domain(p) \ ++ rcu_dereference_check((p), \ ++ lockdep_is_held(&sched_domains_mutex)) ++ ++#ifdef CONFIG_SMP ++ ++/* ++ * The domain tree (rq->sd) is protected by RCU's quiescent state transition. ++ * See destroy_sched_domains: call_rcu for details. ++ * ++ * The domain tree of any CPU may only be accessed from within ++ * preempt-disabled sections. ++ */ ++#define for_each_domain(cpu, __sd) \ ++ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ ++ __sd; __sd = __sd->parent) ++ ++#define for_each_lower_domain(sd) for (; sd; sd = sd->child) ++ ++/** ++ * highest_flag_domain - Return highest sched_domain containing flag. ++ * @cpu: The cpu whose highest level of sched domain is to ++ * be returned. ++ * @flag: The flag to check for the highest sched_domain ++ * for the given cpu. ++ * ++ * Returns the highest sched_domain of a cpu which contains the given flag. 
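++ *
++ * For example, the mainline scheduler derives sd_llc via
++ * highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES) and sd_numa via
++ * lowest_flag_domain(cpu, SD_NUMA); both helpers appear to be carried
++ * here unchanged.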
++ */ ++static inline struct sched_domain *highest_flag_domain(int cpu, int flag) ++{ ++ struct sched_domain *sd, *hsd = NULL; ++ ++ for_each_domain(cpu, sd) { ++ if (!(sd->flags & flag)) ++ break; ++ hsd = sd; ++ } ++ ++ return hsd; ++} ++ ++static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) ++{ ++ struct sched_domain *sd; ++ ++ for_each_domain(cpu, sd) { ++ if (sd->flags & flag) ++ break; ++ } ++ ++ return sd; ++} ++ ++DECLARE_PER_CPU(struct sched_domain *, sd_llc); ++DECLARE_PER_CPU(int, sd_llc_size); ++DECLARE_PER_CPU(int, sd_llc_id); ++DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); ++DECLARE_PER_CPU(struct sched_domain *, sd_numa); ++DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing); ++DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity); ++ ++struct sched_group_capacity { ++ atomic_t ref; ++ /* ++ * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity ++ * for a single CPU. ++ */ ++ unsigned long capacity; ++ unsigned long min_capacity; /* Min per-CPU capacity in group */ ++ unsigned long max_capacity; /* Max per-CPU capacity in group */ ++ unsigned long next_update; ++ int imbalance; /* XXX unrelated to capacity but shared group state */ ++ ++#ifdef CONFIG_SCHED_DEBUG ++ int id; ++#endif ++ ++ unsigned long cpumask[0]; /* balance mask */ ++}; ++ ++struct sched_group { ++ struct sched_group *next; /* Must be a circular list */ ++ atomic_t ref; ++ ++ unsigned int group_weight; ++ struct sched_group_capacity *sgc; ++ int asym_prefer_cpu; /* cpu of highest priority in group */ ++ ++ /* ++ * The CPUs this group covers. ++ * ++ * NOTE: this field is variable length. (Allocated dynamically ++ * by attaching extra space to the end of the structure, ++ * depending on how many CPUs the kernel has booted up with) ++ */ ++ unsigned long cpumask[0]; ++}; ++ ++static inline struct cpumask *sched_group_span(struct sched_group *sg) ++{ ++ return to_cpumask(sg->cpumask); ++} ++ ++/* ++ * See build_balance_mask(). ++ */ ++static inline struct cpumask *group_balance_mask(struct sched_group *sg) ++{ ++ return to_cpumask(sg->sgc->cpumask); ++} ++ ++/** ++ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. ++ * @group: The group whose first cpu is to be returned. 
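++ *
++ * Illustrative only: for a hypothetical group spanning CPUs {2,3} this
++ * returns 2, i.e. cpumask_first() of the group's span.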
++ */
++static inline unsigned int group_first_cpu(struct sched_group *group)
++{
++ return cpumask_first(sched_group_span(group));
++}
++
++
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void dirty_sched_domain_sysctl(int cpu);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void dirty_sched_domain_sysctl(int cpu)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern void sched_ttwu_pending(void);
++extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
++extern void set_rq_online (struct rq *rq);
++extern void set_rq_offline(struct rq *rq);
++extern bool sched_smp_initialized;
++
++static inline void update_group_capacity(struct sched_domain *sd, int cpu)
++{
++}
++
++static inline void trigger_load_balance(struct rq *rq)
++{
++}
++
++#define sched_feat(x) 0
++
++#else /* CONFIG_SMP */
++
++static inline void sched_ttwu_pending(void) { }
++
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++ struct cpuidle_state *idle_state)
++{
++ rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++ SCHED_WARN_ON(!rcu_read_lock_held());
++ return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++ struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++ return NULL;
++}
++#endif
++
++#ifdef CONFIG_SCHED_DEBUG
++extern bool sched_debug_enabled;
++#endif
++
++extern void schedule_idle(void);
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++ u64 total;
++ u64 tick_delta;
++ u64 irq_start_time;
++ struct u64_stats_sync sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted from it and never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++ unsigned int seq;
++ u64 total;
++
++ do {
++ seq = __u64_stats_fetch_begin(&irqtime->sync);
++ total = irqtime->total;
++ } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++ return total;
++}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_SMP
++static inline int cpu_of(struct rq *rq)
++{
++ return rq->cpu;
++}
++#else /* CONFIG_SMP */
++static inline int cpu_of(struct rq *rq)
++{
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
++
++static inline void cpufreq_trigger(struct rq *rq, unsigned int flags)
++{
++ struct update_util_data *data;
++
++ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++ cpu_of(rq)));
++
++ if (data)
++ data->func(data, rq->niffies, flags);
++}
++#else
++static inline void cpufreq_trigger(struct rq *rq, unsigned int flag)
++{
++}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant() (true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant() (false)
++#endif
++
++/*
++ * This should only be called when current == rq->idle. Dodgy workaround for
++ * when softirqs are pending and we are in the idle loop: flagging current as
++ * needing a reschedule kicks us out of the idle loop, and the softirqs are
++ * then serviced on our next pass through schedule().
++ */
++static inline bool softirq_pending(int cpu)
++{
++ if (likely(!local_softirq_pending()))
++ return false;
++ set_tsk_need_resched(current);
++ return true;
++}
++
++#ifdef CONFIG_64BIT
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ return tsk_seruntime(t);
++}
++#else
++static inline u64 read_sum_exec_runtime(struct task_struct *t)
++{
++ struct rq_flags rf;
++ u64 ns;
++ struct rq *rq;
++
++ rq = task_rq_lock(t, &rf);
++ ns = tsk_seruntime(t);
++ task_rq_unlock(rq, t, &rf);
++
++ return ns;
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++ return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++#ifdef CONFIG_NO_HZ_FULL
++extern bool sched_can_stop_tick(struct rq *rq);
++extern int __init sched_tick_offload_init(void);
++
++/*
++ * The tick may be needed by tasks in the runqueue depending on their policy
++ * and requirements. If the tick is needed, let's send the target CPU an IPI
++ * to kick it out of nohz mode.
++ */
++static inline void sched_update_tick_dependency(struct rq *rq)
++{
++ int cpu;
++
++ if (!tick_nohz_full_enabled())
++ return;
++
++ cpu = cpu_of(rq);
++
++ if (!tick_nohz_full_cpu(cpu))
++ return;
++
++ if (sched_can_stop_tick(rq))
++ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++ else
++ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++#else
++static inline int sched_tick_offload_init(void) { return 0; }
++static inline void sched_update_tick_dependency(struct rq *rq) { }
++#endif
++
++#define SCHED_FLAG_SUGOV 0x10000000
++
++static inline bool rt_rq_is_runnable(struct rq *rt_rq)
++{
++ return rt_rq->rt_nr_running;
++}
++
++#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
++/**
++ * enum schedutil_type - CPU utilization type
++ * @FREQUENCY_UTIL: Utilization used to select frequency
++ * @ENERGY_UTIL: Utilization used during energy calculation
++ *
++ * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
++ * need to be aggregated differently depending on the usage made of them. This
++ * enum is used within schedutil_freq_util() to differentiate the types of
++ * utilization expected by the callers, and adjust the aggregation accordingly.
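++ *
++ * Sketch of the call shape (util_cfs and max are assumed to be supplied by
++ * the caller, e.g. from cpu_util_cfs() and arch_scale_cpu_capacity()):
++ *
++ *   util = schedutil_freq_util(cpu, util_cfs, max, FREQUENCY_UTIL);
++ *   nrg  = schedutil_freq_util(cpu, util_cfs, max, ENERGY_UTIL);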
++ */ ++enum schedutil_type { ++ FREQUENCY_UTIL, ++ ENERGY_UTIL, ++}; ++ ++unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs, ++ unsigned long max, enum schedutil_type type); ++ ++static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs) ++{ ++ unsigned long max = arch_scale_cpu_capacity(NULL, cpu); ++ ++ return schedutil_freq_util(cpu, cfs, max, ENERGY_UTIL); ++} ++ ++static inline unsigned long cpu_bw_dl(struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline unsigned long cpu_util_dl(struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline unsigned long cpu_util_cfs(struct rq *rq) ++{ ++ unsigned long ret = READ_ONCE(rq->load_avg); ++ ++ if (ret > SCHED_CAPACITY_SCALE) ++ ret = SCHED_CAPACITY_SCALE; ++ return ret; ++} ++ ++static inline unsigned long cpu_util_rt(struct rq *rq) ++{ ++ unsigned long ret = READ_ONCE(rq->rt_nr_running); ++ ++ if (ret > SCHED_CAPACITY_SCALE) ++ ret = SCHED_CAPACITY_SCALE; ++ return ret; ++} ++ ++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ ++static inline unsigned long cpu_util_irq(struct rq *rq) ++{ ++ unsigned long ret = READ_ONCE(rq->irq_load_avg); ++ ++ if (ret > SCHED_CAPACITY_SCALE) ++ ret = SCHED_CAPACITY_SCALE; ++ return ret; ++} ++ ++static inline ++unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) ++{ ++ util *= (max - irq); ++ util /= max; ++ ++ return util; ++ ++} ++#else ++static inline unsigned long cpu_util_irq(struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline ++unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) ++{ ++ return util; ++} ++#endif ++#endif ++ ++#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) ++#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) ++ ++DECLARE_STATIC_KEY_FALSE(sched_energy_present); ++ ++static inline bool sched_energy_enabled(void) ++{ ++ return static_branch_unlikely(&sched_energy_present); ++} ++ ++#else /* ! 
(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ ++ ++#define perf_domain_span(pd) NULL ++static inline bool sched_energy_enabled(void) { return false; } ++ ++#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ ++#endif /* MUQSS_SCHED_H */ +diff -Nur a/kernel/sched/sched.h b/kernel/sched/sched.h +--- a/kernel/sched/sched.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/sched/sched.h 2019-07-07 09:17:41.261241813 +0100 +@@ -2,6 +2,19 @@ + /* + * Scheduler internal types and methods: + */ ++#ifdef CONFIG_SCHED_MUQSS ++#include "MuQSS.h" ++ ++/* Begin compatibility wrappers for MuQSS/CFS differences */ ++#define rq_rt_nr_running(rq) ((rq)->rt_nr_running) ++#define rq_h_nr_running(rq) ((rq)->nr_running) ++ ++#else /* CONFIG_SCHED_MUQSS */ ++ ++#define rq_rt_nr_running(rq) ((rq)->rt.rt_nr_running) ++#define rq_h_nr_running(rq) ((rq)->cfs.h_nr_running) ++ ++ + #include <linux/sched.h> + + #include <linux/sched/autogroup.h> +@@ -2341,3 +2354,30 @@ + static inline bool sched_energy_enabled(void) { return false; } + + #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ ++ ++/* MuQSS compatibility functions */ ++static inline bool softirq_pending(int cpu) ++{ ++ return false; ++} ++ ++#ifdef CONFIG_64BIT ++static inline u64 read_sum_exec_runtime(struct task_struct *t) ++{ ++ return t->se.sum_exec_runtime; ++} ++#else ++static inline u64 read_sum_exec_runtime(struct task_struct *t) ++{ ++ u64 ns; ++ struct rq_flags rf; ++ struct rq *rq; ++ ++ rq = task_rq_lock(t, &rf); ++ ns = t->se.sum_exec_runtime; ++ task_rq_unlock(rq, t, &rf); ++ ++ return ns; ++} ++#endif ++#endif /* CONFIG_SCHED_MUQSS */ +diff -Nur a/kernel/sched/topology.c b/kernel/sched/topology.c +--- a/kernel/sched/topology.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/sched/topology.c 2019-07-07 09:17:41.261241813 +0100 +@@ -442,7 +442,11 @@ + struct root_domain *old_rd = NULL; + unsigned long flags; + ++#ifdef CONFIG_SCHED_MUQSS ++ raw_spin_lock_irqsave(rq->lock, flags); ++#else + raw_spin_lock_irqsave(&rq->lock, flags); ++#endif + + if (rq->rd) { + old_rd = rq->rd; +@@ -468,7 +472,11 @@ + if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) + set_rq_online(rq); + ++#ifdef CONFIG_SCHED_MUQSS ++ raw_spin_unlock_irqrestore(rq->lock, flags); ++#else + raw_spin_unlock_irqrestore(&rq->lock, flags); ++#endif + + if (old_rd) + call_rcu(&old_rd->rcu, free_rootdomain); +diff -Nur a/kernel/skip_list.c b/kernel/skip_list.c +--- a/kernel/skip_list.c 1970-01-01 01:00:00.000000000 +0100 ++++ b/kernel/skip_list.c 2019-07-07 09:17:41.261241813 +0100 +@@ -0,0 +1,148 @@ ++/* ++ Copyright (C) 2011,2016 Con Kolivas. ++ ++ Code based on example originally by William Pugh. ++ ++Skip Lists are a probabilistic alternative to balanced trees, as ++described in the June 1990 issue of CACM and were invented by ++William Pugh in 1987. ++ ++A couple of comments about this implementation: ++The routine randomLevel has been hard-coded to generate random ++levels using p=0.25. It can be easily changed. ++ ++The insertion routine has been implemented so as to use the ++dirty hack described in the CACM paper: if a random level is ++generated that is more than the current maximum level, the ++current maximum level plus one is used instead. ++ ++Levels start at zero and go up to MaxLevel (which is equal to ++MaxNumberOfLevels-1). ++ ++The routines defined in this file are: ++ ++init: defines slnode ++ ++new_skiplist: returns a new, empty list ++ ++randomLevel: Returns a random level based on a u64 random seed passed to it. 
++In MuQSS, the "niffy" time is used for this purpose.
++
++insert(l, key, value): inserts the binding (key, value) into l. This operation
++occurs in O(log n) time.
++
++delnode(slnode, l, node): deletes any binding of key from the list l based
++on the actual node value. This operation occurs in O(k) time where k is the
++number of levels of the node in question (max 8). The original delete
++function occurred in O(log n) time and involved a search.
++
++MuQSS Notes: In this implementation of skiplists, there are bidirectional
++next/prev pointers and the insert function returns a pointer to the actual
++node where the value is stored. The key here is chosen by the scheduler so as
++to sort tasks according to the priority list requirements and is no longer used
++by the scheduler after insertion. The scheduler lookup, however, occurs in
++O(1) time because it is always the first item in the level 0 linked list.
++Since the task struct stores a copy of the node pointer upon skiplist_insert,
++it can also remove it much faster than the original implementation with the
++aid of prev<->next pointer manipulation and no searching.
++
++*/
++
++#include <linux/slab.h>
++#include <linux/skip_list.h>
++
++#define MaxNumberOfLevels 8
++#define MaxLevel (MaxNumberOfLevels - 1)
++
++void skiplist_init(skiplist_node *slnode)
++{
++ int i;
++
++ slnode->key = 0xFFFFFFFFFFFFFFFF;
++ slnode->level = 0;
++ slnode->value = NULL;
++ for (i = 0; i < MaxNumberOfLevels; i++)
++ slnode->next[i] = slnode->prev[i] = slnode;
++}
++
++skiplist *new_skiplist(skiplist_node *slnode)
++{
++ skiplist *l = kzalloc(sizeof(skiplist), GFP_ATOMIC);
++
++ BUG_ON(!l);
++ l->header = slnode;
++ return l;
++}
++
++void free_skiplist(skiplist *l)
++{
++ skiplist_node *p, *q;
++
++ p = l->header;
++ do {
++ q = p->next[0];
++ p->next[0]->prev[0] = q->prev[0];
++ skiplist_node_init(p);
++ p = q;
++ } while (p != l->header);
++ kfree(l);
++}
++
++void skiplist_node_init(skiplist_node *node)
++{
++ memset(node, 0, sizeof(skiplist_node));
++}
++
++static inline unsigned int randomLevel(const long unsigned int randseed)
++{
++ return find_first_bit(&randseed, MaxLevel) / 2;
++}
++
++void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed)
++{
++ skiplist_node *update[MaxNumberOfLevels];
++ skiplist_node *p, *q;
++ int k = l->level;
++
++ p = l->header;
++ do {
++ while (q = p->next[k], q->key <= key)
++ p = q;
++ update[k] = p;
++ } while (--k >= 0);
++
++ ++l->entries;
++ k = randomLevel(randseed);
++ if (k > l->level) {
++ k = ++l->level;
++ update[k] = l->header;
++ }
++
++ node->level = k;
++ node->key = key;
++ node->value = value;
++ do {
++ p = update[k];
++ node->next[k] = p->next[k];
++ p->next[k] = node;
++ node->prev[k] = p;
++ node->next[k]->prev[k] = node;
++ } while (--k >= 0);
++}
++
++void skiplist_delete(skiplist *l, skiplist_node *node)
++{
++ int k, m = node->level;
++
++ for (k = 0; k <= m; k++) {
++ node->prev[k]->next[k] = node->next[k];
++ node->next[k]->prev[k] = node->prev[k];
++ }
++ skiplist_node_init(node);
++ if (m == l->level) {
++ while (l->header->next[m] == l->header && l->header->prev[m] == l->header && m > 0)
++ m--;
++ l->level = m;
++ }
++ l->entries--;
++}
+diff -Nur a/kernel/sysctl.c b/kernel/sysctl.c
+--- a/kernel/sysctl.c 2019-07-07 09:08:19.152348621 +0100
++++ b/kernel/sysctl.c 2019-07-07 09:23:47.863548280 +0100
+@@ -141,6 +141,12 @@
+ static unsigned long long_max __read_only = LONG_MAX;
+ static int one_hundred __read_only = 100;
+ static int one_thousand __read_only = 1000;
++#ifdef CONFIG_SCHED_MUQSS
++extern int rr_interval;
++extern int sched_interactive;
++extern int sched_iso_cpu;
++extern int sched_yield_type;
++#endif
+ #ifdef CONFIG_PRINTK
+ static int ten_thousand __read_only = 10000;
+ #endif
+@@ -316,7 +322,7 @@
+ { }
+ };
+
+-#ifdef CONFIG_SCHED_DEBUG
++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_MUQSS)
+ static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
+ static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
+ static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
+@@ -333,6 +339,7 @@
+ #endif
+
+ static struct ctl_table kern_table[] = {
++#ifndef CONFIG_SCHED_MUQSS
+ {
+ .procname = "sched_child_runs_first",
+ .data = &sysctl_sched_child_runs_first,
+@@ -498,6 +505,7 @@
+ .extra2 = &one,
+ },
+ #endif
++#endif /* !CONFIG_SCHED_MUQSS */
+ #ifdef CONFIG_PROVE_LOCKING
+ {
+ .procname = "prove_locking",
+@@ -1110,6 +1118,44 @@
+ .proc_handler = proc_dointvec,
+ },
+ #endif
++#ifdef CONFIG_SCHED_MUQSS
++ {
++ .procname = "rr_interval",
++ .data = &rr_interval,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &one_thousand,
++ },
++ {
++ .procname = "interactive",
++ .data = &sched_interactive,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++ {
++ .procname = "iso_cpu",
++ .data = &sched_iso_cpu,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one_hundred,
++ },
++ {
++ .procname = "yield_type",
++ .data = &sched_yield_type,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &two,
++ },
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ {
+ .procname = "spin_retry",
+diff -Nur a/kernel/time/clockevents.c b/kernel/time/clockevents.c
+--- a/kernel/time/clockevents.c 2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/time/clockevents.c 2019-07-07 09:17:41.261241813 +0100
+@@ -190,8 +190,13 @@
+
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
+
++#ifdef CONFIG_SCHED_MUQSS
++/* Limit min_delta to 100us */
++#define MIN_DELTA_LIMIT (NSEC_PER_SEC / 10000)
++#else
+ /* Limit min_delta to a jiffie */
+ #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
++#endif
+
+ /**
+ * clockevents_increase_min_delta - raise minimum delta of a clock event device
+diff -Nur a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+--- a/kernel/time/posix-cpu-timers.c 2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/time/posix-cpu-timers.c 2019-07-07 09:17:41.261241813 +0100
+@@ -829,7 +829,7 @@
+ tsk_expires->virt_exp = expires;
+
+ tsk_expires->sched_exp = check_timers_list(++timers, firing,
+- tsk->se.sum_exec_runtime);
++ tsk_seruntime(tsk));
+
+ /*
+ * Check for the special case thread timers.
+@@ -839,7 +839,7 @@
+ unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+
+ if (hard != RLIM_INFINITY &&
+- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
++ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+ /*
+ * At the hard limit, we just die.
+ * No need to calculate anything else now.
+@@ -851,7 +851,7 @@
+ __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+ return;
+ }
+- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
++ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
+ /*
+ * At the soft limit, send a SIGXCPU every second.
+ */
+@@ -1091,7 +1091,7 @@
+ struct task_cputime task_sample;
+
+ task_cputime(tsk, &task_sample.utime, &task_sample.stime);
+- task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
++ task_sample.sum_exec_runtime = tsk_seruntime(tsk);
+ if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+ return 1;
+ }
+diff -Nur a/kernel/time/timer.c b/kernel/time/timer.c
+--- a/kernel/time/timer.c 2019-07-07 09:08:19.152348621 +0100
++++ b/kernel/time/timer.c 2019-07-07 09:17:41.271242152 +0100
+@@ -1478,7 +1478,7 @@
+ * Check, if the next hrtimer event is before the next timer wheel
+ * event:
+ */
+-static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
++static u64 cmp_next_hrtimer_event(struct timer_base *base, u64 basem, u64 expires)
+ {
+ u64 nextevt = hrtimer_get_next_event();
+
+@@ -1496,6 +1496,9 @@
+ if (nextevt <= basem)
+ return basem;
+
++ if (nextevt < expires && nextevt - basem <= TICK_NSEC)
++ base->is_idle = false;
++
+ /*
+ * Round up to the next jiffie. High resolution timers are
+ * off, so the hrtimers are expired in the tick and we need to
+@@ -1565,7 +1568,7 @@
+ }
+ raw_spin_unlock(&base->lock);
+
+- return cmp_next_hrtimer_event(basem, expires);
++ return cmp_next_hrtimer_event(base, basem, expires);
+ }
+
+ /**
+diff -Nur a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+--- a/kernel/trace/trace_selftest.c 2019-06-25 04:34:56.000000000 +0100
++++ b/kernel/trace/trace_selftest.c 2019-07-07 09:17:41.271242152 +0100
+@@ -1045,10 +1045,15 @@
+ {
+ /* Make this a -deadline thread */
+ static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_MUQSS
++ /* No deadline on MuQSS, use RR */
++ .sched_policy = SCHED_RR,
++#else
+ .sched_policy = SCHED_DEADLINE,
+ .sched_runtime = 100000ULL,
+ .sched_deadline = 10000000ULL,
+ .sched_period = 10000000ULL
++#endif
+ };
+ struct wakeup_test_data *x = data;
+
diff --git a/sys-kernel/linux-image-redcore/files/5.1-0002-Fix-Werror-build-failure-in-tools.patch b/sys-kernel/linux-image-redcore/files/5.1-0002-Fix-Werror-build-failure-in-tools.patch new file mode 100644 index 00000000..c8c39888 --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0002-Fix-Werror-build-failure-in-tools.patch @@ -0,0 +1,25 @@ +From 89b8d55e743d382f463526832cf8b8a4f8cf32ff Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Sun, 18 Feb 2018 12:36:22 +1100
+Subject: [PATCH 02/16] Fix Werror build failure in tools.
+ +--- + tools/objtool/Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile +index 53f8be0f4a1f..ad2c11a881db 100644 +--- a/tools/objtool/Makefile ++++ b/tools/objtool/Makefile +@@ -34,7 +34,7 @@ INCLUDES := -I$(srctree)/tools/include \ + -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ + -I$(srctree)/tools/objtool/arch/$(ARCH)/include + WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed +-CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS) ++CFLAGS += $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS) + LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS) + + # Allow old libelf to be used: +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0003-Make-preemptible-kernel-default.patch b/sys-kernel/linux-image-redcore/files/5.1-0003-Make-preemptible-kernel-default.patch new file mode 100644 index 00000000..ec621e09 --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0003-Make-preemptible-kernel-default.patch @@ -0,0 +1,4653 @@ +From 4caf76327e0d7e1c25b40dbbf7294cc80a2d167c Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Sat, 29 Oct 2016 11:20:37 +1100 +Subject: [PATCH 03/16] Make preemptible kernel default. + +Make full preempt default on all arches. +--- + arch/arc/configs/tb10x_defconfig | 2 +- + arch/arm/configs/bcm2835_defconfig | 2 +- + arch/arm/configs/imx_v6_v7_defconfig | 1 + + arch/arm/configs/mps2_defconfig | 2 +- + arch/arm/configs/mxs_defconfig | 7 +- + arch/blackfin/configs/BF518F-EZBRD_defconfig | 121 ++++ + arch/blackfin/configs/BF526-EZBRD_defconfig | 158 ++++++ + .../blackfin/configs/BF527-EZKIT-V2_defconfig | 188 +++++++ + arch/blackfin/configs/BF527-EZKIT_defconfig | 181 ++++++ + .../blackfin/configs/BF527-TLL6527M_defconfig | 178 ++++++ + arch/blackfin/configs/BF533-EZKIT_defconfig | 114 ++++ + arch/blackfin/configs/BF533-STAMP_defconfig | 124 +++++ + arch/blackfin/configs/BF537-STAMP_defconfig | 136 +++++ + arch/blackfin/configs/BF538-EZKIT_defconfig | 133 +++++ + arch/blackfin/configs/BF548-EZKIT_defconfig | 207 +++++++ + arch/blackfin/configs/BF561-ACVILON_defconfig | 149 +++++ + .../configs/BF561-EZKIT-SMP_defconfig | 112 ++++ + arch/blackfin/configs/BF561-EZKIT_defconfig | 114 ++++ + arch/blackfin/configs/BF609-EZKIT_defconfig | 154 +++++ + arch/blackfin/configs/BlackStamp_defconfig | 108 ++++ + arch/blackfin/configs/CM-BF527_defconfig | 129 +++++ + arch/blackfin/configs/PNAV-10_defconfig | 111 ++++ + arch/blackfin/configs/SRV1_defconfig | 88 +++ + arch/blackfin/configs/TCM-BF518_defconfig | 131 +++++ + arch/mips/configs/fuloong2e_defconfig | 2 +- + arch/mips/configs/gpr_defconfig | 2 +- + arch/mips/configs/ip22_defconfig | 2 +- + arch/mips/configs/ip28_defconfig | 2 +- + arch/mips/configs/jazz_defconfig | 2 +- + arch/mips/configs/mtx1_defconfig | 2 +- + arch/mips/configs/nlm_xlr_defconfig | 2 +- + arch/mips/configs/pic32mzda_defconfig | 2 +- + arch/mips/configs/pistachio_defconfig | 2 +- + arch/mips/configs/pnx8335_stb225_defconfig | 2 +- + arch/mips/configs/rm200_defconfig | 2 +- + arch/parisc/configs/712_defconfig | 2 +- + arch/parisc/configs/c3000_defconfig | 2 +- + arch/parisc/configs/default_defconfig | 2 +- + arch/powerpc/configs/c2k_defconfig | 389 +++++++++++++ + arch/powerpc/configs/ppc6xx_defconfig | 2 +- + arch/score/configs/spct6600_defconfig | 84 +++ + arch/sh/configs/se7712_defconfig | 2 +- + arch/sh/configs/se7721_defconfig | 2 +- + 
arch/sh/configs/titan_defconfig | 2 +- + arch/sparc/configs/sparc64_defconfig | 2 +- + arch/tile/configs/tilegx_defconfig | 411 ++++++++++++++ + arch/tile/configs/tilepro_defconfig | 524 ++++++++++++++++++ + arch/x86/configs/i386_defconfig | 2 +- + arch/x86/configs/x86_64_defconfig | 2 +- + kernel/Kconfig.preempt | 7 +- + 50 files changed, 4079 insertions(+), 28 deletions(-) + create mode 100644 arch/blackfin/configs/BF518F-EZBRD_defconfig + create mode 100644 arch/blackfin/configs/BF526-EZBRD_defconfig + create mode 100644 arch/blackfin/configs/BF527-EZKIT-V2_defconfig + create mode 100644 arch/blackfin/configs/BF527-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BF527-TLL6527M_defconfig + create mode 100644 arch/blackfin/configs/BF533-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BF533-STAMP_defconfig + create mode 100644 arch/blackfin/configs/BF537-STAMP_defconfig + create mode 100644 arch/blackfin/configs/BF538-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BF548-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BF561-ACVILON_defconfig + create mode 100644 arch/blackfin/configs/BF561-EZKIT-SMP_defconfig + create mode 100644 arch/blackfin/configs/BF561-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BF609-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BlackStamp_defconfig + create mode 100644 arch/blackfin/configs/CM-BF527_defconfig + create mode 100644 arch/blackfin/configs/PNAV-10_defconfig + create mode 100644 arch/blackfin/configs/SRV1_defconfig + create mode 100644 arch/blackfin/configs/TCM-BF518_defconfig + create mode 100644 arch/powerpc/configs/c2k_defconfig + create mode 100644 arch/score/configs/spct6600_defconfig + create mode 100644 arch/tile/configs/tilegx_defconfig + create mode 100644 arch/tile/configs/tilepro_defconfig + +diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig +index 5b5119d2b5d5..7425d2ec3a82 100644 +--- a/arch/arc/configs/tb10x_defconfig ++++ b/arch/arc/configs/tb10x_defconfig +@@ -29,7 +29,7 @@ CONFIG_ARC_PLAT_TB10X=y + CONFIG_ARC_CACHE_LINE_SHIFT=5 + CONFIG_HZ=250 + CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk" +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + # CONFIG_COMPACTION is not set + CONFIG_NET=y + CONFIG_PACKET=y +diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig +index dcf7610cfe55..d15cd13aa944 100644 +--- a/arch/arm/configs/bcm2835_defconfig ++++ b/arch/arm/configs/bcm2835_defconfig +@@ -29,7 +29,7 @@ CONFIG_MODULE_UNLOAD=y + CONFIG_ARCH_MULTI_V6=y + CONFIG_ARCH_BCM=y + CONFIG_ARCH_BCM2835=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_AEABI=y + CONFIG_KSM=y + CONFIG_CLEANCACHE=y +diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig +index 50fb01d70b10..5f2960f1853f 100644 +--- a/arch/arm/configs/imx_v6_v7_defconfig ++++ b/arch/arm/configs/imx_v6_v7_defconfig +@@ -45,6 +45,7 @@ CONFIG_PCI_MSI=y + CONFIG_PCI_IMX6=y + CONFIG_SMP=y + CONFIG_ARM_PSCI=y ++CONFIG_PREEMPT=y + CONFIG_HIGHMEM=y + CONFIG_FORCE_MAX_ZONEORDER=14 + CONFIG_CMDLINE="noinitrd console=ttymxc0,115200" +diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig +index 1d923dbb9928..9c1931f1fafd 100644 +--- a/arch/arm/configs/mps2_defconfig ++++ b/arch/arm/configs/mps2_defconfig +@@ -18,7 +18,7 @@ CONFIG_ARCH_MPS2=y + CONFIG_SET_MEM_PARAM=y + CONFIG_DRAM_BASE=0x21000000 + CONFIG_DRAM_SIZE=0x1000000 +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + # CONFIG_ATAGS is not set + 
CONFIG_ZBOOT_ROM_TEXT=0x0 + CONFIG_ZBOOT_ROM_BSS=0x0 +diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig +index 38480596c449..d509ff66f73a 100644 +--- a/arch/arm/configs/mxs_defconfig ++++ b/arch/arm/configs/mxs_defconfig +@@ -1,7 +1,7 @@ + CONFIG_SYSVIPC=y + CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT_VOLUNTARY=n + CONFIG_TASKSTATS=y + CONFIG_TASK_DELAY_ACCT=y + CONFIG_TASK_XACCT=y +@@ -27,6 +27,11 @@ CONFIG_MODVERSIONS=y + CONFIG_BLK_DEV_INTEGRITY=y + # CONFIG_IOSCHED_DEADLINE is not set + # CONFIG_IOSCHED_CFQ is not set ++# CONFIG_ARCH_MULTI_V7 is not set ++CONFIG_ARCH_MXS=y ++# CONFIG_ARM_THUMB is not set ++CONFIG_PREEMPT=y ++CONFIG_AEABI=y + CONFIG_NET=y + CONFIG_PACKET=y + CONFIG_UNIX=y +diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig +new file mode 100644 +index 000000000000..39b91dfa55b5 +--- /dev/null ++++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig +@@ -0,0 +1,121 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF518=y ++CONFIG_IRQ_TIMER0=12 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_JEDECPROBE=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++CONFIG_NET_BFIN=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set 
++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_USB_SUPPORT is not set ++CONFIG_MMC=y ++CONFIG_SDH_BFIN=y ++CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=m ++# CONFIG_DNOTIFY is not set ++CONFIG_VFAT_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRC_CCITT=m +diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig +new file mode 100644 +index 000000000000..675cadb3a0c4 +--- /dev/null ++++ b/arch/blackfin/configs/BF526-EZBRD_defconfig +@@ -0,0 +1,158 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF526=y ++CONFIG_IRQ_TIMER0=12 ++CONFIG_BFIN526_EZBRD=y ++CONFIG_IRQ_USB_INT0=11 ++CONFIG_IRQ_USB_INT1=11 ++CONFIG_IRQ_USB_INT2=11 ++CONFIG_IRQ_USB_DMA=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_NAND=m ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_SCSI=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++CONFIG_BLK_DEV_SR=m ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_BFIN=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT_FF_MEMLESS=m ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_KEYBOARD is not set ++# 
CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_HID_A4TECH=y ++CONFIG_HID_APPLE=y ++CONFIG_HID_BELKIN=y ++CONFIG_HID_CHERRY=y ++CONFIG_HID_CHICONY=y ++CONFIG_HID_CYPRESS=y ++CONFIG_HID_EZKEY=y ++CONFIG_HID_GYRATION=y ++CONFIG_HID_LOGITECH=y ++CONFIG_HID_MICROSOFT=y ++CONFIG_HID_MONTEREY=y ++CONFIG_HID_PANTHERLORD=y ++CONFIG_HID_PETALYNX=y ++CONFIG_HID_SAMSUNG=y ++CONFIG_HID_SONY=y ++CONFIG_HID_SUNPLUS=y ++CONFIG_USB=y ++# CONFIG_USB_DEVICE_CLASS is not set ++CONFIG_USB_OTG_BLACKLIST_HUB=y ++CONFIG_USB_MON=y ++CONFIG_USB_STORAGE=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=m ++# CONFIG_DNOTIFY is not set ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_VFAT_FS=m ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRC_CCITT=m +diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig +new file mode 100644 +index 000000000000..4c517c443af5 +--- /dev/null ++++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig +@@ -0,0 +1,188 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF527=y ++CONFIG_BF_REV_0_2=y ++CONFIG_BFIN527_EZKIT_V2=y ++CONFIG_IRQ_USB_INT0=11 ++CONFIG_IRQ_USB_INT1=11 ++CONFIG_IRQ_USB_INT2=11 ++CONFIG_IRQ_USB_DMA=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++CONFIG_BFIN_SIR0=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set 
++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_JEDECPROBE=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_NAND=m ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_SCSI=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++CONFIG_BLK_DEV_SR=m ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_BFIN=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT_FF_MEMLESS=m ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=y ++CONFIG_KEYBOARD_ADP5520=y ++# CONFIG_KEYBOARD_ATKBD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_AD7879=y ++CONFIG_TOUCHSCREEN_AD7879_I2C=y ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_PMIC_ADP5520=y ++CONFIG_FB=y ++CONFIG_FB_BFIN_LQ035Q1=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_LOGO=y ++# CONFIG_LOGO_LINUX_MONO is not set ++# CONFIG_LOGO_LINUX_VGA16 is not set ++# CONFIG_LOGO_LINUX_CLUT224 is not set ++# CONFIG_LOGO_BLACKFIN_VGA16 is not set ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_SOC=y ++CONFIG_SND_BF5XX_I2S=y ++CONFIG_SND_BF5XX_SOC_SSM2602=y ++CONFIG_HID_A4TECH=y ++CONFIG_HID_APPLE=y ++CONFIG_HID_BELKIN=y ++CONFIG_HID_CHERRY=y ++CONFIG_HID_CHICONY=y ++CONFIG_HID_CYPRESS=y ++CONFIG_HID_EZKEY=y ++CONFIG_HID_GYRATION=y ++CONFIG_HID_LOGITECH=y ++CONFIG_HID_MICROSOFT=y ++CONFIG_HID_MONTEREY=y ++CONFIG_HID_PANTHERLORD=y ++CONFIG_HID_PETALYNX=y ++CONFIG_HID_SAMSUNG=y ++CONFIG_HID_SONY=y ++CONFIG_HID_SUNPLUS=y ++CONFIG_USB=y ++# CONFIG_USB_DEVICE_CLASS is not set ++CONFIG_USB_OTG_BLACKLIST_HUB=y ++CONFIG_USB_MON=y ++CONFIG_USB_MUSB_HDRC=y ++CONFIG_USB_MUSB_BLACKFIN=y ++CONFIG_USB_STORAGE=y ++CONFIG_USB_GADGET=y ++CONFIG_NEW_LEDS=y ++CONFIG_LEDS_CLASS=y ++CONFIG_LEDS_ADP5520=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=m ++# CONFIG_DNOTIFY is not set ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_UDF_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig +new file mode 100644 +index 000000000000..bf8df3e6cf02 
+--- /dev/null ++++ b/arch/blackfin/configs/BF527-EZKIT_defconfig +@@ -0,0 +1,181 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF527=y ++CONFIG_BF_REV_0_1=y ++CONFIG_IRQ_USB_INT0=11 ++CONFIG_IRQ_USB_INT1=11 ++CONFIG_IRQ_USB_INT2=11 ++CONFIG_IRQ_USB_DMA=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++CONFIG_BFIN_SIR0=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_JEDECPROBE=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_NAND=m ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_SCSI=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++CONFIG_BLK_DEV_SR=m ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_BFIN=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT_FF_MEMLESS=m ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_FB=y ++CONFIG_FB_BFIN_T350MCQB=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_LCD_LTV350QV=m ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_LOGO=y ++# CONFIG_LOGO_LINUX_MONO is not set ++# CONFIG_LOGO_LINUX_VGA16 is not set ++# CONFIG_LOGO_LINUX_CLUT224 is not set ++# CONFIG_LOGO_BLACKFIN_VGA16 is not set ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_SOC=y ++CONFIG_SND_BF5XX_I2S=y ++CONFIG_SND_BF5XX_SOC_SSM2602=y 
++CONFIG_HID_A4TECH=y ++CONFIG_HID_APPLE=y ++CONFIG_HID_BELKIN=y ++CONFIG_HID_CHERRY=y ++CONFIG_HID_CHICONY=y ++CONFIG_HID_CYPRESS=y ++CONFIG_HID_EZKEY=y ++CONFIG_HID_GYRATION=y ++CONFIG_HID_LOGITECH=y ++CONFIG_HID_MICROSOFT=y ++CONFIG_HID_MONTEREY=y ++CONFIG_HID_PANTHERLORD=y ++CONFIG_HID_PETALYNX=y ++CONFIG_HID_SAMSUNG=y ++CONFIG_HID_SONY=y ++CONFIG_HID_SUNPLUS=y ++CONFIG_USB=y ++# CONFIG_USB_DEVICE_CLASS is not set ++CONFIG_USB_OTG_BLACKLIST_HUB=y ++CONFIG_USB_MON=y ++CONFIG_USB_MUSB_HDRC=y ++CONFIG_MUSB_PIO_ONLY=y ++CONFIG_USB_MUSB_BLACKFIN=y ++CONFIG_MUSB_PIO_ONLY=y ++CONFIG_USB_STORAGE=y ++CONFIG_USB_GADGET=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=m ++# CONFIG_DNOTIFY is not set ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_UDF_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig +new file mode 100644 +index 000000000000..0220b3b15c53 +--- /dev/null ++++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig +@@ -0,0 +1,178 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_LOCALVERSION="DEV_0-1_pre2010" ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_PREEMPT=y ++CONFIG_BF527=y ++CONFIG_BF_REV_0_2=y ++CONFIG_BFIN527_TLL6527M=y ++CONFIG_BF527_UART1_PORTG=y ++CONFIG_IRQ_USB_INT0=11 ++CONFIG_IRQ_USB_INT1=11 ++CONFIG_IRQ_USB_INT2=11 ++CONFIG_IRQ_USB_DMA=11 ++CONFIG_BOOT_LOAD=0x400000 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=y ++CONFIG_DMA_UNCACHED_2M=y ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_0=0xFFC2 ++CONFIG_BANK_1=0xFFC2 ++CONFIG_BANK_2=0xFFC2 ++CONFIG_BANK_3=0xFFC2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++CONFIG_BFIN_SIR0=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=y 
++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_GPIO_ADDR=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_SCSI=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++CONFIG_BLK_DEV_SR=m ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_ETHERNET=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_AD7879=m ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_AD714X=y ++CONFIG_INPUT_ADXL34X=y ++# CONFIG_SERIO is not set ++CONFIG_BFIN_PPI=m ++CONFIG_BFIN_SIMPLE_TIMER=m ++CONFIG_BFIN_SPORT=m ++# CONFIG_CONSOLE_TRANSLATIONS is not set ++# CONFIG_DEVKMEM is not set ++CONFIG_BFIN_JTAG_COMM=m ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C_CHARDEV=y ++# CONFIG_I2C_HELPER_AUTO is not set ++CONFIG_I2C_SMBUS=y ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_MEDIA_SUPPORT=y ++CONFIG_VIDEO_DEV=y ++# CONFIG_MEDIA_TUNER_CUSTOMISE is not set ++CONFIG_VIDEO_HELPER_CHIPS_AUTO=y ++CONFIG_VIDEO_BLACKFIN_CAM=m ++CONFIG_OV9655=y ++CONFIG_FB=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_FONTS=y ++CONFIG_FONT_6x11=y ++CONFIG_LOGO=y ++# CONFIG_LOGO_LINUX_MONO is not set ++# CONFIG_LOGO_LINUX_VGA16 is not set ++# CONFIG_LOGO_LINUX_CLUT224 is not set ++# CONFIG_LOGO_BLACKFIN_VGA16 is not set ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_MIXER_OSS=y ++CONFIG_SND_PCM_OSS=y ++CONFIG_SND_SOC=y ++CONFIG_SND_BF5XX_I2S=y ++CONFIG_SND_BF5XX_SOC_SSM2602=y ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++CONFIG_MMC=m ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=y ++# CONFIG_DNOTIFY is not set ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_UDF_FS=m ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_JFFS2_FS=y ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++# CONFIG_RPCSEC_GSS_KRB5 is not set ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRC7=m +diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig +new file mode 100644 +index 000000000000..6023e3fd2c48 +--- /dev/null ++++ b/arch/blackfin/configs/BF533-EZKIT_defconfig +@@ -0,0 +1,114 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# 
CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BFIN533_EZKIT=y ++CONFIG_TIMER0=11 ++CONFIG_CLKIN_HZ=27000000 ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0xAAC2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y ++CONFIG_IRTTY_SIR=m ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_JEDECPROBE=y ++CONFIG_MTD_CFI_AMDSTD=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=y ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_PLATRAM=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_SMC91X=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT=m ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_USB_SUPPORT is not set ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig +new file mode 100644 +index 000000000000..f5cd0f18b711 +--- /dev/null ++++ b/arch/blackfin/configs/BF533-STAMP_defconfig +@@ -0,0 +1,124 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_TIMER0=11 ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0xAAC2 ++CONFIG_BINFMT_FLAT=y 
++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=m ++CONFIG_MTD_CFI_AMDSTD=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_SMC91X=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=m ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_GPIO=m ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_FB=m ++CONFIG_FIRMWARE_EDID=y ++CONFIG_SOUND=m ++CONFIG_SND=m ++CONFIG_SND_MIXER_OSS=m ++CONFIG_SND_PCM_OSS=m ++CONFIG_SND_SOC=m ++CONFIG_SND_BF5XX_I2S=m ++CONFIG_SND_BF5XX_SOC_AD73311=m ++# CONFIG_USB_SUPPORT is not set ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig +new file mode 100644 +index 000000000000..48085fde7f9e +--- /dev/null ++++ b/arch/blackfin/configs/BF537-STAMP_defconfig +@@ -0,0 +1,136 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF537=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y 
++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_CAN=m ++CONFIG_CAN_RAW=m ++CONFIG_CAN_BCM=m ++CONFIG_CAN_BFIN=m ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++CONFIG_BFIN_SIR1=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=m ++CONFIG_MTD_CFI_AMDSTD=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_PHYSMAP=m ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++CONFIG_NET_BFIN=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=m ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_BLACKFIN_TWI=m ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_FB=m ++CONFIG_FIRMWARE_EDID=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_SOUND=m ++CONFIG_SND=m ++CONFIG_SND_MIXER_OSS=m ++CONFIG_SND_PCM_OSS=m ++CONFIG_SND_SOC=m ++CONFIG_SND_BF5XX_I2S=m ++CONFIG_SND_BF5XX_SOC_AD73311=m ++# CONFIG_USB_SUPPORT is not set ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig +new file mode 100644 +index 000000000000..12deeaaef3cb +--- /dev/null ++++ b/arch/blackfin/configs/BF538-EZKIT_defconfig +@@ -0,0 +1,133 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y 
++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF538=y ++CONFIG_IRQ_TIMER0=12 ++CONFIG_IRQ_TIMER1=12 ++CONFIG_IRQ_TIMER2=12 ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_PM=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_CAN=m ++CONFIG_CAN_RAW=m ++CONFIG_CAN_BCM=m ++CONFIG_CAN_DEV=m ++CONFIG_CAN_BFIN=m ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=m ++CONFIG_MTD_CFI_AMDSTD=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_PHYSMAP=m ++CONFIG_MTD_NAND=m ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++CONFIG_PHYLIB=y ++CONFIG_SMSC_PHY=y ++CONFIG_NET_ETHERNET=y ++CONFIG_SMC91X=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_AD7879=y ++CONFIG_TOUCHSCREEN_AD7879_SPI=y ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_DEVKMEM is not set ++CONFIG_BFIN_JTAG_COMM=m ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++CONFIG_SERIAL_BFIN_UART1=y ++CONFIG_SERIAL_BFIN_UART2=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=m ++CONFIG_I2C_BLACKFIN_TWI=m ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_FB=m ++CONFIG_FB_BFIN_LQ035Q1=m ++# CONFIG_USB_SUPPORT is not set ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_SMB_FS=m ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig +new file mode 100644 +index 000000000000..6a68ffc55b5a +--- /dev/null ++++ b/arch/blackfin/configs/BF548-EZKIT_defconfig +@@ -0,0 +1,207 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y 
++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF548_std=y ++CONFIG_IRQ_TIMER0=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_CACHELINE_ALIGNED_L1=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_DMA_UNCACHED_2M=y ++CONFIG_BFIN_EXTMEM_WRITETHROUGH=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_EBIU_MBSCTLVAL=0x0 ++CONFIG_EBIU_MODEVAL=0x1 ++CONFIG_EBIU_FCTLVAL=0x6 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_CAN=m ++CONFIG_CAN_RAW=m ++CONFIG_CAN_BCM=m ++CONFIG_CAN_BFIN=m ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++CONFIG_BFIN_SIR3=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_FW_LOADER=m ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_NAND=y ++CONFIG_MTD_NAND_BF5XX=y ++# CONFIG_MTD_NAND_BF5XX_HWECC is not set ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_RAM=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++CONFIG_BLK_DEV_SR=m ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_ATA=y ++# CONFIG_SATA_PMP is not set ++CONFIG_PATA_BF54X=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_SMSC911X=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT_FF_MEMLESS=m ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++CONFIG_INPUT_EVBUG=m ++# CONFIG_KEYBOARD_ATKBD is not set ++CONFIG_KEYBOARD_BFIN=y ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_AD7877=m ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_FB=y ++CONFIG_FIRMWARE_EDID=y ++CONFIG_FB_BF54X_LQ043=y ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_FONTS=y ++CONFIG_FONT_6x11=y ++CONFIG_LOGO=y ++# CONFIG_LOGO_LINUX_MONO is not set ++# CONFIG_LOGO_LINUX_VGA16 is not set ++# CONFIG_LOGO_LINUX_CLUT224 is not set ++# CONFIG_LOGO_BLACKFIN_VGA16 is not set ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_MIXER_OSS=y ++CONFIG_SND_PCM_OSS=y ++CONFIG_SND_SOC=y ++CONFIG_SND_BF5XX_AC97=y ++CONFIG_SND_BF5XX_SOC_AD1980=y ++CONFIG_HID_A4TECH=y 
++CONFIG_HID_APPLE=y ++CONFIG_HID_BELKIN=y ++CONFIG_HID_CHERRY=y ++CONFIG_HID_CHICONY=y ++CONFIG_HID_CYPRESS=y ++CONFIG_HID_EZKEY=y ++CONFIG_HID_GYRATION=y ++CONFIG_HID_LOGITECH=y ++CONFIG_HID_MICROSOFT=y ++CONFIG_HID_MONTEREY=y ++CONFIG_HID_PANTHERLORD=y ++CONFIG_HID_PETALYNX=y ++CONFIG_HID_SAMSUNG=y ++CONFIG_HID_SONY=y ++CONFIG_HID_SUNPLUS=y ++CONFIG_USB=y ++# CONFIG_USB_DEVICE_CLASS is not set ++CONFIG_USB_OTG_BLACKLIST_HUB=y ++CONFIG_USB_MON=y ++CONFIG_USB_MUSB_HDRC=y ++CONFIG_USB_MUSB_BLACKFIN=y ++CONFIG_USB_STORAGE=y ++CONFIG_USB_GADGET=y ++CONFIG_MMC=y ++CONFIG_MMC_BLOCK=m ++CONFIG_SDH_BFIN=y ++CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=y ++CONFIG_EXT2_FS_XATTR=y ++# CONFIG_DNOTIFY is not set ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_ZISOFS=y ++CONFIG_MSDOS_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_NTFS_FS=m ++CONFIG_NTFS_RW=y ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_NFSD=m ++CONFIG_NFSD_V3=y ++CONFIG_CIFS=y ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig +new file mode 100644 +index 000000000000..e9f3ba783a4e +--- /dev/null ++++ b/arch/blackfin/configs/BF561-ACVILON_defconfig +@@ -0,0 +1,149 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_SYSFS_DEPRECATED_V2=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_PREEMPT=y ++CONFIG_BF561=y ++CONFIG_BF_REV_0_5=y ++CONFIG_IRQ_TIMER0=10 ++CONFIG_BFIN561_ACVILON=y ++# CONFIG_BF561_COREB is not set ++CONFIG_CLKIN_HZ=12000000 ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=y ++CONFIG_DMA_UNCACHED_4M=y ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_0=0x99b2 ++CONFIG_BANK_1=0x3350 ++CONFIG_BANK_3=0xAAC2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++CONFIG_SYN_COOKIES=y ++# CONFIG_INET_LRO is not set ++# CONFIG_IPV6 is not set ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_PLATRAM=y ++CONFIG_MTD_PHRAM=y ++CONFIG_MTD_BLOCK2MTD=y ++CONFIG_MTD_NAND=y ++CONFIG_MTD_NAND_PLATFORM=y ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_COUNT=2 ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++CONFIG_SCSI=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_ETHERNET=y ++CONFIG_SMSC911X=y ++# CONFIG_NETDEV_1000 is not set ++# 
CONFIG_NETDEV_10000 is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_PIO=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_PCA_PLATFORM=y ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_SPI_SPIDEV=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_GPIO_PCF857X=y ++CONFIG_SENSORS_LM75=y ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_MIXER_OSS=y ++CONFIG_SND_PCM_OSS=y ++# CONFIG_SND_DRIVERS is not set ++# CONFIG_SND_USB is not set ++CONFIG_SND_SOC=y ++CONFIG_SND_BF5XX_I2S=y ++CONFIG_SND_BF5XX_SPORT_NUM=1 ++CONFIG_USB=y ++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y ++# CONFIG_USB_DEVICE_CLASS is not set ++CONFIG_USB_MON=y ++CONFIG_USB_STORAGE=y ++CONFIG_USB_SERIAL=y ++CONFIG_USB_SERIAL_FTDI_SIO=y ++CONFIG_USB_SERIAL_PL2303=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_DS1307=y ++CONFIG_EXT2_FS=y ++CONFIG_EXT2_FS_XATTR=y ++CONFIG_EXT2_FS_POSIX_ACL=y ++CONFIG_EXT2_FS_SECURITY=y ++# CONFIG_DNOTIFY is not set ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_FAT_DEFAULT_CODEPAGE=866 ++CONFIG_FAT_DEFAULT_IOCHARSET="cp1251" ++CONFIG_NTFS_FS=y ++CONFIG_CONFIGFS_FS=y ++CONFIG_JFFS2_FS=y ++CONFIG_JFFS2_COMPRESSION_OPTIONS=y ++# CONFIG_JFFS2_ZLIB is not set ++CONFIG_JFFS2_LZO=y ++# CONFIG_JFFS2_RTIME is not set ++CONFIG_JFFS2_CMODE_FAVOURLZO=y ++CONFIG_CRAMFS=y ++CONFIG_MINIX_FS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++CONFIG_ROOT_NFS=y ++CONFIG_NLS_DEFAULT="cp1251" ++CONFIG_NLS_CODEPAGE_866=y ++CONFIG_NLS_CODEPAGE_1251=y ++CONFIG_NLS_KOI8_R=y ++CONFIG_NLS_UTF8=y ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++# CONFIG_DEBUG_BUGVERBOSE is not set ++CONFIG_DEBUG_INFO=y ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set ++CONFIG_CPLB_INFO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig +new file mode 100644 +index 000000000000..89b75a6c3fab +--- /dev/null ++++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig +@@ -0,0 +1,112 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF561=y ++CONFIG_SMP=y ++CONFIG_IRQ_TIMER0=10 ++CONFIG_CLKIN_HZ=30000000 ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0xAAC2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y 
++CONFIG_IRTTY_SIR=m ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_AMDSTD=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_PHYSMAP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_SMC91X=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT=m ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_USB_SUPPORT is not set ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig +new file mode 100644 +index 000000000000..67b3d2f419ba +--- /dev/null ++++ b/arch/blackfin/configs/BF561-EZKIT_defconfig +@@ -0,0 +1,114 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF561=y ++CONFIG_IRQ_TIMER0=10 ++CONFIG_CLKIN_HZ=30000000 ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_BFIN_EXTMEM_WRITETHROUGH=y ++CONFIG_BFIN_L2_DCACHEABLE=y ++CONFIG_BFIN_L2_WRITETHROUGH=y ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0xAAC2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y ++CONFIG_IRTTY_SIR=m ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y 
++CONFIG_MTD_CFI_AMDSTD=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_PHYSMAP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_SMC91X=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT=m ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_USB_SUPPORT is not set ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig +new file mode 100644 +index 000000000000..8cc75d4218fb +--- /dev/null ++++ b/arch/blackfin/configs/BF609-EZKIT_defconfig +@@ -0,0 +1,154 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF609=y ++CONFIG_PINT1_ASSIGN=0x01010000 ++CONFIG_PINT2_ASSIGN=0x07000101 ++CONFIG_PINT3_ASSIGN=0x02020303 ++CONFIG_IP_CHECKSUM_L1=y ++CONFIG_SYSCALL_TAB_L1=y ++CONFIG_CPLB_SWITCH_TAB_L1=y ++# CONFIG_APP_STACK_L1 is not set ++# CONFIG_BFIN_INS_LOWOVERHEAD is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_PM_BFIN_WAKE_PE12=y ++CONFIG_PM_BFIN_WAKE_PE12_POL=1 ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_GOV_POWERSAVE=y ++CONFIG_CPU_FREQ_GOV_ONDEMAND=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_IP_PNP_BOOTP=y ++CONFIG_IP_PNP_RARP=y ++# CONFIG_IPV6 is not set ++CONFIG_NETFILTER=y ++CONFIG_CAN=y ++CONFIG_CAN_BFIN=y ++CONFIG_IRDA=y ++CONFIG_IRTTY_SIR=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_FW_LOADER=m ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_CFI_STAA=y ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_SPI_NOR=y 
++CONFIG_MTD_UBI=m ++CONFIG_SCSI=y ++CONFIG_BLK_DEV_SD=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++CONFIG_STMMAC_ETH=y ++CONFIG_STMMAC_IEEE1588=y ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_BFIN_ROTARY=y ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_SIMPLE_TIMER=m ++# CONFIG_BFIN_CRC is not set ++CONFIG_BFIN_LINKPORT=y ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_ADI_V3=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_PINCTRL_MCP23S08=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_SOUND=m ++CONFIG_SND=m ++CONFIG_SND_MIXER_OSS=m ++CONFIG_SND_PCM_OSS=m ++# CONFIG_SND_DRIVERS is not set ++# CONFIG_SND_SPI is not set ++# CONFIG_SND_USB is not set ++CONFIG_SND_SOC=m ++CONFIG_USB=y ++CONFIG_USB_MUSB_HDRC=y ++CONFIG_USB_MUSB_BLACKFIN=m ++CONFIG_USB_STORAGE=y ++CONFIG_USB_GADGET=y ++CONFIG_USB_GADGET_MUSB_HDRC=y ++CONFIG_USB_ZERO=y ++CONFIG_MMC=y ++CONFIG_SDH_BFIN=y ++# CONFIG_IOMMU_SUPPORT is not set ++CONFIG_EXT2_FS=y ++# CONFIG_DNOTIFY is not set ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_JFFS2_FS=m ++CONFIG_UBIFS_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_DEBUG_FS=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++CONFIG_FRAME_POINTER=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO_HMAC=m ++CONFIG_CRYPTO_MD4=m ++CONFIG_CRYPTO_MD5=m ++CONFIG_CRYPTO_ARC4=m ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRYPTO_DEV_BFIN_CRC=m +diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig +new file mode 100644 +index 000000000000..9faf0ec7007f +--- /dev/null ++++ b/arch/blackfin/configs/BlackStamp_defconfig +@@ -0,0 +1,108 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_SYSFS_DEPRECATED_V2=y ++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_PREEMPT=y ++CONFIG_BF532=y ++CONFIG_BF_REV_0_5=y ++CONFIG_BLACKSTAMP=y ++CONFIG_TIMER0=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_ROMKERNEL=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=y ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0xAAC2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_BINFMT_SHARED_FLAT=y ++CONFIG_PM=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y 
++CONFIG_IP_PNP=y ++# CONFIG_INET_LRO is not set ++# CONFIG_IPV6 is not set ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=m ++CONFIG_MTD_CFI_AMDSTD=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_NBD=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_MISC_DEVICES=y ++CONFIG_EEPROM_AT25=y ++CONFIG_NETDEVICES=y ++CONFIG_NET_ETHERNET=y ++CONFIG_SMC91X=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_HW_RANDOM=y ++CONFIG_I2C=m ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_GPIO=m ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_SPI_SPIDEV=m ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_USB_SUPPORT is not set ++CONFIG_MMC=y ++CONFIG_MMC_SPI=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++# CONFIG_DNOTIFY is not set ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_JFFS2_FS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++CONFIG_NFS_V4=y ++CONFIG_SMB_FS=y ++CONFIG_CIFS=y ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_UTF8=y ++CONFIG_SYSCTL_SYSCALL_CHECK=y ++CONFIG_DEBUG_MMRS=y ++# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_CRC_CCITT=m +diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig +new file mode 100644 +index 000000000000..4a1ad4fd7bb2 +--- /dev/null ++++ b/arch/blackfin/configs/CM-BF527_defconfig +@@ -0,0 +1,129 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_KERNEL_LZMA=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_RD_GZIP is not set ++CONFIG_RD_LZMA=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_PREEMPT=y ++CONFIG_BF527=y ++CONFIG_BF_REV_0_1=y ++CONFIG_IRQ_TIMER0=12 ++CONFIG_BFIN527_BLUETECHNIX_CM=y ++CONFIG_IRQ_USB_INT0=11 ++CONFIG_IRQ_USB_INT1=11 ++CONFIG_IRQ_USB_INT2=11 ++CONFIG_IRQ_USB_DMA=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=y ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0xFFC0 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y 
++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_GPIO_ADDR=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_SCSI=y ++CONFIG_BLK_DEV_SD=y ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_ETHERNET=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_BLACKFIN_TWI=m ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_USB=m ++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y ++# CONFIG_USB_DEVICE_CLASS is not set ++CONFIG_USB_OTG_BLACKLIST_HUB=y ++CONFIG_USB_MON=m ++CONFIG_USB_MUSB_HDRC=m ++CONFIG_USB_MUSB_PERIPHERAL=y ++CONFIG_USB_GADGET_MUSB_HDRC=y ++CONFIG_MUSB_PIO_ONLY=y ++CONFIG_USB_STORAGE=m ++CONFIG_USB_GADGET=m ++CONFIG_USB_ETH=m ++CONFIG_USB_MASS_STORAGE=m ++CONFIG_USB_G_SERIAL=m ++CONFIG_USB_G_PRINTER=m ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++# CONFIG_DNOTIFY is not set ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_JFFS2_FS=y ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_SMB_FS=m ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_DEBUG_FS=y ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set ++CONFIG_EARLY_PRINTK=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRC_CCITT=m ++CONFIG_CRC_ITU_T=y ++CONFIG_CRC7=y +diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig +new file mode 100644 +index 000000000000..9d787e28bbe8 +--- /dev/null ++++ b/arch/blackfin/configs/PNAV-10_defconfig +@@ -0,0 +1,111 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_LOG_BUF_SHIFT=14 ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF537=y ++CONFIG_IRQ_TIMER0=12 ++CONFIG_PNAV10=y ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++CONFIG_IP_CHECKSUM_L1=y ++CONFIG_SYSCALL_TAB_L1=y ++CONFIG_CPLB_SWITCH_TAB_L1=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=y ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_1=0x33B0 ++CONFIG_BANK_2=0x33B0 ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_UCLINUX=y ++CONFIG_MTD_NAND=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++CONFIG_NET_ETHERNET=y ++CONFIG_BFIN_MAC=y ++# 
CONFIG_BFIN_MAC_USE_L1 is not set ++CONFIG_BFIN_TX_DESC_NUM=100 ++CONFIG_BFIN_RX_DESC_NUM=100 ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_AD7877=y ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_UINPUT=y ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_HW_RANDOM=y ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_FB=y ++CONFIG_FIRMWARE_EDID=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_LCD_CLASS_DEVICE=y ++CONFIG_BACKLIGHT_CLASS_DEVICE=y ++CONFIG_SOUND=y ++CONFIG_SND=m ++# CONFIG_SND_SUPPORT_OLD_API is not set ++# CONFIG_SND_VERBOSE_PROCFS is not set ++CONFIG_SOUND_PRIME=y ++# CONFIG_HID is not set ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=y ++CONFIG_EXT2_FS_XATTR=y ++# CONFIG_DNOTIFY is not set ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_SMB_FS=m ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_DEBUG_HUNT_FOR_ZERO is not set ++# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set ++# CONFIG_ACCESS_CHECK is not set ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRC_CCITT=m +diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig +new file mode 100644 +index 000000000000..225df32dc9a8 +--- /dev/null ++++ b/arch/blackfin/configs/SRV1_defconfig +@@ -0,0 +1,88 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++CONFIG_KALLSYMS_ALL=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_PREEMPT=y ++CONFIG_BF537=y ++CONFIG_IRQ_TIMER0=12 ++CONFIG_BOOT_LOAD=0x400000 ++CONFIG_CLKIN_HZ=22118400 ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_DMA_UNCACHED_2M=y ++CONFIG_C_CDPRIO=y ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_PM=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y ++CONFIG_IRTTY_SIR=m ++# CONFIG_WIRELESS is not set ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_JEDECPROBE=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_UCLINUX=y ++CONFIG_MTD_NAND=m ++CONFIG_BLK_DEV_RAM=y ++CONFIG_MISC_DEVICES=y ++CONFIG_EEPROM_AT25=m ++CONFIG_NETDEVICES=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_UINPUT=y ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_HWMON=m 
++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_HID is not set ++CONFIG_EXT2_FS=y ++CONFIG_EXT2_FS_XATTR=y ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_SMB_FS=m ++CONFIG_DEBUG_KERNEL=y ++# CONFIG_DEBUG_BUGVERBOSE is not set ++CONFIG_DEBUG_INFO=y ++# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set ++CONFIG_CPLB_INFO=y +diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig +new file mode 100644 +index 000000000000..425c24e43c34 +--- /dev/null ++++ b/arch/blackfin/configs/TCM-BF518_defconfig +@@ -0,0 +1,131 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_KERNEL_LZMA=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_RD_GZIP is not set ++CONFIG_RD_LZMA=y ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF518=y ++CONFIG_BF_REV_0_1=y ++CONFIG_BFIN518F_TCM=y ++CONFIG_IRQ_TIMER0=12 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_ADV_OPTIONS=y ++CONFIG_MTD_CFI_GEOMETRY=y ++# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set ++# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set ++# CONFIG_MTD_CFI_I2 is not set ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_PHYSMAP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++CONFIG_NET_ETHERNET=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_DEVKMEM is not set ++CONFIG_BFIN_JTAG_COMM=m ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++CONFIG_MMC=y ++CONFIG_MMC_DEBUG=y ++CONFIG_MMC_SPI=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=y ++# CONFIG_DNOTIFY is not set ++CONFIG_VFAT_FS=m ++# CONFIG_MISC_FILESYSTEMS is not 
set ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++CONFIG_ROOT_NFS=y ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRC_CCITT=m +diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig +index 8bcb61a6ec15..6b0912ba544a 100644 +--- a/arch/mips/configs/fuloong2e_defconfig ++++ b/arch/mips/configs/fuloong2e_defconfig +@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y + CONFIG_POSIX_MQUEUE=y + CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_BSD_PROCESS_ACCT=y + CONFIG_IKCONFIG=y + CONFIG_IKCONFIG_PROC=y +diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig +index 9d9af5f923c3..acd27816d300 100644 +--- a/arch/mips/configs/gpr_defconfig ++++ b/arch/mips/configs/gpr_defconfig +@@ -1,8 +1,8 @@ ++CONFIG_PREEMPT=y + # CONFIG_LOCALVERSION_AUTO is not set + CONFIG_SYSVIPC=y + CONFIG_POSIX_MQUEUE=y + CONFIG_HIGH_RES_TIMERS=y +-CONFIG_PREEMPT_VOLUNTARY=y + CONFIG_BSD_PROCESS_ACCT=y + CONFIG_BSD_PROCESS_ACCT_V3=y + CONFIG_RELAY=y +diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig +index ff40fbc2f439..2bca2257a8bb 100644 +--- a/arch/mips/configs/ip22_defconfig ++++ b/arch/mips/configs/ip22_defconfig +@@ -1,7 +1,7 @@ + CONFIG_SYSVIPC=y + CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_IKCONFIG=y + CONFIG_IKCONFIG_PROC=y + CONFIG_LOG_BUF_SHIFT=14 +diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig +index 0921ef38e9fb..6da05cef46f8 100644 +--- a/arch/mips/configs/ip28_defconfig ++++ b/arch/mips/configs/ip28_defconfig +@@ -1,5 +1,5 @@ + CONFIG_SYSVIPC=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_IKCONFIG=y + CONFIG_IKCONFIG_PROC=y + CONFIG_LOG_BUF_SHIFT=14 +diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig +index 328d4dfeb4cb..e17cb23173ea 100644 +--- a/arch/mips/configs/jazz_defconfig ++++ b/arch/mips/configs/jazz_defconfig +@@ -1,6 +1,6 @@ ++CONFIG_PREEMPT=y + CONFIG_SYSVIPC=y + CONFIG_POSIX_MQUEUE=y +-CONFIG_PREEMPT_VOLUNTARY=y + CONFIG_BSD_PROCESS_ACCT=y + CONFIG_IKCONFIG=y + CONFIG_IKCONFIG_PROC=y +diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig +index 16bef819fe98..a426aeb3a603 100644 +--- a/arch/mips/configs/mtx1_defconfig ++++ b/arch/mips/configs/mtx1_defconfig +@@ -1,8 +1,8 @@ ++CONFIG_PREEMPT=y + # CONFIG_LOCALVERSION_AUTO is not set + CONFIG_SYSVIPC=y + CONFIG_POSIX_MQUEUE=y + CONFIG_AUDIT=y +-CONFIG_PREEMPT_VOLUNTARY=y + CONFIG_BSD_PROCESS_ACCT=y + CONFIG_BSD_PROCESS_ACCT_V3=y + CONFIG_RELAY=y +diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig +index 4ecb157e56d4..ea7309283b01 100644 +--- a/arch/mips/configs/nlm_xlr_defconfig ++++ b/arch/mips/configs/nlm_xlr_defconfig +@@ -1,10 +1,10 @@ ++CONFIG_PREEMPT=y + # CONFIG_LOCALVERSION_AUTO is not set + CONFIG_SYSVIPC=y + CONFIG_POSIX_MQUEUE=y + CONFIG_AUDIT=y + CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y +-CONFIG_PREEMPT_VOLUNTARY=y + CONFIG_BSD_PROCESS_ACCT=y + CONFIG_BSD_PROCESS_ACCT_V3=y + 
CONFIG_TASKSTATS=y +diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig +index 63fe2da1b37f..7f08ee237345 100644 +--- a/arch/mips/configs/pic32mzda_defconfig ++++ b/arch/mips/configs/pic32mzda_defconfig +@@ -1,7 +1,7 @@ ++CONFIG_PREEMPT=y + CONFIG_SYSVIPC=y + CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y +-CONFIG_PREEMPT_VOLUNTARY=y + CONFIG_IKCONFIG=y + CONFIG_IKCONFIG_PROC=y + CONFIG_LOG_BUF_SHIFT=14 +diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig +index 2f08d071ada6..11118f5f507a 100644 +--- a/arch/mips/configs/pistachio_defconfig ++++ b/arch/mips/configs/pistachio_defconfig +@@ -1,9 +1,9 @@ ++CONFIG_PREEMPT=y + # CONFIG_LOCALVERSION_AUTO is not set + CONFIG_DEFAULT_HOSTNAME="localhost" + CONFIG_SYSVIPC=y + CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y +-CONFIG_PREEMPT_VOLUNTARY=y + CONFIG_IKCONFIG=m + CONFIG_IKCONFIG_PROC=y + CONFIG_LOG_BUF_SHIFT=18 +diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig +index aa0b169800e0..d2177f598a1d 100644 +--- a/arch/mips/configs/pnx8335_stb225_defconfig ++++ b/arch/mips/configs/pnx8335_stb225_defconfig +@@ -1,9 +1,9 @@ ++CONFIG_PREEMPT=y + # CONFIG_LOCALVERSION_AUTO is not set + # CONFIG_SWAP is not set + CONFIG_SYSVIPC=y + CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y +-CONFIG_PREEMPT_VOLUNTARY=y + CONFIG_LOG_BUF_SHIFT=14 + CONFIG_EXPERT=y + CONFIG_SLAB=y +diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig +index 0f4b09f8a0ee..6ba5086f6dff 100644 +--- a/arch/mips/configs/rm200_defconfig ++++ b/arch/mips/configs/rm200_defconfig +@@ -1,6 +1,6 @@ ++CONFIG_PREEMPT=y + CONFIG_SYSVIPC=y + CONFIG_POSIX_MQUEUE=y +-CONFIG_PREEMPT_VOLUNTARY=y + CONFIG_BSD_PROCESS_ACCT=y + CONFIG_IKCONFIG=y + CONFIG_IKCONFIG_PROC=y +diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig +index ccc109761f44..a6a5b0b7a9c9 100644 +--- a/arch/parisc/configs/712_defconfig ++++ b/arch/parisc/configs/712_defconfig +@@ -13,7 +13,7 @@ CONFIG_MODULES=y + CONFIG_MODULE_UNLOAD=y + CONFIG_MODULE_FORCE_UNLOAD=y + CONFIG_PA7100LC=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_GSC_LASI=y + # CONFIG_PDC_CHASSIS is not set + CONFIG_BINFMT_MISC=m +diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig +index 8d41a73bd71b..b8e0a6662ff9 100644 +--- a/arch/parisc/configs/c3000_defconfig ++++ b/arch/parisc/configs/c3000_defconfig +@@ -13,7 +13,7 @@ CONFIG_MODULES=y + CONFIG_MODULE_UNLOAD=y + CONFIG_MODULE_FORCE_UNLOAD=y + CONFIG_PA8X00=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + # CONFIG_GSC is not set + CONFIG_PCI=y + CONFIG_PCI_LBA=y +diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig +index 52c9050a7c5c..8d86d2e989f4 100644 +--- a/arch/parisc/configs/default_defconfig ++++ b/arch/parisc/configs/default_defconfig +@@ -14,7 +14,7 @@ CONFIG_MODULE_UNLOAD=y + CONFIG_MODULE_FORCE_UNLOAD=y + # CONFIG_BLK_DEV_BSG is not set + CONFIG_PA7100LC=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_IOMMU_CCIO=y + CONFIG_GSC_LASI=y + CONFIG_GSC_WAX=y +diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig +new file mode 100644 +index 000000000000..04fee07ea6c5 +--- /dev/null ++++ b/arch/powerpc/configs/c2k_defconfig +@@ -0,0 +1,389 @@ ++CONFIG_SYSVIPC=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_AUDIT=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_PROFILING=y ++CONFIG_OPROFILE=m ++CONFIG_KPROBES=y 
++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODVERSIONS=y ++CONFIG_PARTITION_ADVANCED=y ++CONFIG_OSF_PARTITION=y ++CONFIG_MAC_PARTITION=y ++CONFIG_BSD_DISKLABEL=y ++CONFIG_MINIX_SUBPARTITION=y ++CONFIG_SOLARIS_X86_PARTITION=y ++CONFIG_UNIXWARE_DISKLABEL=y ++CONFIG_SGI_PARTITION=y ++CONFIG_SUN_PARTITION=y ++# CONFIG_PPC_CHRP is not set ++# CONFIG_PPC_PMAC is not set ++CONFIG_EMBEDDED6xx=y ++CONFIG_PPC_C2K=y ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y ++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y ++CONFIG_CPU_FREQ_GOV_POWERSAVE=m ++CONFIG_CPU_FREQ_GOV_ONDEMAND=m ++CONFIG_GEN_RTC=y ++CONFIG_HIGHMEM=y ++CONFIG_PREEMPT=y ++CONFIG_BINFMT_MISC=y ++CONFIG_PM=y ++CONFIG_PCI_MSI=y ++CONFIG_HOTPLUG_PCI=y ++CONFIG_HOTPLUG_PCI_SHPC=m ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_XFRM_USER=y ++CONFIG_NET_KEY=m ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_IP_ADVANCED_ROUTER=y ++CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IP_ROUTE_MULTIPATH=y ++CONFIG_IP_ROUTE_VERBOSE=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_NET_IPIP=m ++CONFIG_IP_MROUTE=y ++CONFIG_IP_PIMSM_V1=y ++CONFIG_IP_PIMSM_V2=y ++CONFIG_SYN_COOKIES=y ++CONFIG_INET_AH=m ++CONFIG_INET_ESP=m ++CONFIG_INET_IPCOMP=m ++CONFIG_INET6_AH=m ++CONFIG_INET6_ESP=m ++CONFIG_INET6_IPCOMP=m ++CONFIG_IPV6_TUNNEL=m ++CONFIG_NETFILTER=y ++# CONFIG_NETFILTER_XT_MATCH_SCTP is not set ++CONFIG_IP_NF_IPTABLES=m ++CONFIG_IP_NF_MATCH_ECN=m ++CONFIG_IP_NF_MATCH_TTL=m ++CONFIG_IP_NF_FILTER=m ++CONFIG_IP_NF_TARGET_REJECT=m ++CONFIG_IP_NF_MANGLE=m ++CONFIG_IP_NF_TARGET_ECN=m ++CONFIG_IP_NF_RAW=m ++CONFIG_IP_NF_ARPTABLES=m ++CONFIG_IP_NF_ARPFILTER=m ++CONFIG_IP_NF_ARP_MANGLE=m ++CONFIG_IP6_NF_IPTABLES=m ++CONFIG_IP6_NF_MATCH_EUI64=m ++CONFIG_IP6_NF_MATCH_FRAG=m ++CONFIG_IP6_NF_MATCH_OPTS=m ++CONFIG_IP6_NF_MATCH_HL=m ++CONFIG_IP6_NF_MATCH_IPV6HEADER=m ++CONFIG_IP6_NF_MATCH_RT=m ++CONFIG_IP6_NF_FILTER=m ++CONFIG_IP6_NF_MANGLE=m ++CONFIG_IP6_NF_RAW=m ++CONFIG_BRIDGE_NF_EBTABLES=m ++CONFIG_BRIDGE_EBT_BROUTE=m ++CONFIG_BRIDGE_EBT_T_FILTER=m ++CONFIG_BRIDGE_EBT_T_NAT=m ++CONFIG_BRIDGE_EBT_802_3=m ++CONFIG_BRIDGE_EBT_AMONG=m ++CONFIG_BRIDGE_EBT_ARP=m ++CONFIG_BRIDGE_EBT_IP=m ++CONFIG_BRIDGE_EBT_LIMIT=m ++CONFIG_BRIDGE_EBT_MARK=m ++CONFIG_BRIDGE_EBT_PKTTYPE=m ++CONFIG_BRIDGE_EBT_STP=m ++CONFIG_BRIDGE_EBT_VLAN=m ++CONFIG_BRIDGE_EBT_ARPREPLY=m ++CONFIG_BRIDGE_EBT_DNAT=m ++CONFIG_BRIDGE_EBT_MARK_T=m ++CONFIG_BRIDGE_EBT_REDIRECT=m ++CONFIG_BRIDGE_EBT_SNAT=m ++CONFIG_BRIDGE_EBT_LOG=m ++CONFIG_IP_SCTP=m ++CONFIG_ATM=m ++CONFIG_ATM_CLIP=m ++CONFIG_ATM_LANE=m ++CONFIG_ATM_BR2684=m ++CONFIG_BRIDGE=m ++CONFIG_VLAN_8021Q=m ++CONFIG_NET_SCHED=y ++CONFIG_NET_SCH_CBQ=m ++CONFIG_NET_SCH_HTB=m ++CONFIG_NET_SCH_HFSC=m ++CONFIG_NET_SCH_ATM=m ++CONFIG_NET_SCH_PRIO=m ++CONFIG_NET_SCH_RED=m ++CONFIG_NET_SCH_SFQ=m ++CONFIG_NET_SCH_TEQL=m ++CONFIG_NET_SCH_TBF=m ++CONFIG_NET_SCH_GRED=m ++CONFIG_NET_SCH_DSMARK=m ++CONFIG_NET_SCH_NETEM=m ++CONFIG_NET_CLS_TCINDEX=m ++CONFIG_NET_CLS_ROUTE4=m ++CONFIG_NET_CLS_FW=m ++CONFIG_NET_CLS_U32=m ++CONFIG_CLS_U32_PERF=y ++CONFIG_NET_CLS_RSVP=m ++CONFIG_NET_CLS_RSVP6=m ++CONFIG_NET_CLS_IND=y ++CONFIG_BT=m ++CONFIG_BT_RFCOMM=m ++CONFIG_BT_RFCOMM_TTY=y ++CONFIG_BT_BNEP=m ++CONFIG_BT_BNEP_MC_FILTER=y ++CONFIG_BT_BNEP_PROTO_FILTER=y ++CONFIG_BT_HIDP=m ++CONFIG_BT_HCIUART=m ++CONFIG_BT_HCIUART_H4=y ++CONFIG_BT_HCIUART_BCSP=y ++CONFIG_BT_HCIBCM203X=m ++CONFIG_BT_HCIBFUSB=m ++CONFIG_BT_HCIVHCI=m ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_AMDSTD=y 
++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_PHYSMAP_OF=y ++CONFIG_BLK_DEV_LOOP=m ++CONFIG_BLK_DEV_CRYPTOLOOP=m ++CONFIG_BLK_DEV_NBD=m ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++CONFIG_SCSI=m ++CONFIG_BLK_DEV_SD=m ++CONFIG_CHR_DEV_ST=m ++CONFIG_CHR_DEV_OSST=m ++CONFIG_BLK_DEV_SR=m ++CONFIG_BLK_DEV_SR_VENDOR=y ++CONFIG_CHR_DEV_SG=m ++CONFIG_SCSI_CONSTANTS=y ++CONFIG_SCSI_LOGGING=y ++CONFIG_SCSI_ISCSI_ATTRS=m ++CONFIG_BLK_DEV_3W_XXXX_RAID=m ++CONFIG_SCSI_3W_9XXX=m ++CONFIG_SCSI_ACARD=m ++CONFIG_SCSI_AACRAID=m ++CONFIG_SCSI_AIC7XXX=m ++CONFIG_AIC7XXX_CMDS_PER_DEVICE=4 ++CONFIG_AIC7XXX_RESET_DELAY_MS=15000 ++# CONFIG_AIC7XXX_DEBUG_ENABLE is not set ++# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set ++CONFIG_SCSI_AIC79XX=m ++CONFIG_AIC79XX_CMDS_PER_DEVICE=4 ++CONFIG_AIC79XX_RESET_DELAY_MS=15000 ++# CONFIG_AIC79XX_DEBUG_ENABLE is not set ++# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set ++CONFIG_SCSI_ARCMSR=m ++CONFIG_MEGARAID_NEWGEN=y ++CONFIG_MEGARAID_MM=m ++CONFIG_MEGARAID_MAILBOX=m ++CONFIG_MEGARAID_SAS=m ++CONFIG_SCSI_GDTH=m ++CONFIG_SCSI_IPS=m ++CONFIG_SCSI_INITIO=m ++CONFIG_SCSI_SYM53C8XX_2=m ++CONFIG_SCSI_QLOGIC_1280=m ++CONFIG_NETDEVICES=y ++CONFIG_BONDING=m ++CONFIG_DUMMY=m ++CONFIG_NETCONSOLE=m ++CONFIG_TUN=m ++# CONFIG_ATM_DRIVERS is not set ++CONFIG_MV643XX_ETH=y ++CONFIG_VITESSE_PHY=y ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_UINPUT=m ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_SERIAL_NONSTANDARD=y ++CONFIG_SERIAL_MPSC=y ++CONFIG_SERIAL_MPSC_CONSOLE=y ++CONFIG_NVRAM=m ++CONFIG_RAW_DRIVER=y ++CONFIG_MAX_RAW_DEVS=8192 ++CONFIG_I2C=m ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_MV64XXX=m ++CONFIG_HWMON=m ++CONFIG_SENSORS_ADM1021=m ++CONFIG_SENSORS_ADM1025=m ++CONFIG_SENSORS_ADM1026=m ++CONFIG_SENSORS_ADM1031=m ++CONFIG_SENSORS_DS1621=m ++CONFIG_SENSORS_GL518SM=m ++CONFIG_SENSORS_MAX1619=m ++CONFIG_SENSORS_LM75=m ++CONFIG_SENSORS_LM77=m ++CONFIG_SENSORS_LM78=m ++CONFIG_SENSORS_LM80=m ++CONFIG_SENSORS_LM83=m ++CONFIG_SENSORS_LM85=m ++CONFIG_SENSORS_LM87=m ++CONFIG_SENSORS_LM90=m ++CONFIG_SENSORS_PCF8591=m ++CONFIG_SENSORS_VIA686A=m ++CONFIG_SENSORS_W83781D=m ++CONFIG_SENSORS_W83L785TS=m ++CONFIG_WATCHDOG=y ++CONFIG_SOFT_WATCHDOG=m ++CONFIG_PCIPCWATCHDOG=m ++CONFIG_WDTPCI=m ++CONFIG_USBPCWATCHDOG=m ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_USB=m ++CONFIG_USB_MON=m ++CONFIG_USB_EHCI_HCD=m ++CONFIG_USB_EHCI_ROOT_HUB_TT=y ++CONFIG_USB_OHCI_HCD=m ++CONFIG_USB_OHCI_HCD_PPC_OF_BE=y ++CONFIG_USB_UHCI_HCD=m ++CONFIG_USB_ACM=m ++CONFIG_USB_PRINTER=m ++CONFIG_USB_STORAGE=m ++CONFIG_USB_STORAGE_DATAFAB=m ++CONFIG_USB_STORAGE_FREECOM=m ++CONFIG_USB_STORAGE_ISD200=m ++CONFIG_USB_STORAGE_SDDR09=m ++CONFIG_USB_STORAGE_SDDR55=m ++CONFIG_USB_STORAGE_JUMPSHOT=m ++CONFIG_USB_MDC800=m ++CONFIG_USB_MICROTEK=m ++CONFIG_USB_SERIAL=m ++CONFIG_USB_SERIAL_GENERIC=y ++CONFIG_USB_SERIAL_BELKIN=m ++CONFIG_USB_SERIAL_WHITEHEAT=m ++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m ++CONFIG_USB_SERIAL_EMPEG=m ++CONFIG_USB_SERIAL_FTDI_SIO=m ++CONFIG_USB_SERIAL_VISOR=m ++CONFIG_USB_SERIAL_IPAQ=m ++CONFIG_USB_SERIAL_IR=m ++CONFIG_USB_SERIAL_EDGEPORT=m ++CONFIG_USB_SERIAL_EDGEPORT_TI=m ++CONFIG_USB_SERIAL_KEYSPAN_PDA=m ++CONFIG_USB_SERIAL_KEYSPAN=m ++CONFIG_USB_SERIAL_KLSI=m ++CONFIG_USB_SERIAL_KOBIL_SCT=m ++CONFIG_USB_SERIAL_MCT_U232=m ++CONFIG_USB_SERIAL_PL2303=m ++CONFIG_USB_SERIAL_SAFE=m ++CONFIG_USB_SERIAL_SAFE_PADDED=y ++CONFIG_USB_SERIAL_CYBERJACK=m ++CONFIG_USB_SERIAL_XIRCOM=m ++CONFIG_USB_SERIAL_OMNINET=m 
++CONFIG_USB_EMI62=m ++CONFIG_USB_RIO500=m ++CONFIG_USB_LEGOTOWER=m ++CONFIG_USB_LCD=m ++CONFIG_USB_LED=m ++CONFIG_USB_TEST=m ++CONFIG_USB_ATM=m ++CONFIG_USB_SPEEDTOUCH=m ++CONFIG_INFINIBAND=m ++CONFIG_INFINIBAND_USER_MAD=m ++CONFIG_INFINIBAND_USER_ACCESS=m ++CONFIG_INFINIBAND_MTHCA=m ++CONFIG_INFINIBAND_IPOIB=m ++CONFIG_INFINIBAND_IPOIB_CM=y ++CONFIG_INFINIBAND_SRP=m ++CONFIG_DMADEVICES=y ++CONFIG_EXT4_FS=m ++CONFIG_EXT4_FS_POSIX_ACL=y ++CONFIG_EXT4_FS_SECURITY=y ++CONFIG_QUOTA=y ++CONFIG_QFMT_V2=y ++CONFIG_AUTOFS4_FS=m ++CONFIG_UDF_FS=m ++CONFIG_MSDOS_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_FAT_DEFAULT_IOCHARSET="ascii" ++CONFIG_PROC_KCORE=y ++CONFIG_TMPFS=y ++CONFIG_HFS_FS=m ++CONFIG_HFSPLUS_FS=m ++CONFIG_JFFS2_FS=y ++CONFIG_CRAMFS=m ++CONFIG_VXFS_FS=m ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3_ACL=y ++CONFIG_NFS_V4=y ++CONFIG_ROOT_NFS=y ++CONFIG_CIFS=m ++CONFIG_CIFS_XATTR=y ++CONFIG_CIFS_POSIX=y ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="utf8" ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_CODEPAGE_737=m ++CONFIG_NLS_CODEPAGE_775=m ++CONFIG_NLS_CODEPAGE_850=m ++CONFIG_NLS_CODEPAGE_852=m ++CONFIG_NLS_CODEPAGE_855=m ++CONFIG_NLS_CODEPAGE_857=m ++CONFIG_NLS_CODEPAGE_860=m ++CONFIG_NLS_CODEPAGE_861=m ++CONFIG_NLS_CODEPAGE_862=m ++CONFIG_NLS_CODEPAGE_863=m ++CONFIG_NLS_CODEPAGE_864=m ++CONFIG_NLS_CODEPAGE_865=m ++CONFIG_NLS_CODEPAGE_866=m ++CONFIG_NLS_CODEPAGE_869=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_CODEPAGE_950=m ++CONFIG_NLS_CODEPAGE_932=m ++CONFIG_NLS_CODEPAGE_949=m ++CONFIG_NLS_CODEPAGE_874=m ++CONFIG_NLS_ISO8859_8=m ++CONFIG_NLS_CODEPAGE_1250=m ++CONFIG_NLS_CODEPAGE_1251=m ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_ISO8859_2=m ++CONFIG_NLS_ISO8859_3=m ++CONFIG_NLS_ISO8859_4=m ++CONFIG_NLS_ISO8859_5=m ++CONFIG_NLS_ISO8859_6=m ++CONFIG_NLS_ISO8859_7=m ++CONFIG_NLS_ISO8859_9=m ++CONFIG_NLS_ISO8859_13=m ++CONFIG_NLS_ISO8859_14=m ++CONFIG_NLS_ISO8859_15=m ++CONFIG_NLS_KOI8_R=m ++CONFIG_NLS_KOI8_U=m ++CONFIG_CRC_CCITT=m ++CONFIG_CRC_T10DIF=m ++CONFIG_DEBUG_INFO=y ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_STACK_USAGE=y ++CONFIG_DEBUG_HIGHMEM=y ++CONFIG_DEBUG_STACKOVERFLOW=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_SPINLOCK=y ++CONFIG_BOOTX_TEXT=y ++CONFIG_PPC_EARLY_DEBUG=y ++CONFIG_SECURITY=y ++CONFIG_SECURITY_NETWORK=y ++CONFIG_SECURITY_SELINUX=y ++CONFIG_SECURITY_SELINUX_BOOTPARAM=y ++CONFIG_SECURITY_SELINUX_DISABLE=y ++CONFIG_CRYPTO_HMAC=y ++CONFIG_CRYPTO_MICHAEL_MIC=m ++CONFIG_CRYPTO_SHA1=y ++CONFIG_CRYPTO_SHA512=m ++CONFIG_CRYPTO_WP512=m ++CONFIG_CRYPTO_BLOWFISH=m ++CONFIG_CRYPTO_CAST6=m ++CONFIG_CRYPTO_KHAZAD=m ++CONFIG_CRYPTO_SERPENT=m ++CONFIG_CRYPTO_TEA=m ++CONFIG_CRYPTO_TWOFISH=m +diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig +index 7c6baf6df139..9aea58fa9dba 100644 +--- a/arch/powerpc/configs/ppc6xx_defconfig ++++ b/arch/powerpc/configs/ppc6xx_defconfig +@@ -74,7 +74,7 @@ CONFIG_QE_GPIO=y + CONFIG_MCU_MPC8349EMITX=y + CONFIG_HIGHMEM=y + CONFIG_HZ_1000=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_BINFMT_MISC=y + CONFIG_HIBERNATION=y + CONFIG_PM_DEBUG=y +diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig +new file mode 100644 +index 000000000000..46434ca1fa10 +--- /dev/null ++++ b/arch/score/configs/spct6600_defconfig +@@ -0,0 +1,84 @@ ++CONFIG_HZ_100=y ++CONFIG_PREEMPT=y ++CONFIG_EXPERIMENTAL=y ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_SYSVIPC=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_LOG_BUF_SHIFT=12 ++CONFIG_SYSFS_DEPRECATED_V2=y 
++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_KALLSYMS is not set ++# CONFIG_HOTPLUG is not set ++CONFIG_SLAB=y ++CONFIG_MODULES=y ++CONFIG_MODULE_FORCE_LOAD=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++# CONFIG_BLK_DEV_BSG is not set ++CONFIG_BINFMT_MISC=y ++CONFIG_NET=y ++CONFIG_UNIX=y ++CONFIG_NET_KEY=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_ARPD=y ++# CONFIG_INET_LRO is not set ++# CONFIG_IPV6 is not set ++# CONFIG_STANDALONE is not set ++# CONFIG_PREVENT_FIRMWARE_BUILD is not set ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_CRYPTOLOOP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_COUNT=1 ++# CONFIG_MISC_DEVICES is not set ++CONFIG_NETDEVICES=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++CONFIG_SERIAL_NONSTANDARD=y ++CONFIG_STALDRV=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_RAW_DRIVER=y ++CONFIG_MAX_RAW_DEVS=8192 ++# CONFIG_HWMON is not set ++# CONFIG_VGA_CONSOLE is not set ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++CONFIG_EXT2_FS=y ++CONFIG_EXT2_FS_XATTR=y ++CONFIG_EXT2_FS_POSIX_ACL=y ++CONFIG_EXT3_FS=y ++# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set ++CONFIG_EXT3_FS_POSIX_ACL=y ++CONFIG_AUTOFS_FS=y ++CONFIG_AUTOFS4_FS=y ++CONFIG_PROC_KCORE=y ++# CONFIG_PROC_PAGE_MONITOR is not set ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++CONFIG_NFS_V3_ACL=y ++CONFIG_NFS_V4=y ++CONFIG_NFSD=y ++CONFIG_NFSD_V3_ACL=y ++CONFIG_NFSD_V4=y ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++CONFIG_SECURITY=y ++CONFIG_SECURITY_NETWORK=y ++CONFIG_CRYPTO_NULL=y ++CONFIG_CRYPTO_CRYPTD=y ++CONFIG_CRYPTO_SEQIV=y ++CONFIG_CRYPTO_MD4=y ++CONFIG_CRYPTO_MICHAEL_MIC=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++# CONFIG_CRYPTO_HW is not set ++CONFIG_CRC_CCITT=y ++CONFIG_CRC16=y ++CONFIG_LIBCRC32C=y +diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig +index 5a1097641247..eb5fbf554e7f 100644 +--- a/arch/sh/configs/se7712_defconfig ++++ b/arch/sh/configs/se7712_defconfig +@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y + CONFIG_SH_SOLUTION_ENGINE=y + CONFIG_SH_PCLK_FREQ=66666666 + CONFIG_HEARTBEAT=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_CMDLINE_OVERWRITE=y + CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1" + CONFIG_NET=y +diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig +index 9c0ef13bee10..cbaa65c8bf9e 100644 +--- a/arch/sh/configs/se7721_defconfig ++++ b/arch/sh/configs/se7721_defconfig +@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y + CONFIG_SH_7721_SOLUTION_ENGINE=y + CONFIG_SH_PCLK_FREQ=33333333 + CONFIG_HEARTBEAT=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_CMDLINE_OVERWRITE=y + CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda2" + CONFIG_NET=y +diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig +index ceb48e9b70f4..1a69eda6610c 100644 +--- a/arch/sh/configs/titan_defconfig ++++ b/arch/sh/configs/titan_defconfig +@@ -20,7 +20,7 @@ CONFIG_SH_TITAN=y + CONFIG_SH_PCLK_FREQ=30000000 + CONFIG_SH_DMA=y + CONFIG_SH_DMA_API=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_CMDLINE_OVERWRITE=y + CONFIG_CMDLINE="console=ttySC1,38400N81 root=/dev/nfs ip=:::::eth1:autoconf rw" + CONFIG_PCI=y +diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig +index 
ea547d596fcf..e1f11071da4c 100644 +--- a/arch/sparc/configs/sparc64_defconfig ++++ b/arch/sparc/configs/sparc64_defconfig +@@ -22,7 +22,7 @@ CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y + CONFIG_NUMA=y + CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_SUN_LDOMS=y + CONFIG_PCI=y + CONFIG_PCI_MSI=y +diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig +new file mode 100644 +index 000000000000..939c63ba7e6e +--- /dev/null ++++ b/arch/tile/configs/tilegx_defconfig +@@ -0,0 +1,411 @@ ++CONFIG_TILEGX=y ++CONFIG_SYSVIPC=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_FHANDLE=y ++CONFIG_AUDIT=y ++CONFIG_NO_HZ=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++CONFIG_TASKSTATS=y ++CONFIG_TASK_DELAY_ACCT=y ++CONFIG_TASK_XACCT=y ++CONFIG_TASK_IO_ACCOUNTING=y ++CONFIG_LOG_BUF_SHIFT=19 ++CONFIG_CGROUPS=y ++CONFIG_CGROUP_DEBUG=y ++CONFIG_CGROUP_DEVICE=y ++CONFIG_CPUSETS=y ++CONFIG_CGROUP_CPUACCT=y ++CONFIG_CGROUP_SCHED=y ++CONFIG_RT_GROUP_SCHED=y ++CONFIG_BLK_CGROUP=y ++CONFIG_NAMESPACES=y ++CONFIG_RELAY=y ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_RD_XZ=y ++CONFIG_SYSCTL_SYSCALL=y ++CONFIG_EMBEDDED=y ++# CONFIG_COMPAT_BRK is not set ++CONFIG_PROFILING=y ++CONFIG_KPROBES=y ++CONFIG_MODULES=y ++CONFIG_MODULE_FORCE_LOAD=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_BLK_DEV_INTEGRITY=y ++CONFIG_PARTITION_ADVANCED=y ++CONFIG_OSF_PARTITION=y ++CONFIG_AMIGA_PARTITION=y ++CONFIG_MAC_PARTITION=y ++CONFIG_BSD_DISKLABEL=y ++CONFIG_MINIX_SUBPARTITION=y ++CONFIG_SOLARIS_X86_PARTITION=y ++CONFIG_UNIXWARE_DISKLABEL=y ++CONFIG_SGI_PARTITION=y ++CONFIG_SUN_PARTITION=y ++CONFIG_KARMA_PARTITION=y ++CONFIG_CFQ_GROUP_IOSCHED=y ++CONFIG_NR_CPUS=100 ++CONFIG_HZ_100=y ++# CONFIG_COMPACTION is not set ++CONFIG_PREEMPT=y ++CONFIG_TILE_PCI_IO=y ++CONFIG_PCI_DEBUG=y ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_BINFMT_MISC=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_XFRM_USER=y ++CONFIG_XFRM_SUB_POLICY=y ++CONFIG_XFRM_STATISTICS=y ++CONFIG_NET_KEY=m ++CONFIG_NET_KEY_MIGRATE=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_IP_ADVANCED_ROUTER=y ++CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IP_ROUTE_MULTIPATH=y ++CONFIG_IP_ROUTE_VERBOSE=y ++CONFIG_NET_IPIP=m ++CONFIG_IP_MROUTE=y ++CONFIG_IP_PIMSM_V1=y ++CONFIG_IP_PIMSM_V2=y ++CONFIG_SYN_COOKIES=y ++CONFIG_INET_AH=m ++CONFIG_INET_ESP=m ++CONFIG_INET_IPCOMP=m ++CONFIG_INET_XFRM_MODE_TRANSPORT=m ++CONFIG_INET_XFRM_MODE_TUNNEL=m ++CONFIG_INET_XFRM_MODE_BEET=m ++CONFIG_INET_DIAG=m ++CONFIG_TCP_CONG_ADVANCED=y ++CONFIG_TCP_CONG_HSTCP=m ++CONFIG_TCP_CONG_HYBLA=m ++CONFIG_TCP_CONG_SCALABLE=m ++CONFIG_TCP_CONG_LP=m ++CONFIG_TCP_CONG_VENO=m ++CONFIG_TCP_CONG_YEAH=m ++CONFIG_TCP_CONG_ILLINOIS=m ++CONFIG_TCP_MD5SIG=y ++CONFIG_IPV6=y ++CONFIG_IPV6_ROUTER_PREF=y ++CONFIG_IPV6_ROUTE_INFO=y ++CONFIG_IPV6_OPTIMISTIC_DAD=y ++CONFIG_INET6_AH=m ++CONFIG_INET6_ESP=m ++CONFIG_INET6_IPCOMP=m ++CONFIG_IPV6_MIP6=m ++CONFIG_INET6_XFRM_MODE_TRANSPORT=m ++CONFIG_INET6_XFRM_MODE_TUNNEL=m ++CONFIG_INET6_XFRM_MODE_BEET=m ++CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m ++CONFIG_IPV6_SIT=m ++CONFIG_IPV6_TUNNEL=m ++CONFIG_IPV6_MULTIPLE_TABLES=y ++CONFIG_IPV6_MROUTE=y ++CONFIG_IPV6_PIMSM_V2=y ++CONFIG_NETLABEL=y ++CONFIG_RDS=m ++CONFIG_RDS_TCP=m ++CONFIG_BRIDGE=m ++CONFIG_VLAN_8021Q=m ++CONFIG_VLAN_8021Q_GVRP=y ++CONFIG_PHONET=m ++CONFIG_NET_SCHED=y ++CONFIG_NET_SCH_CBQ=m ++CONFIG_NET_SCH_HTB=m ++CONFIG_NET_SCH_HFSC=m ++CONFIG_NET_SCH_PRIO=m ++CONFIG_NET_SCH_MULTIQ=m ++CONFIG_NET_SCH_RED=m ++CONFIG_NET_SCH_SFQ=m ++CONFIG_NET_SCH_TEQL=m 
++CONFIG_NET_SCH_TBF=m ++CONFIG_NET_SCH_GRED=m ++CONFIG_NET_SCH_DSMARK=m ++CONFIG_NET_SCH_NETEM=m ++CONFIG_NET_SCH_DRR=m ++CONFIG_NET_SCH_INGRESS=m ++CONFIG_NET_CLS_BASIC=m ++CONFIG_NET_CLS_TCINDEX=m ++CONFIG_NET_CLS_ROUTE4=m ++CONFIG_NET_CLS_FW=m ++CONFIG_NET_CLS_U32=m ++CONFIG_CLS_U32_PERF=y ++CONFIG_CLS_U32_MARK=y ++CONFIG_NET_CLS_RSVP=m ++CONFIG_NET_CLS_RSVP6=m ++CONFIG_NET_CLS_FLOW=m ++CONFIG_NET_CLS_CGROUP=y ++CONFIG_NET_EMATCH=y ++CONFIG_NET_EMATCH_CMP=m ++CONFIG_NET_EMATCH_NBYTE=m ++CONFIG_NET_EMATCH_U32=m ++CONFIG_NET_EMATCH_META=m ++CONFIG_NET_EMATCH_TEXT=m ++CONFIG_NET_CLS_ACT=y ++CONFIG_NET_ACT_POLICE=m ++CONFIG_NET_ACT_GACT=m ++CONFIG_GACT_PROB=y ++CONFIG_NET_ACT_MIRRED=m ++CONFIG_NET_ACT_NAT=m ++CONFIG_NET_ACT_PEDIT=m ++CONFIG_NET_ACT_SIMP=m ++CONFIG_NET_ACT_SKBEDIT=m ++CONFIG_NET_CLS_IND=y ++CONFIG_DCB=y ++CONFIG_DNS_RESOLVER=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_DEVTMPFS=y ++CONFIG_DEVTMPFS_MOUNT=y ++CONFIG_CONNECTOR=y ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_CRYPTOLOOP=m ++CONFIG_BLK_DEV_SX8=m ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++CONFIG_ATA_OVER_ETH=m ++CONFIG_RAID_ATTRS=m ++CONFIG_BLK_DEV_SD=y ++CONFIG_SCSI_CONSTANTS=y ++CONFIG_SCSI_LOGGING=y ++CONFIG_SCSI_SAS_ATA=y ++CONFIG_ISCSI_TCP=m ++CONFIG_SCSI_MVSAS=y ++# CONFIG_SCSI_MVSAS_DEBUG is not set ++CONFIG_SCSI_MVSAS_TASKLET=y ++CONFIG_ATA=y ++CONFIG_SATA_AHCI=y ++CONFIG_SATA_SIL24=y ++# CONFIG_ATA_SFF is not set ++CONFIG_MD=y ++CONFIG_BLK_DEV_MD=y ++CONFIG_MD_LINEAR=m ++CONFIG_MD_RAID0=m ++CONFIG_MD_RAID1=m ++CONFIG_MD_RAID10=m ++CONFIG_MD_RAID456=m ++CONFIG_MD_FAULTY=m ++CONFIG_BLK_DEV_DM=m ++CONFIG_DM_DEBUG=y ++CONFIG_DM_CRYPT=m ++CONFIG_DM_SNAPSHOT=m ++CONFIG_DM_MIRROR=m ++CONFIG_DM_LOG_USERSPACE=m ++CONFIG_DM_ZERO=m ++CONFIG_DM_MULTIPATH=m ++CONFIG_DM_MULTIPATH_QL=m ++CONFIG_DM_MULTIPATH_ST=m ++CONFIG_DM_DELAY=m ++CONFIG_DM_UEVENT=y ++CONFIG_TARGET_CORE=m ++CONFIG_TCM_IBLOCK=m ++CONFIG_TCM_FILEIO=m ++CONFIG_TCM_PSCSI=m ++CONFIG_LOOPBACK_TARGET=m ++CONFIG_ISCSI_TARGET=m ++CONFIG_FUSION=y ++CONFIG_FUSION_SAS=y ++CONFIG_NETDEVICES=y ++CONFIG_BONDING=m ++CONFIG_DUMMY=m ++CONFIG_IFB=m ++CONFIG_MACVLAN=m ++CONFIG_MACVTAP=m ++CONFIG_NETCONSOLE=m ++CONFIG_NETCONSOLE_DYNAMIC=y ++CONFIG_TUN=y ++CONFIG_VETH=m ++CONFIG_NET_DSA_MV88E6060=y ++CONFIG_NET_DSA_MV88E6XXX=y ++CONFIG_SKY2=y ++CONFIG_PTP_1588_CLOCK_TILEGX=y ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_SERIAL_TILEGX=y ++CONFIG_HW_RANDOM=y ++CONFIG_HW_RANDOM_TIMERIOMEM=m ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_WATCHDOG_NOWAYOUT=y ++# CONFIG_VGA_ARB is not set ++CONFIG_DRM=m ++CONFIG_DRM_TDFX=m ++CONFIG_DRM_R128=m ++CONFIG_DRM_MGA=m ++CONFIG_DRM_VIA=m ++CONFIG_DRM_SAVAGE=m ++CONFIG_USB=y ++CONFIG_USB_EHCI_HCD=y ++CONFIG_USB_OHCI_HCD=y ++CONFIG_USB_STORAGE=y ++CONFIG_EDAC=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_TILE=y ++CONFIG_EXT2_FS=y ++CONFIG_EXT2_FS_XATTR=y ++CONFIG_EXT2_FS_POSIX_ACL=y ++CONFIG_EXT2_FS_SECURITY=y ++CONFIG_EXT2_FS_XIP=y ++CONFIG_EXT3_FS=y ++CONFIG_EXT3_FS_POSIX_ACL=y ++CONFIG_EXT3_FS_SECURITY=y ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_FS_POSIX_ACL=y ++CONFIG_EXT4_FS_SECURITY=y ++CONFIG_XFS_FS=y ++CONFIG_XFS_QUOTA=y ++CONFIG_XFS_POSIX_ACL=y ++CONFIG_GFS2_FS=m ++CONFIG_GFS2_FS_LOCKING_DLM=y ++CONFIG_BTRFS_FS=m ++CONFIG_BTRFS_FS_POSIX_ACL=y ++CONFIG_QUOTA=y 
++CONFIG_QUOTA_NETLINK_INTERFACE=y ++# CONFIG_PRINT_QUOTA_WARNING is not set ++CONFIG_QFMT_V2=y ++CONFIG_AUTOFS4_FS=m ++CONFIG_FUSE_FS=y ++CONFIG_CUSE=m ++CONFIG_FSCACHE=m ++CONFIG_FSCACHE_STATS=y ++CONFIG_CACHEFILES=m ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_ZISOFS=y ++CONFIG_UDF_FS=m ++CONFIG_MSDOS_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_FAT_DEFAULT_IOCHARSET="ascii" ++CONFIG_PROC_KCORE=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_HUGETLBFS=y ++CONFIG_ECRYPT_FS=m ++CONFIG_CRAMFS=m ++CONFIG_SQUASHFS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3_ACL=y ++CONFIG_NFS_V4=m ++CONFIG_NFS_V4_1=y ++CONFIG_NFS_FSCACHE=y ++CONFIG_NFSD=m ++CONFIG_NFSD_V3_ACL=y ++CONFIG_NFSD_V4=y ++CONFIG_CIFS=m ++CONFIG_CIFS_STATS=y ++CONFIG_CIFS_WEAK_PW_HASH=y ++CONFIG_CIFS_UPCALL=y ++CONFIG_CIFS_XATTR=y ++CONFIG_CIFS_POSIX=y ++CONFIG_CIFS_DFS_UPCALL=y ++CONFIG_CIFS_FSCACHE=y ++CONFIG_NLS_DEFAULT="utf8" ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_CODEPAGE_737=m ++CONFIG_NLS_CODEPAGE_775=m ++CONFIG_NLS_CODEPAGE_850=m ++CONFIG_NLS_CODEPAGE_852=m ++CONFIG_NLS_CODEPAGE_855=m ++CONFIG_NLS_CODEPAGE_857=m ++CONFIG_NLS_CODEPAGE_860=m ++CONFIG_NLS_CODEPAGE_861=m ++CONFIG_NLS_CODEPAGE_862=m ++CONFIG_NLS_CODEPAGE_863=m ++CONFIG_NLS_CODEPAGE_864=m ++CONFIG_NLS_CODEPAGE_865=m ++CONFIG_NLS_CODEPAGE_866=m ++CONFIG_NLS_CODEPAGE_869=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_CODEPAGE_950=m ++CONFIG_NLS_CODEPAGE_932=m ++CONFIG_NLS_CODEPAGE_949=m ++CONFIG_NLS_CODEPAGE_874=m ++CONFIG_NLS_ISO8859_8=m ++CONFIG_NLS_CODEPAGE_1250=m ++CONFIG_NLS_CODEPAGE_1251=m ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_ISO8859_2=m ++CONFIG_NLS_ISO8859_3=m ++CONFIG_NLS_ISO8859_4=m ++CONFIG_NLS_ISO8859_5=m ++CONFIG_NLS_ISO8859_6=m ++CONFIG_NLS_ISO8859_7=m ++CONFIG_NLS_ISO8859_9=m ++CONFIG_NLS_ISO8859_13=m ++CONFIG_NLS_ISO8859_14=m ++CONFIG_NLS_ISO8859_15=m ++CONFIG_NLS_KOI8_R=m ++CONFIG_NLS_KOI8_U=m ++CONFIG_NLS_UTF8=m ++CONFIG_DLM=m ++CONFIG_DLM_DEBUG=y ++CONFIG_DYNAMIC_DEBUG=y ++CONFIG_DEBUG_INFO=y ++CONFIG_DEBUG_INFO_REDUCED=y ++# CONFIG_ENABLE_WARN_DEPRECATED is not set ++CONFIG_STRIP_ASM_SYMS=y ++CONFIG_DEBUG_FS=y ++CONFIG_HEADERS_CHECK=y ++# CONFIG_FRAME_POINTER is not set ++CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y ++CONFIG_DEBUG_VM=y ++CONFIG_DEBUG_MEMORY_INIT=y ++CONFIG_DEBUG_STACKOVERFLOW=y ++CONFIG_LOCKUP_DETECTOR=y ++CONFIG_SCHEDSTATS=y ++CONFIG_TIMER_STATS=y ++CONFIG_DEBUG_LIST=y ++CONFIG_DEBUG_CREDENTIALS=y ++CONFIG_RCU_CPU_STALL_TIMEOUT=60 ++CONFIG_ASYNC_RAID6_TEST=m ++CONFIG_KGDB=y ++CONFIG_SECURITY=y ++CONFIG_SECURITYFS=y ++CONFIG_SECURITY_NETWORK=y ++CONFIG_SECURITY_NETWORK_XFRM=y ++CONFIG_SECURITY_SELINUX=y ++CONFIG_SECURITY_SELINUX_BOOTPARAM=y ++CONFIG_SECURITY_SELINUX_DISABLE=y ++CONFIG_CRYPTO_PCRYPT=m ++CONFIG_CRYPTO_CRYPTD=m ++CONFIG_CRYPTO_TEST=m ++CONFIG_CRYPTO_CCM=m ++CONFIG_CRYPTO_GCM=m ++CONFIG_CRYPTO_CTS=m ++CONFIG_CRYPTO_LRW=m ++CONFIG_CRYPTO_PCBC=m ++CONFIG_CRYPTO_XTS=m ++CONFIG_CRYPTO_HMAC=y ++CONFIG_CRYPTO_XCBC=m ++CONFIG_CRYPTO_VMAC=m ++CONFIG_CRYPTO_MICHAEL_MIC=m ++CONFIG_CRYPTO_RMD128=m ++CONFIG_CRYPTO_RMD160=m ++CONFIG_CRYPTO_RMD256=m ++CONFIG_CRYPTO_RMD320=m ++CONFIG_CRYPTO_SHA1=y ++CONFIG_CRYPTO_SHA512=m ++CONFIG_CRYPTO_TGR192=m ++CONFIG_CRYPTO_WP512=m ++CONFIG_CRYPTO_ANUBIS=m ++CONFIG_CRYPTO_BLOWFISH=m ++CONFIG_CRYPTO_CAMELLIA=m ++CONFIG_CRYPTO_CAST5=m ++CONFIG_CRYPTO_CAST6=m ++CONFIG_CRYPTO_FCRYPT=m ++CONFIG_CRYPTO_KHAZAD=m ++CONFIG_CRYPTO_SEED=m ++CONFIG_CRYPTO_SERPENT=m ++CONFIG_CRYPTO_TEA=m ++CONFIG_CRYPTO_TWOFISH=m ++CONFIG_CRYPTO_LZO=m +diff --git a/arch/tile/configs/tilepro_defconfig 
b/arch/tile/configs/tilepro_defconfig +new file mode 100644 +index 000000000000..e8c4003cbd81 +--- /dev/null ++++ b/arch/tile/configs/tilepro_defconfig +@@ -0,0 +1,524 @@ ++CONFIG_SYSVIPC=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_AUDIT=y ++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++CONFIG_TASKSTATS=y ++CONFIG_TASK_DELAY_ACCT=y ++CONFIG_TASK_XACCT=y ++CONFIG_TASK_IO_ACCOUNTING=y ++CONFIG_LOG_BUF_SHIFT=19 ++CONFIG_CGROUPS=y ++CONFIG_CGROUP_DEBUG=y ++CONFIG_CGROUP_DEVICE=y ++CONFIG_CPUSETS=y ++CONFIG_CGROUP_CPUACCT=y ++CONFIG_CGROUP_SCHED=y ++CONFIG_RT_GROUP_SCHED=y ++CONFIG_BLK_CGROUP=y ++CONFIG_NAMESPACES=y ++CONFIG_RELAY=y ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_RD_XZ=y ++CONFIG_SYSCTL_SYSCALL=y ++CONFIG_EMBEDDED=y ++# CONFIG_COMPAT_BRK is not set ++CONFIG_PROFILING=y ++CONFIG_MODULES=y ++CONFIG_MODULE_FORCE_LOAD=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_BLK_DEV_INTEGRITY=y ++CONFIG_PARTITION_ADVANCED=y ++CONFIG_OSF_PARTITION=y ++CONFIG_AMIGA_PARTITION=y ++CONFIG_MAC_PARTITION=y ++CONFIG_BSD_DISKLABEL=y ++CONFIG_MINIX_SUBPARTITION=y ++CONFIG_SOLARIS_X86_PARTITION=y ++CONFIG_UNIXWARE_DISKLABEL=y ++CONFIG_SGI_PARTITION=y ++CONFIG_SUN_PARTITION=y ++CONFIG_KARMA_PARTITION=y ++CONFIG_CFQ_GROUP_IOSCHED=y ++CONFIG_HZ_100=y ++# CONFIG_COMPACTION is not set ++CONFIG_PREEMPT=y ++CONFIG_PCI_DEBUG=y ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_BINFMT_MISC=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_XFRM_USER=y ++CONFIG_XFRM_SUB_POLICY=y ++CONFIG_XFRM_STATISTICS=y ++CONFIG_NET_KEY=m ++CONFIG_NET_KEY_MIGRATE=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_IP_ADVANCED_ROUTER=y ++CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IP_ROUTE_MULTIPATH=y ++CONFIG_IP_ROUTE_VERBOSE=y ++CONFIG_NET_IPIP=m ++CONFIG_IP_MROUTE=y ++CONFIG_IP_PIMSM_V1=y ++CONFIG_IP_PIMSM_V2=y ++CONFIG_SYN_COOKIES=y ++CONFIG_INET_AH=m ++CONFIG_INET_ESP=m ++CONFIG_INET_IPCOMP=m ++CONFIG_INET_XFRM_MODE_TRANSPORT=m ++CONFIG_INET_XFRM_MODE_TUNNEL=m ++CONFIG_INET_XFRM_MODE_BEET=m ++CONFIG_INET_DIAG=m ++CONFIG_TCP_CONG_ADVANCED=y ++CONFIG_TCP_CONG_HSTCP=m ++CONFIG_TCP_CONG_HYBLA=m ++CONFIG_TCP_CONG_SCALABLE=m ++CONFIG_TCP_CONG_LP=m ++CONFIG_TCP_CONG_VENO=m ++CONFIG_TCP_CONG_YEAH=m ++CONFIG_TCP_CONG_ILLINOIS=m ++CONFIG_TCP_MD5SIG=y ++CONFIG_IPV6=y ++CONFIG_IPV6_ROUTER_PREF=y ++CONFIG_IPV6_ROUTE_INFO=y ++CONFIG_IPV6_OPTIMISTIC_DAD=y ++CONFIG_INET6_AH=m ++CONFIG_INET6_ESP=m ++CONFIG_INET6_IPCOMP=m ++CONFIG_IPV6_MIP6=m ++CONFIG_INET6_XFRM_MODE_TRANSPORT=m ++CONFIG_INET6_XFRM_MODE_TUNNEL=m ++CONFIG_INET6_XFRM_MODE_BEET=m ++CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m ++CONFIG_IPV6_SIT=m ++CONFIG_IPV6_TUNNEL=m ++CONFIG_IPV6_MULTIPLE_TABLES=y ++CONFIG_IPV6_MROUTE=y ++CONFIG_IPV6_PIMSM_V2=y ++CONFIG_NETLABEL=y ++CONFIG_NETFILTER=y ++CONFIG_NF_CONNTRACK=m ++CONFIG_NF_CONNTRACK_SECMARK=y ++CONFIG_NF_CONNTRACK_ZONES=y ++CONFIG_NF_CONNTRACK_EVENTS=y ++CONFIG_NF_CT_PROTO_DCCP=m ++CONFIG_NF_CT_PROTO_UDPLITE=m ++CONFIG_NF_CONNTRACK_AMANDA=m ++CONFIG_NF_CONNTRACK_FTP=m ++CONFIG_NF_CONNTRACK_H323=m ++CONFIG_NF_CONNTRACK_IRC=m ++CONFIG_NF_CONNTRACK_NETBIOS_NS=m ++CONFIG_NF_CONNTRACK_PPTP=m ++CONFIG_NF_CONNTRACK_SANE=m ++CONFIG_NF_CONNTRACK_SIP=m ++CONFIG_NF_CONNTRACK_TFTP=m ++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m ++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m ++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m ++CONFIG_NETFILTER_XT_TARGET_DSCP=m ++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m ++CONFIG_NETFILTER_XT_TARGET_MARK=m ++CONFIG_NETFILTER_XT_TARGET_NFLOG=m ++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 
++CONFIG_NETFILTER_XT_TARGET_NOTRACK=m ++CONFIG_NETFILTER_XT_TARGET_TEE=m ++CONFIG_NETFILTER_XT_TARGET_TPROXY=m ++CONFIG_NETFILTER_XT_TARGET_TRACE=m ++CONFIG_NETFILTER_XT_TARGET_SECMARK=m ++CONFIG_NETFILTER_XT_TARGET_TCPMSS=m ++CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m ++CONFIG_NETFILTER_XT_MATCH_CLUSTER=m ++CONFIG_NETFILTER_XT_MATCH_COMMENT=m ++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m ++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m ++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m ++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m ++CONFIG_NETFILTER_XT_MATCH_DCCP=m ++CONFIG_NETFILTER_XT_MATCH_DSCP=m ++CONFIG_NETFILTER_XT_MATCH_ESP=m ++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m ++CONFIG_NETFILTER_XT_MATCH_HELPER=m ++CONFIG_NETFILTER_XT_MATCH_IPRANGE=m ++CONFIG_NETFILTER_XT_MATCH_IPVS=m ++CONFIG_NETFILTER_XT_MATCH_LENGTH=m ++CONFIG_NETFILTER_XT_MATCH_LIMIT=m ++CONFIG_NETFILTER_XT_MATCH_MAC=m ++CONFIG_NETFILTER_XT_MATCH_MARK=m ++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m ++CONFIG_NETFILTER_XT_MATCH_OSF=m ++CONFIG_NETFILTER_XT_MATCH_OWNER=m ++CONFIG_NETFILTER_XT_MATCH_POLICY=m ++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m ++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m ++CONFIG_NETFILTER_XT_MATCH_QUOTA=m ++CONFIG_NETFILTER_XT_MATCH_RATEEST=m ++CONFIG_NETFILTER_XT_MATCH_REALM=m ++CONFIG_NETFILTER_XT_MATCH_RECENT=m ++CONFIG_NETFILTER_XT_MATCH_SOCKET=m ++CONFIG_NETFILTER_XT_MATCH_STATE=m ++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m ++CONFIG_NETFILTER_XT_MATCH_STRING=m ++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m ++CONFIG_NETFILTER_XT_MATCH_TIME=m ++CONFIG_NETFILTER_XT_MATCH_U32=m ++CONFIG_IP_VS=m ++CONFIG_IP_VS_IPV6=y ++CONFIG_IP_VS_PROTO_TCP=y ++CONFIG_IP_VS_PROTO_UDP=y ++CONFIG_IP_VS_PROTO_ESP=y ++CONFIG_IP_VS_PROTO_AH=y ++CONFIG_IP_VS_PROTO_SCTP=y ++CONFIG_IP_VS_RR=m ++CONFIG_IP_VS_WRR=m ++CONFIG_IP_VS_LC=m ++CONFIG_IP_VS_WLC=m ++CONFIG_IP_VS_LBLC=m ++CONFIG_IP_VS_LBLCR=m ++CONFIG_IP_VS_SED=m ++CONFIG_IP_VS_NQ=m ++CONFIG_NF_CONNTRACK_IPV4=m ++# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set ++CONFIG_IP_NF_IPTABLES=y ++CONFIG_IP_NF_MATCH_AH=m ++CONFIG_IP_NF_MATCH_ECN=m ++CONFIG_IP_NF_MATCH_TTL=m ++CONFIG_IP_NF_FILTER=y ++CONFIG_IP_NF_TARGET_REJECT=y ++CONFIG_IP_NF_MANGLE=m ++CONFIG_IP_NF_TARGET_ECN=m ++CONFIG_IP_NF_TARGET_TTL=m ++CONFIG_IP_NF_RAW=m ++CONFIG_IP_NF_SECURITY=m ++CONFIG_IP_NF_ARPTABLES=m ++CONFIG_IP_NF_ARPFILTER=m ++CONFIG_IP_NF_ARP_MANGLE=m ++CONFIG_NF_CONNTRACK_IPV6=m ++CONFIG_IP6_NF_MATCH_AH=m ++CONFIG_IP6_NF_MATCH_EUI64=m ++CONFIG_IP6_NF_MATCH_FRAG=m ++CONFIG_IP6_NF_MATCH_OPTS=m ++CONFIG_IP6_NF_MATCH_HL=m ++CONFIG_IP6_NF_MATCH_IPV6HEADER=m ++CONFIG_IP6_NF_MATCH_MH=m ++CONFIG_IP6_NF_MATCH_RT=m ++CONFIG_IP6_NF_TARGET_HL=m ++CONFIG_IP6_NF_FILTER=m ++CONFIG_IP6_NF_TARGET_REJECT=m ++CONFIG_IP6_NF_MANGLE=m ++CONFIG_IP6_NF_RAW=m ++CONFIG_IP6_NF_SECURITY=m ++CONFIG_BRIDGE_NF_EBTABLES=m ++CONFIG_BRIDGE_EBT_BROUTE=m ++CONFIG_BRIDGE_EBT_T_FILTER=m ++CONFIG_BRIDGE_EBT_T_NAT=m ++CONFIG_BRIDGE_EBT_802_3=m ++CONFIG_BRIDGE_EBT_AMONG=m ++CONFIG_BRIDGE_EBT_ARP=m ++CONFIG_BRIDGE_EBT_IP=m ++CONFIG_BRIDGE_EBT_IP6=m ++CONFIG_BRIDGE_EBT_LIMIT=m ++CONFIG_BRIDGE_EBT_MARK=m ++CONFIG_BRIDGE_EBT_PKTTYPE=m ++CONFIG_BRIDGE_EBT_STP=m ++CONFIG_BRIDGE_EBT_VLAN=m ++CONFIG_BRIDGE_EBT_ARPREPLY=m ++CONFIG_BRIDGE_EBT_DNAT=m ++CONFIG_BRIDGE_EBT_MARK_T=m ++CONFIG_BRIDGE_EBT_REDIRECT=m ++CONFIG_BRIDGE_EBT_SNAT=m ++CONFIG_BRIDGE_EBT_LOG=m ++CONFIG_BRIDGE_EBT_ULOG=m ++CONFIG_BRIDGE_EBT_NFLOG=m ++CONFIG_RDS=m ++CONFIG_RDS_TCP=m ++CONFIG_BRIDGE=m ++CONFIG_VLAN_8021Q=m ++CONFIG_VLAN_8021Q_GVRP=y ++CONFIG_PHONET=m ++CONFIG_NET_SCHED=y ++CONFIG_NET_SCH_CBQ=m ++CONFIG_NET_SCH_HTB=m 
++CONFIG_NET_SCH_HFSC=m ++CONFIG_NET_SCH_PRIO=m ++CONFIG_NET_SCH_MULTIQ=m ++CONFIG_NET_SCH_RED=m ++CONFIG_NET_SCH_SFQ=m ++CONFIG_NET_SCH_TEQL=m ++CONFIG_NET_SCH_TBF=m ++CONFIG_NET_SCH_GRED=m ++CONFIG_NET_SCH_DSMARK=m ++CONFIG_NET_SCH_NETEM=m ++CONFIG_NET_SCH_DRR=m ++CONFIG_NET_SCH_INGRESS=m ++CONFIG_NET_CLS_BASIC=m ++CONFIG_NET_CLS_TCINDEX=m ++CONFIG_NET_CLS_ROUTE4=m ++CONFIG_NET_CLS_FW=m ++CONFIG_NET_CLS_U32=m ++CONFIG_CLS_U32_PERF=y ++CONFIG_CLS_U32_MARK=y ++CONFIG_NET_CLS_RSVP=m ++CONFIG_NET_CLS_RSVP6=m ++CONFIG_NET_CLS_FLOW=m ++CONFIG_NET_CLS_CGROUP=y ++CONFIG_NET_EMATCH=y ++CONFIG_NET_EMATCH_CMP=m ++CONFIG_NET_EMATCH_NBYTE=m ++CONFIG_NET_EMATCH_U32=m ++CONFIG_NET_EMATCH_META=m ++CONFIG_NET_EMATCH_TEXT=m ++CONFIG_NET_CLS_ACT=y ++CONFIG_NET_ACT_POLICE=m ++CONFIG_NET_ACT_GACT=m ++CONFIG_GACT_PROB=y ++CONFIG_NET_ACT_MIRRED=m ++CONFIG_NET_ACT_IPT=m ++CONFIG_NET_ACT_NAT=m ++CONFIG_NET_ACT_PEDIT=m ++CONFIG_NET_ACT_SIMP=m ++CONFIG_NET_ACT_SKBEDIT=m ++CONFIG_NET_CLS_IND=y ++CONFIG_DCB=y ++CONFIG_DNS_RESOLVER=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_DEVTMPFS=y ++CONFIG_DEVTMPFS_MOUNT=y ++CONFIG_CONNECTOR=y ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_CRYPTOLOOP=m ++CONFIG_BLK_DEV_SX8=m ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++CONFIG_ATA_OVER_ETH=m ++CONFIG_RAID_ATTRS=m ++CONFIG_BLK_DEV_SD=y ++CONFIG_SCSI_CONSTANTS=y ++CONFIG_SCSI_LOGGING=y ++CONFIG_ATA=y ++CONFIG_SATA_SIL24=y ++# CONFIG_ATA_SFF is not set ++CONFIG_MD=y ++CONFIG_BLK_DEV_MD=y ++CONFIG_MD_LINEAR=m ++CONFIG_MD_RAID0=m ++CONFIG_MD_RAID1=m ++CONFIG_MD_RAID10=m ++CONFIG_MD_RAID456=m ++CONFIG_MD_FAULTY=m ++CONFIG_BLK_DEV_DM=m ++CONFIG_DM_DEBUG=y ++CONFIG_DM_CRYPT=m ++CONFIG_DM_SNAPSHOT=m ++CONFIG_DM_MIRROR=m ++CONFIG_DM_LOG_USERSPACE=m ++CONFIG_DM_ZERO=m ++CONFIG_DM_MULTIPATH=m ++CONFIG_DM_MULTIPATH_QL=m ++CONFIG_DM_MULTIPATH_ST=m ++CONFIG_DM_DELAY=m ++CONFIG_DM_UEVENT=y ++CONFIG_FUSION=y ++CONFIG_FUSION_SAS=y ++CONFIG_NETDEVICES=y ++CONFIG_BONDING=m ++CONFIG_DUMMY=m ++CONFIG_IFB=m ++CONFIG_MACVLAN=m ++CONFIG_MACVTAP=m ++CONFIG_NETCONSOLE=m ++CONFIG_NETCONSOLE_DYNAMIC=y ++CONFIG_TUN=y ++CONFIG_VETH=m ++CONFIG_NET_DSA_MV88E6060=y ++CONFIG_NET_DSA_MV88E6XXX=y ++# CONFIG_NET_VENDOR_3COM is not set ++CONFIG_E1000E=y ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_HW_RANDOM=y ++CONFIG_HW_RANDOM_TIMERIOMEM=m ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_WATCHDOG_NOWAYOUT=y ++# CONFIG_VGA_ARB is not set ++# CONFIG_USB_SUPPORT is not set ++CONFIG_EDAC=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_TILE=y ++CONFIG_EXT2_FS=y ++CONFIG_EXT2_FS_XATTR=y ++CONFIG_EXT2_FS_POSIX_ACL=y ++CONFIG_EXT2_FS_SECURITY=y ++CONFIG_EXT2_FS_XIP=y ++CONFIG_EXT3_FS=y ++CONFIG_EXT3_FS_POSIX_ACL=y ++CONFIG_EXT3_FS_SECURITY=y ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_FS_POSIX_ACL=y ++CONFIG_EXT4_FS_SECURITY=y ++CONFIG_XFS_FS=y ++CONFIG_XFS_QUOTA=y ++CONFIG_XFS_POSIX_ACL=y ++CONFIG_GFS2_FS=m ++CONFIG_GFS2_FS_LOCKING_DLM=y ++CONFIG_BTRFS_FS=m ++CONFIG_BTRFS_FS_POSIX_ACL=y ++CONFIG_QUOTA=y ++CONFIG_QUOTA_NETLINK_INTERFACE=y ++# CONFIG_PRINT_QUOTA_WARNING is not set ++CONFIG_QFMT_V2=y ++CONFIG_AUTOFS4_FS=m ++CONFIG_FUSE_FS=y ++CONFIG_CUSE=m ++CONFIG_FSCACHE=m ++CONFIG_FSCACHE_STATS=y ++CONFIG_CACHEFILES=m ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_ZISOFS=y ++CONFIG_UDF_FS=m ++CONFIG_MSDOS_FS=m 
++CONFIG_VFAT_FS=m ++CONFIG_FAT_DEFAULT_IOCHARSET="ascii" ++CONFIG_PROC_KCORE=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_HUGETLBFS=y ++CONFIG_CONFIGFS_FS=m ++CONFIG_ECRYPT_FS=m ++CONFIG_CRAMFS=m ++CONFIG_SQUASHFS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3_ACL=y ++CONFIG_NFS_V4=m ++CONFIG_NFS_V4_1=y ++CONFIG_NFS_FSCACHE=y ++CONFIG_NFSD=m ++CONFIG_NFSD_V3_ACL=y ++CONFIG_NFSD_V4=y ++CONFIG_CIFS=m ++CONFIG_CIFS_STATS=y ++CONFIG_CIFS_WEAK_PW_HASH=y ++CONFIG_CIFS_UPCALL=y ++CONFIG_CIFS_XATTR=y ++CONFIG_CIFS_POSIX=y ++CONFIG_CIFS_DFS_UPCALL=y ++CONFIG_CIFS_FSCACHE=y ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="utf8" ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_CODEPAGE_737=m ++CONFIG_NLS_CODEPAGE_775=m ++CONFIG_NLS_CODEPAGE_850=m ++CONFIG_NLS_CODEPAGE_852=m ++CONFIG_NLS_CODEPAGE_855=m ++CONFIG_NLS_CODEPAGE_857=m ++CONFIG_NLS_CODEPAGE_860=m ++CONFIG_NLS_CODEPAGE_861=m ++CONFIG_NLS_CODEPAGE_862=m ++CONFIG_NLS_CODEPAGE_863=m ++CONFIG_NLS_CODEPAGE_864=m ++CONFIG_NLS_CODEPAGE_865=m ++CONFIG_NLS_CODEPAGE_866=m ++CONFIG_NLS_CODEPAGE_869=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_CODEPAGE_950=m ++CONFIG_NLS_CODEPAGE_932=m ++CONFIG_NLS_CODEPAGE_949=m ++CONFIG_NLS_CODEPAGE_874=m ++CONFIG_NLS_ISO8859_8=m ++CONFIG_NLS_CODEPAGE_1250=m ++CONFIG_NLS_CODEPAGE_1251=m ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_ISO8859_2=m ++CONFIG_NLS_ISO8859_3=m ++CONFIG_NLS_ISO8859_4=m ++CONFIG_NLS_ISO8859_5=m ++CONFIG_NLS_ISO8859_6=m ++CONFIG_NLS_ISO8859_7=m ++CONFIG_NLS_ISO8859_9=m ++CONFIG_NLS_ISO8859_13=m ++CONFIG_NLS_ISO8859_14=m ++CONFIG_NLS_ISO8859_15=m ++CONFIG_NLS_KOI8_R=m ++CONFIG_NLS_KOI8_U=m ++CONFIG_NLS_UTF8=m ++CONFIG_DLM=m ++CONFIG_DLM_DEBUG=y ++CONFIG_DYNAMIC_DEBUG=y ++CONFIG_DEBUG_INFO=y ++CONFIG_DEBUG_INFO_REDUCED=y ++# CONFIG_ENABLE_WARN_DEPRECATED is not set ++CONFIG_FRAME_WARN=2048 ++CONFIG_STRIP_ASM_SYMS=y ++CONFIG_DEBUG_FS=y ++CONFIG_HEADERS_CHECK=y ++# CONFIG_FRAME_POINTER is not set ++CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_DEBUG_VM=y ++CONFIG_DEBUG_MEMORY_INIT=y ++CONFIG_DEBUG_STACKOVERFLOW=y ++CONFIG_LOCKUP_DETECTOR=y ++CONFIG_SCHEDSTATS=y ++CONFIG_TIMER_STATS=y ++CONFIG_DEBUG_LIST=y ++CONFIG_DEBUG_CREDENTIALS=y ++CONFIG_RCU_CPU_STALL_TIMEOUT=60 ++CONFIG_ASYNC_RAID6_TEST=m ++CONFIG_SECURITY=y ++CONFIG_SECURITYFS=y ++CONFIG_SECURITY_NETWORK=y ++CONFIG_SECURITY_NETWORK_XFRM=y ++CONFIG_SECURITY_SELINUX=y ++CONFIG_SECURITY_SELINUX_BOOTPARAM=y ++CONFIG_SECURITY_SELINUX_DISABLE=y ++CONFIG_CRYPTO_PCRYPT=m ++CONFIG_CRYPTO_CRYPTD=m ++CONFIG_CRYPTO_TEST=m ++CONFIG_CRYPTO_CCM=m ++CONFIG_CRYPTO_GCM=m ++CONFIG_CRYPTO_CTS=m ++CONFIG_CRYPTO_LRW=m ++CONFIG_CRYPTO_PCBC=m ++CONFIG_CRYPTO_XTS=m ++CONFIG_CRYPTO_HMAC=y ++CONFIG_CRYPTO_XCBC=m ++CONFIG_CRYPTO_VMAC=m ++CONFIG_CRYPTO_MICHAEL_MIC=m ++CONFIG_CRYPTO_RMD128=m ++CONFIG_CRYPTO_RMD160=m ++CONFIG_CRYPTO_RMD256=m ++CONFIG_CRYPTO_RMD320=m ++CONFIG_CRYPTO_SHA1=y ++CONFIG_CRYPTO_SHA512=m ++CONFIG_CRYPTO_TGR192=m ++CONFIG_CRYPTO_WP512=m ++CONFIG_CRYPTO_ANUBIS=m ++CONFIG_CRYPTO_BLOWFISH=m ++CONFIG_CRYPTO_CAMELLIA=m ++CONFIG_CRYPTO_CAST5=m ++CONFIG_CRYPTO_CAST6=m ++CONFIG_CRYPTO_FCRYPT=m ++CONFIG_CRYPTO_KHAZAD=m ++CONFIG_CRYPTO_SEED=m ++CONFIG_CRYPTO_SERPENT=m ++CONFIG_CRYPTO_TEA=m ++CONFIG_CRYPTO_TWOFISH=m ++CONFIG_CRYPTO_LZO=m ++CONFIG_CRC_CCITT=m ++CONFIG_CRC7=m +diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig +index 9f908112bbb9..25452ca053b8 100644 +--- a/arch/x86/configs/i386_defconfig ++++ b/arch/x86/configs/i386_defconfig +@@ -41,7 +41,7 @@ CONFIG_SMP=y + CONFIG_X86_GENERIC=y + CONFIG_HPET_TIMER=y + 
CONFIG_SCHED_SMT=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y + CONFIG_X86_MCE=y + CONFIG_X86_REBOOTFIXUPS=y +diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig +index 1d3badfda09e..235b63cb08fa 100644 +--- a/arch/x86/configs/x86_64_defconfig ++++ b/arch/x86/configs/x86_64_defconfig +@@ -40,7 +40,7 @@ CONFIG_SMP=y + CONFIG_CALGARY_IOMMU=y + CONFIG_NR_CPUS=64 + CONFIG_SCHED_SMT=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y + CONFIG_X86_MCE=y + CONFIG_MICROCODE=y +diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt +index 0fee5fe6c899..9cf10230d5fb 100644 +--- a/kernel/Kconfig.preempt ++++ b/kernel/Kconfig.preempt +@@ -1,7 +1,7 @@ + + choice + prompt "Preemption Model" +- default PREEMPT_NONE ++ default PREEMPT + + config PREEMPT_NONE + bool "No Forced Preemption (Server)" +@@ -17,7 +17,7 @@ config PREEMPT_NONE + latencies. + + config PREEMPT_VOLUNTARY +- bool "Voluntary Kernel Preemption (Desktop)" ++ bool "Voluntary Kernel Preemption (Nothing)" + depends on !ARCH_NO_PREEMPT + help + This option reduces the latency of the kernel by adding more +@@ -32,7 +32,8 @@ config PREEMPT_VOLUNTARY + applications to run more 'smoothly' even when the system is + under load. + +- Select this if you are building a kernel for a desktop system. ++ Select this for no system in particular (choose Preemptible ++ instead on a desktop if you know what's good for you). + + config PREEMPT + bool "Preemptible Kernel (Low-Latency Desktop)" +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0004-Expose-vmsplit-for-our-poor-32-bit-users.patch b/sys-kernel/linux-image-redcore/files/5.1-0004-Expose-vmsplit-for-our-poor-32-bit-users.patch new file mode 100644 index 00000000..fd180743 --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0004-Expose-vmsplit-for-our-poor-32-bit-users.patch @@ -0,0 +1,48 @@ +From 2dd86e6f35c475b4b42b0b96a0b47a39a630736e Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Fri, 12 May 2017 13:07:37 +1000 +Subject: [PATCH 04/16] Expose vmsplit for our poor 32 bit users. + +--- + arch/x86/Kconfig | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index fe5df269452d..29ecd7d99cfc 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -1426,7 +1426,7 @@ config HIGHMEM64G + endchoice + + choice +- prompt "Memory split" if EXPERT ++ prompt "Memory split" + default VMSPLIT_3G + depends on X86_32 + ---help--- +@@ -1446,17 +1446,17 @@ choice + option alone! 
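+	  (With the default 3G/1G split, PAGE_OFFSET sits at 0xC0000000
+	  and roughly 896MB of RAM remains directly mappable as lowmem
+	  once the vmalloc area is reserved; the choices below trade user
+	  address space for more lowmem.)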
+ + config VMSPLIT_3G +- bool "3G/1G user/kernel split" ++ bool "Default 896MB lowmem (3G/1G user/kernel split)" + config VMSPLIT_3G_OPT + depends on !X86_PAE +- bool "3G/1G user/kernel split (for full 1G low memory)" ++ bool "1GB lowmem (3G/1G user/kernel split)" + config VMSPLIT_2G +- bool "2G/2G user/kernel split" ++ bool "2GB lowmem (2G/2G user/kernel split)" + config VMSPLIT_2G_OPT + depends on !X86_PAE +- bool "2G/2G user/kernel split (for full 2G low memory)" ++ bool "2GB lowmem (2G/2G user/kernel split)" + config VMSPLIT_1G +- bool "1G/3G user/kernel split" ++ bool "3GB lowmem (1G/3G user/kernel split)" + endchoice + + config PAGE_OFFSET +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0005-Create-highres-timeout-variants-of-schedule_timeout-.patch b/sys-kernel/linux-image-redcore/files/5.1-0005-Create-highres-timeout-variants-of-schedule_timeout-.patch new file mode 100644 index 00000000..5c7831cb --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0005-Create-highres-timeout-variants-of-schedule_timeout-.patch @@ -0,0 +1,153 @@ +From 35c6a7df0bc36fdf3cb8e14c0ba8c73b6b17dded Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Sat, 12 Aug 2017 11:53:39 +1000 +Subject: [PATCH 05/16] Create highres timeout variants of schedule_timeout + functions. + +--- + include/linux/freezer.h | 1 + + include/linux/sched.h | 31 ++++++++++++++++-- + kernel/time/hrtimer.c | 71 +++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 101 insertions(+), 2 deletions(-) + +diff --git a/include/linux/freezer.h b/include/linux/freezer.h +index 21f5aa0b217f..ee9b46394fdf 100644 +--- a/include/linux/freezer.h ++++ b/include/linux/freezer.h +@@ -297,6 +297,7 @@ static inline void set_freezable(void) {} + #define wait_event_freezekillable_unsafe(wq, condition) \ + wait_event_killable(wq, condition) + ++#define pm_freezing (false) + #endif /* !CONFIG_FREEZER */ + + #endif /* FREEZER_H_INCLUDED */ +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 71d849ef5ee2..14ab8a8f8b1c 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -214,13 +214,40 @@ struct task_group; + + extern void scheduler_tick(void); + +-#define MAX_SCHEDULE_TIMEOUT LONG_MAX +- ++#define MAX_SCHEDULE_TIMEOUT LONG_MAX + extern long schedule_timeout(long timeout); + extern long schedule_timeout_interruptible(long timeout); + extern long schedule_timeout_killable(long timeout); + extern long schedule_timeout_uninterruptible(long timeout); + extern long schedule_timeout_idle(long timeout); ++ ++#ifdef CONFIG_HIGH_RES_TIMERS ++extern long schedule_msec_hrtimeout(long timeout); ++extern long schedule_min_hrtimeout(void); ++extern long schedule_msec_hrtimeout_interruptible(long timeout); ++extern long schedule_msec_hrtimeout_uninterruptible(long timeout); ++#else ++static inline long schedule_msec_hrtimeout(long timeout) ++{ ++ return schedule_timeout(msecs_to_jiffies(timeout)); ++} ++ ++static inline long schedule_min_hrtimeout(void) ++{ ++ return schedule_timeout(1); ++} ++ ++static inline long schedule_msec_hrtimeout_interruptible(long timeout) ++{ ++ return schedule_timeout_interruptible(msecs_to_jiffies(timeout)); ++} ++ ++static inline long schedule_msec_hrtimeout_uninterruptible(long timeout) ++{ ++ return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout)); ++} ++#endif ++ + asmlinkage void schedule(void); + extern void schedule_preempt_disabled(void); + +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 41dfff23c1f9..c6ea49693bca 100644 
+--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -2017,3 +2017,74 @@ int __sched schedule_hrtimeout(ktime_t *expires, + return schedule_hrtimeout_range(expires, 0, mode); + } + EXPORT_SYMBOL_GPL(schedule_hrtimeout); ++ ++/* ++ * As per schedule_hrtimeout but taskes a millisecond value and returns how ++ * many milliseconds are left. ++ */ ++long __sched schedule_msec_hrtimeout(long timeout) ++{ ++ struct hrtimer_sleeper t; ++ int delta, secs, jiffs; ++ ktime_t expires; ++ ++ if (!timeout) { ++ __set_current_state(TASK_RUNNING); ++ return 0; ++ } ++ ++ jiffs = msecs_to_jiffies(timeout); ++ /* ++ * If regular timer resolution is adequate or hrtimer resolution is not ++ * (yet) better than Hz, as would occur during startup, use regular ++ * timers. ++ */ ++ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ) ++ return schedule_timeout(jiffs); ++ ++ secs = timeout / 1000; ++ delta = (timeout % 1000) * NSEC_PER_MSEC; ++ expires = ktime_set(secs, delta); ++ ++ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ hrtimer_set_expires_range_ns(&t.timer, expires, delta); ++ ++ hrtimer_init_sleeper(&t, current); ++ ++ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL); ++ ++ if (likely(t.task)) ++ schedule(); ++ ++ hrtimer_cancel(&t.timer); ++ destroy_hrtimer_on_stack(&t.timer); ++ ++ __set_current_state(TASK_RUNNING); ++ ++ expires = hrtimer_expires_remaining(&t.timer); ++ timeout = ktime_to_ms(expires); ++ return timeout < 0 ? 0 : timeout; ++} ++ ++EXPORT_SYMBOL(schedule_msec_hrtimeout); ++ ++long __sched schedule_min_hrtimeout(void) ++{ ++ return schedule_msec_hrtimeout(1); ++} ++ ++EXPORT_SYMBOL(schedule_min_hrtimeout); ++ ++long __sched schedule_msec_hrtimeout_interruptible(long timeout) ++{ ++ __set_current_state(TASK_INTERRUPTIBLE); ++ return schedule_msec_hrtimeout(timeout); ++} ++EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible); ++ ++long __sched schedule_msec_hrtimeout_uninterruptible(long timeout) ++{ ++ __set_current_state(TASK_UNINTERRUPTIBLE); ++ return schedule_msec_hrtimeout(timeout); ++} ++EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible); +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0006-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch b/sys-kernel/linux-image-redcore/files/5.1-0006-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch new file mode 100644 index 00000000..4c5c24e7 --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0006-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch @@ -0,0 +1,49 @@ +From cda5868e93585d3751bcb991e00735502cba2566 Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Sat, 5 Nov 2016 09:27:36 +1100 +Subject: [PATCH 06/16] Special case calls of schedule_timeout(1) to use the + min hrtimeout of 1ms, working around low Hz resolutions. + +--- + kernel/time/timer.c | 16 ++++++++++++++-- + 1 file changed, 14 insertions(+), 2 deletions(-) + +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index 926ab73595a2..98803a47491c 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -1800,6 +1800,18 @@ signed long __sched schedule_timeout(signed long timeout) + + expire = timeout + jiffies; + ++#ifdef CONFIG_HIGH_RES_TIMERS ++ if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ /* ++ * Special case 1 as being a request for the minimum timeout ++ * and use highres timers to timeout after 1ms to workaround ++ * the granularity of low Hz tick timers. 
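++	 * For example, at HZ=100 one jiffy is 10ms, so a nominal one
++	 * jiffy sleep can overshoot a short wait by an order of
++	 * magnitude; the 1ms highres timeout keeps it close to 1ms.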
++ */ ++ if (!schedule_min_hrtimeout()) ++ return 0; ++ goto out_timeout; ++ } ++#endif + timer.task = current; + timer_setup_on_stack(&timer.timer, process_timeout, 0); + __mod_timer(&timer.timer, expire, 0); +@@ -1808,10 +1820,10 @@ signed long __sched schedule_timeout(signed long timeout) + + /* Remove the timer from the object tracker */ + destroy_timer_on_stack(&timer.timer); +- ++out_timeout: + timeout = expire - jiffies; + +- out: ++out: + return timeout < 0 ? 0 : timeout; + } + EXPORT_SYMBOL(schedule_timeout); +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0007-Convert-msleep-to-use-hrtimers-when-active.patch b/sys-kernel/linux-image-redcore/files/5.1-0007-Convert-msleep-to-use-hrtimers-when-active.patch new file mode 100644 index 00000000..9dead86e --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0007-Convert-msleep-to-use-hrtimers-when-active.patch @@ -0,0 +1,54 @@ +From b713b0c571fa869fec376742be0e9c217ab38dab Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Fri, 4 Nov 2016 09:25:54 +1100 +Subject: [PATCH 07/16] Convert msleep to use hrtimers when active. + +--- + kernel/time/timer.c | 24 ++++++++++++++++++++++-- + 1 file changed, 22 insertions(+), 2 deletions(-) + +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index 98803a47491c..3ab277ba0f44 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -1964,7 +1964,19 @@ void __init init_timers(void) + */ + void msleep(unsigned int msecs) + { +- unsigned long timeout = msecs_to_jiffies(msecs) + 1; ++ int jiffs = msecs_to_jiffies(msecs); ++ unsigned long timeout; ++ ++ /* ++ * Use high resolution timers where the resolution of tick based ++ * timers is inadequate. ++ */ ++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ while (msecs) ++ msecs = schedule_msec_hrtimeout_uninterruptible(msecs); ++ return; ++ } ++ timeout = msecs_to_jiffies(msecs) + 1; + + while (timeout) + timeout = schedule_timeout_uninterruptible(timeout); +@@ -1978,7 +1990,15 @@ EXPORT_SYMBOL(msleep); + */ + unsigned long msleep_interruptible(unsigned int msecs) + { +- unsigned long timeout = msecs_to_jiffies(msecs) + 1; ++ int jiffs = msecs_to_jiffies(msecs); ++ unsigned long timeout; ++ ++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ while (msecs && !signal_pending(current)) ++ msecs = schedule_msec_hrtimeout_interruptible(msecs); ++ return msecs; ++ } ++ timeout = msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) + timeout = schedule_timeout_interruptible(timeout); +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch b/sys-kernel/linux-image-redcore/files/5.1-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch new file mode 100644 index 00000000..b21caaeb --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch @@ -0,0 +1,995 @@ +From 97bf976c1a61af48431ce97bc129a0f448e8c735 Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Mon, 20 Feb 2017 13:28:30 +1100 +Subject: [PATCH 08/16] Replace all schedule timeout(1) with + schedule_min_hrtimeout() + +--- + drivers/block/swim.c | 6 +- + drivers/char/ipmi/ipmi_msghandler.c | 2 +- + drivers/char/ipmi/ipmi_ssif.c | 2 +- + drivers/char/snsc.c | 4 +- + drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2 +- + drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 2 +- + drivers/media/pci/ivtv/ivtv-ioctl.c | 2 +- + 
drivers/media/pci/ivtv/ivtv-streams.c | 2 +- + drivers/mfd/ucb1x00-core.c | 2 +- + drivers/misc/sgi-xp/xpc_channel.c | 2 +- + drivers/net/caif/caif_hsi.c | 2 +- + drivers/net/can/usb/peak_usb/pcan_usb.c | 2 +- + drivers/net/usb/lan78xx.c | 2 +- + drivers/net/usb/usbnet.c | 2 +- + drivers/scsi/fnic/fnic_scsi.c | 4 +- + drivers/scsi/snic/snic_scsi.c | 2 +- + .../staging/comedi/drivers/ni_mio_common.c | 2 +- + drivers/staging/lustre/lnet/lnet/lib-eq.c | 426 ++++++++++++++++++ + drivers/staging/rts5208/rtsx.c | 2 +- + drivers/staging/speakup/speakup_acntpc.c | 4 +- + drivers/staging/speakup/speakup_apollo.c | 2 +- + drivers/staging/speakup/speakup_decext.c | 2 +- + drivers/staging/speakup/speakup_decpc.c | 2 +- + drivers/staging/speakup/speakup_dectlk.c | 2 +- + drivers/staging/speakup/speakup_dtlk.c | 4 +- + drivers/staging/speakup/speakup_keypc.c | 4 +- + drivers/staging/speakup/synth.c | 14 +- + .../staging/unisys/visornic/visornic_main.c | 6 +- + drivers/video/fbdev/omap/hwa742.c | 2 +- + drivers/video/fbdev/pxafb.c | 2 +- + fs/btrfs/extent-tree.c | 2 +- + fs/btrfs/inode-map.c | 2 +- + sound/usb/line6/pcm.c | 2 +- + 33 files changed, 470 insertions(+), 50 deletions(-) + create mode 100644 drivers/staging/lustre/lnet/lnet/lib-eq.c + +diff --git a/drivers/block/swim.c b/drivers/block/swim.c +index 3fa6fcc34790..278486c8266d 100644 +--- a/drivers/block/swim.c ++++ b/drivers/block/swim.c +@@ -332,7 +332,7 @@ static inline void swim_motor(struct swim __iomem *base, + if (swim_readbit(base, MOTOR_ON)) + break; + current->state = TASK_INTERRUPTIBLE; +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + } else if (action == OFF) { + swim_action(base, MOTOR_OFF); +@@ -351,7 +351,7 @@ static inline void swim_eject(struct swim __iomem *base) + if (!swim_readbit(base, DISK_IN)) + break; + current->state = TASK_INTERRUPTIBLE; +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + swim_select(base, RELAX); + } +@@ -375,7 +375,7 @@ static inline int swim_step(struct swim __iomem *base) + for (wait = 0; wait < HZ; wait++) { + + current->state = TASK_INTERRUPTIBLE; +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + + swim_select(base, RELAX); + if (!swim_readbit(base, STEP)) +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index 00bf4b17edbf..71c49540cb25 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -3541,7 +3541,7 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf) + /* Current message first, to preserve order */ + while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { + /* Wait for the message to clear out. */ +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + + /* No need for locks, the interface is down. */ +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c +index 8b5aec5430f1..a5737d29cd93 100644 +--- a/drivers/char/ipmi/ipmi_ssif.c ++++ b/drivers/char/ipmi/ipmi_ssif.c +@@ -1285,7 +1285,7 @@ static void shutdown_ssif(void *send_info) + + /* make sure the driver is not looking for flags any more. 
*/ + while (ssif_info->ssif_state != SSIF_NORMAL) +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + + ssif_info->stopping = true; + del_timer_sync(&ssif_info->watch_timer); +diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c +index 5918ea7499bb..5228e78df804 100644 +--- a/drivers/char/snsc.c ++++ b/drivers/char/snsc.c +@@ -198,7 +198,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos) + add_wait_queue(&sd->sd_rq, &wait); + spin_unlock_irqrestore(&sd->sd_rlock, flags); + +- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT)); ++ schedule_msec_hrtimeout((SCDRV_TIMEOUT)); + + remove_wait_queue(&sd->sd_rq, &wait); + if (signal_pending(current)) { +@@ -294,7 +294,7 @@ scdrv_write(struct file *file, const char __user *buf, + add_wait_queue(&sd->sd_wq, &wait); + spin_unlock_irqrestore(&sd->sd_wlock, flags); + +- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT)); ++ schedule_msec_hrtimeout((SCDRV_TIMEOUT)); + + remove_wait_queue(&sd->sd_wq, &wait); + if (signal_pending(current)) { +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +index d0fd147ef75f..730ae4fe6b85 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +@@ -235,7 +235,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv, + DRM_ERROR("SVGA device lockup.\n"); + break; + } +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + if (interruptible && signal_pending(current)) { + ret = -ERESTARTSYS; + break; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +index c3ad4478266b..7e2a29d56459 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +@@ -202,7 +202,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, + break; + } + if (lazy) +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + else if ((++count & 0x0F) == 0) { + /** + * FIXME: Use schedule_hr_timeout here for +diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c +index 6c269ecd8d05..69becedee614 100644 +--- a/drivers/media/pci/ivtv/ivtv-ioctl.c ++++ b/drivers/media/pci/ivtv/ivtv-ioctl.c +@@ -1156,7 +1156,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std) + TASK_UNINTERRUPTIBLE); + if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100) + break; +- schedule_timeout(msecs_to_jiffies(25)); ++ schedule_msec_hrtimeout((25)); + } + finish_wait(&itv->vsync_waitq, &wait); + mutex_lock(&itv->serialize_lock); +diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c +index a641f20e3f86..e1b40d2b4bed 100644 +--- a/drivers/media/pci/ivtv/ivtv-streams.c ++++ b/drivers/media/pci/ivtv/ivtv-streams.c +@@ -843,7 +843,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end) + while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) && + time_before(jiffies, + then + msecs_to_jiffies(2000))) { +- schedule_timeout(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout((10)); + } + + /* To convert jiffies to ms, we must multiply by 1000 +diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c +index d6fb2e1a759a..7ac951b84beb 100644 +--- a/drivers/mfd/ucb1x00-core.c ++++ b/drivers/mfd/ucb1x00-core.c +@@ -253,7 +253,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync) + break; + /* yield to other processes */ + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + + return UCB_ADC_DAT(val); +diff --git 
a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c +index 8e6607fc8a67..b9ab770bbdb5 100644 +--- a/drivers/misc/sgi-xp/xpc_channel.c ++++ b/drivers/misc/sgi-xp/xpc_channel.c +@@ -834,7 +834,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) + + atomic_inc(&ch->n_on_msg_allocate_wq); + prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE); +- ret = schedule_timeout(1); ++ ret = schedule_min_hrtimeout(); + finish_wait(&ch->msg_allocate_wq, &wait); + atomic_dec(&ch->n_on_msg_allocate_wq); + +diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c +index 433a14b9f731..4d197a99472b 100644 +--- a/drivers/net/caif/caif_hsi.c ++++ b/drivers/net/caif/caif_hsi.c +@@ -939,7 +939,7 @@ static void cfhsi_wake_down(struct work_struct *work) + break; + + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + retry--; + } + +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c +index 13238a72a338..fc51ae55c63f 100644 +--- a/drivers/net/can/usb/peak_usb/pcan_usb.c ++++ b/drivers/net/can/usb/peak_usb/pcan_usb.c +@@ -250,7 +250,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff) + } else { + /* the PCAN-USB needs time to init */ + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT)); ++ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT)); + } + + return err; +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index 3d92ea6fcc02..de564401fd4d 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -2674,7 +2674,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev) + while (!skb_queue_empty(&dev->rxq) && + !skb_queue_empty(&dev->txq) && + !skb_queue_empty(&dev->done)) { +- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); ++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS)); + set_current_state(TASK_UNINTERRUPTIBLE); + netif_dbg(dev, ifdown, dev->net, + "waited for %d urb completions\n", temp); +diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c +index 504282af27e5..da60ab9b56c7 100644 +--- a/drivers/net/usb/usbnet.c ++++ b/drivers/net/usb/usbnet.c +@@ -770,7 +770,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q) + spin_lock_irqsave(&q->lock, flags); + while (!skb_queue_empty(q)) { + spin_unlock_irqrestore(&q->lock, flags); +- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); ++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS)); + set_current_state(TASK_UNINTERRUPTIBLE); + spin_lock_irqsave(&q->lock, flags); + } +diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c +index 80608b53897b..84051b538fa8 100644 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@ -216,7 +216,7 @@ int fnic_fw_reset_handler(struct fnic *fnic) + + /* wait for io cmpl */ + while (atomic_read(&fnic->in_flight)) +- schedule_timeout(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout((1)); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + +@@ -2273,7 +2273,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, + } + } + +- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); ++ schedule_msec_hrtimeout((2 * fnic->config.ed_tov)); + + /* walk again to check, if IOs are still pending in fw */ + if (fnic_is_abts_pending(fnic, lr_sc)) +diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c +index b3650c989ed4..7ed1fb285754 100644 +--- a/drivers/scsi/snic/snic_scsi.c ++++ 
b/drivers/scsi/snic/snic_scsi.c +@@ -2353,7 +2353,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc) + + /* Wait for all the IOs that are entered in Qcmd */ + while (atomic_read(&snic->ios_inflight)) +- schedule_timeout(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout((1)); + + ret = snic_issue_hba_reset(snic, sc); + if (ret) { +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c +index b04dad8c7092..27c824bdebf0 100644 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c +@@ -4727,7 +4727,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev) + if ((status & NI67XX_CAL_STATUS_BUSY) == 0) + break; + set_current_state(TASK_INTERRUPTIBLE); +- if (schedule_timeout(1)) ++ if (schedule_min_hrtimeout()) + return -EIO; + } + if (i == timeout) { +diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c +new file mode 100644 +index 000000000000..8cca151741b2 +--- /dev/null ++++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c +@@ -0,0 +1,426 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * GPL HEADER START ++ * ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 only, ++ * as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License version 2 for more details (a copy is included ++ * in the LICENSE file that accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License ++ * version 2 along with this program; If not, see ++ * http://www.gnu.org/licenses/gpl-2.0.html ++ * ++ * GPL HEADER END ++ */ ++/* ++ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. ++ * Use is subject to license terms. ++ * ++ * Copyright (c) 2012, Intel Corporation. ++ */ ++/* ++ * This file is part of Lustre, http://www.lustre.org/ ++ * Lustre is a trademark of Sun Microsystems, Inc. ++ * ++ * lnet/lnet/lib-eq.c ++ * ++ * Library level Event queue management routines ++ */ ++ ++#define DEBUG_SUBSYSTEM S_LNET ++ ++#include <linux/lnet/lib-lnet.h> ++ ++/** ++ * Create an event queue that has room for \a count number of events. ++ * ++ * The event queue is circular and older events will be overwritten by new ++ * ones if they are not removed in time by the user using the functions ++ * LNetEQGet(), LNetEQWait(), or LNetEQPoll(). It is up to the user to ++ * determine the appropriate size of the event queue to prevent this loss ++ * of events. Note that when EQ handler is specified in \a callback, no ++ * event loss can happen, since the handler is run for each event deposited ++ * into the EQ. ++ * ++ * \param count The number of events to be stored in the event queue. It ++ * will be rounded up to the next power of two. ++ * \param callback A handler function that runs when an event is deposited ++ * into the EQ. The constant value LNET_EQ_HANDLER_NONE can be used to ++ * indicate that no event handler is desired. ++ * \param handle On successful return, this location will hold a handle for ++ * the newly created EQ. ++ * ++ * \retval 0 On success. ++ * \retval -EINVAL If an parameter is not valid. 
++ * \retval -ENOMEM If memory for the EQ can't be allocated. ++ * ++ * \see lnet_eq_handler_t for the discussion on EQ handler semantics. ++ */ ++int ++LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback, ++ struct lnet_handle_eq *handle) ++{ ++ struct lnet_eq *eq; ++ ++ LASSERT(the_lnet.ln_refcount > 0); ++ ++ /* ++ * We need count to be a power of 2 so that when eq_{enq,deq}_seq ++ * overflow, they don't skip entries, so the queue has the same ++ * apparent capacity at all times ++ */ ++ if (count) ++ count = roundup_pow_of_two(count); ++ ++ if (callback != LNET_EQ_HANDLER_NONE && count) ++ CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count); ++ ++ /* ++ * count can be 0 if only need callback, we can eliminate ++ * overhead of enqueue event ++ */ ++ if (!count && callback == LNET_EQ_HANDLER_NONE) ++ return -EINVAL; ++ ++ eq = kzalloc(sizeof(*eq), GFP_NOFS); ++ if (!eq) ++ return -ENOMEM; ++ ++ if (count) { ++ eq->eq_events = kvmalloc_array(count, sizeof(struct lnet_event), ++ GFP_KERNEL | __GFP_ZERO); ++ if (!eq->eq_events) ++ goto failed; ++ /* ++ * NB allocator has set all event sequence numbers to 0, ++ * so all them should be earlier than eq_deq_seq ++ */ ++ } ++ ++ eq->eq_deq_seq = 1; ++ eq->eq_enq_seq = 1; ++ eq->eq_size = count; ++ eq->eq_callback = callback; ++ ++ eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(), ++ sizeof(*eq->eq_refs[0])); ++ if (!eq->eq_refs) ++ goto failed; ++ ++ /* MUST hold both exclusive lnet_res_lock */ ++ lnet_res_lock(LNET_LOCK_EX); ++ /* ++ * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do ++ * both EQ lookup and poll event with only lnet_eq_wait_lock ++ */ ++ lnet_eq_wait_lock(); ++ ++ lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh); ++ list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active); ++ ++ lnet_eq_wait_unlock(); ++ lnet_res_unlock(LNET_LOCK_EX); ++ ++ lnet_eq2handle(handle, eq); ++ return 0; ++ ++failed: ++ kvfree(eq->eq_events); ++ ++ if (eq->eq_refs) ++ cfs_percpt_free(eq->eq_refs); ++ ++ kfree(eq); ++ return -ENOMEM; ++} ++EXPORT_SYMBOL(LNetEQAlloc); ++ ++/** ++ * Release the resources associated with an event queue if it's idle; ++ * otherwise do nothing and it's up to the user to try again. ++ * ++ * \param eqh A handle for the event queue to be released. ++ * ++ * \retval 0 If the EQ is not in use and freed. ++ * \retval -ENOENT If \a eqh does not point to a valid EQ. ++ * \retval -EBUSY If the EQ is still in use by some MDs. 
++ */ ++int ++LNetEQFree(struct lnet_handle_eq eqh) ++{ ++ struct lnet_eq *eq; ++ struct lnet_event *events = NULL; ++ int **refs = NULL; ++ int *ref; ++ int rc = 0; ++ int size = 0; ++ int i; ++ ++ LASSERT(the_lnet.ln_refcount > 0); ++ ++ lnet_res_lock(LNET_LOCK_EX); ++ /* ++ * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do ++ * both EQ lookup and poll event with only lnet_eq_wait_lock ++ */ ++ lnet_eq_wait_lock(); ++ ++ eq = lnet_handle2eq(&eqh); ++ if (!eq) { ++ rc = -ENOENT; ++ goto out; ++ } ++ ++ cfs_percpt_for_each(ref, i, eq->eq_refs) { ++ LASSERT(*ref >= 0); ++ if (!*ref) ++ continue; ++ ++ CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n", ++ i, *ref); ++ rc = -EBUSY; ++ goto out; ++ } ++ ++ /* stash for free after lock dropped */ ++ events = eq->eq_events; ++ size = eq->eq_size; ++ refs = eq->eq_refs; ++ ++ lnet_res_lh_invalidate(&eq->eq_lh); ++ list_del(&eq->eq_list); ++ kfree(eq); ++ out: ++ lnet_eq_wait_unlock(); ++ lnet_res_unlock(LNET_LOCK_EX); ++ ++ kvfree(events); ++ if (refs) ++ cfs_percpt_free(refs); ++ ++ return rc; ++} ++EXPORT_SYMBOL(LNetEQFree); ++ ++void ++lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev) ++{ ++ /* MUST called with resource lock hold but w/o lnet_eq_wait_lock */ ++ int index; ++ ++ if (!eq->eq_size) { ++ LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE); ++ eq->eq_callback(ev); ++ return; ++ } ++ ++ lnet_eq_wait_lock(); ++ ev->sequence = eq->eq_enq_seq++; ++ ++ LASSERT(eq->eq_size == LOWEST_BIT_SET(eq->eq_size)); ++ index = ev->sequence & (eq->eq_size - 1); ++ ++ eq->eq_events[index] = *ev; ++ ++ if (eq->eq_callback != LNET_EQ_HANDLER_NONE) ++ eq->eq_callback(ev); ++ ++ /* Wake anyone waiting in LNetEQPoll() */ ++ if (waitqueue_active(&the_lnet.ln_eq_waitq)) ++ wake_up_all(&the_lnet.ln_eq_waitq); ++ lnet_eq_wait_unlock(); ++} ++ ++static int ++lnet_eq_dequeue_event(struct lnet_eq *eq, struct lnet_event *ev) ++{ ++ int new_index = eq->eq_deq_seq & (eq->eq_size - 1); ++ struct lnet_event *new_event = &eq->eq_events[new_index]; ++ int rc; ++ ++ /* must called with lnet_eq_wait_lock hold */ ++ if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence)) ++ return 0; ++ ++ /* We've got a new event... */ ++ *ev = *new_event; ++ ++ CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n", ++ new_event, eq->eq_deq_seq, eq->eq_size); ++ ++ /* ...but did it overwrite an event we've not seen yet? */ ++ if (eq->eq_deq_seq == new_event->sequence) { ++ rc = 1; ++ } else { ++ /* ++ * don't complain with CERROR: some EQs are sized small ++ * anyway; if it's important, the caller should complain ++ */ ++ CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n", ++ eq->eq_deq_seq, new_event->sequence); ++ rc = -EOVERFLOW; ++ } ++ ++ eq->eq_deq_seq = new_event->sequence + 1; ++ return rc; ++} ++ ++/** ++ * A nonblocking function that can be used to get the next event in an EQ. ++ * If an event handler is associated with the EQ, the handler will run before ++ * this function returns successfully. The event is removed from the queue. ++ * ++ * \param eventq A handle for the event queue. ++ * \param event On successful return (1 or -EOVERFLOW), this location will ++ * hold the next event in the EQ. ++ * ++ * \retval 0 No pending event in the EQ. ++ * \retval 1 Indicates success. ++ * \retval -ENOENT If \a eventq does not point to a valid EQ. 
++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that ++ * at least one event between this event and the last event obtained from the ++ * EQ has been dropped due to limited space in the EQ. ++ */ ++ ++/** ++ * Block the calling process until there is an event in the EQ. ++ * If an event handler is associated with the EQ, the handler will run before ++ * this function returns successfully. This function returns the next event ++ * in the EQ and removes it from the EQ. ++ * ++ * \param eventq A handle for the event queue. ++ * \param event On successful return (1 or -EOVERFLOW), this location will ++ * hold the next event in the EQ. ++ * ++ * \retval 1 Indicates success. ++ * \retval -ENOENT If \a eventq does not point to a valid EQ. ++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that ++ * at least one event between this event and the last event obtained from the ++ * EQ has been dropped due to limited space in the EQ. ++ */ ++ ++static int ++lnet_eq_wait_locked(int *timeout_ms, long state) ++__must_hold(&the_lnet.ln_eq_wait_lock) ++{ ++ int tms = *timeout_ms; ++ int wait; ++ wait_queue_entry_t wl; ++ unsigned long now; ++ ++ if (!tms) ++ return -ENXIO; /* don't want to wait and no new event */ ++ ++ init_waitqueue_entry(&wl, current); ++ set_current_state(state); ++ add_wait_queue(&the_lnet.ln_eq_waitq, &wl); ++ ++ lnet_eq_wait_unlock(); ++ ++ if (tms < 0) { ++ schedule(); ++ } else { ++ now = jiffies; ++ schedule_msec_hrtimeout((tms)); ++ tms -= jiffies_to_msecs(jiffies - now); ++ if (tms < 0) /* no more wait but may have new event */ ++ tms = 0; ++ } ++ ++ wait = tms; /* might need to call here again */ ++ *timeout_ms = tms; ++ ++ lnet_eq_wait_lock(); ++ remove_wait_queue(&the_lnet.ln_eq_waitq, &wl); ++ ++ return wait; ++} ++ ++/** ++ * Block the calling process until there's an event from a set of EQs or ++ * timeout happens. ++ * ++ * If an event handler is associated with the EQ, the handler will run before ++ * this function returns successfully, in which case the corresponding event ++ * is consumed. ++ * ++ * LNetEQPoll() provides a timeout to allow applications to poll, block for a ++ * fixed period, or block indefinitely. ++ * ++ * \param eventqs,neq An array of EQ handles, and size of the array. ++ * \param timeout_ms Time in milliseconds to wait for an event to occur on ++ * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an ++ * infinite timeout. ++ * \param interruptible, if true, use TASK_INTERRUPTIBLE, else TASK_NOLOAD ++ * \param event,which On successful return (1 or -EOVERFLOW), \a event will ++ * hold the next event in the EQs, and \a which will contain the index of the ++ * EQ from which the event was taken. ++ * ++ * \retval 0 No pending event in the EQs after timeout. ++ * \retval 1 Indicates success. ++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that ++ * at least one event between this event and the last event obtained from the ++ * EQ indicated by \a which has been dropped due to limited space in the EQ. ++ * \retval -ENOENT If there's an invalid handle in \a eventqs. 
++ */ ++int ++LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, int timeout_ms, ++ int interruptible, ++ struct lnet_event *event, int *which) ++{ ++ int wait = 1; ++ int rc; ++ int i; ++ ++ LASSERT(the_lnet.ln_refcount > 0); ++ ++ if (neq < 1) ++ return -ENOENT; ++ ++ lnet_eq_wait_lock(); ++ ++ for (;;) { ++ for (i = 0; i < neq; i++) { ++ struct lnet_eq *eq = lnet_handle2eq(&eventqs[i]); ++ ++ if (!eq) { ++ lnet_eq_wait_unlock(); ++ return -ENOENT; ++ } ++ ++ rc = lnet_eq_dequeue_event(eq, event); ++ if (rc) { ++ lnet_eq_wait_unlock(); ++ *which = i; ++ return rc; ++ } ++ } ++ ++ if (!wait) ++ break; ++ ++ /* ++ * return value of lnet_eq_wait_locked: ++ * -1 : did nothing and it's sure no new event ++ * 1 : sleep inside and wait until new event ++ * 0 : don't want to wait anymore, but might have new event ++ * so need to call dequeue again ++ */ ++ wait = lnet_eq_wait_locked(&timeout_ms, ++ interruptible ? TASK_INTERRUPTIBLE ++ : TASK_NOLOAD); ++ if (wait < 0) /* no new event */ ++ break; ++ } ++ ++ lnet_eq_wait_unlock(); ++ return 0; ++} +diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c +index fa597953e9a0..685cf842badc 100644 +--- a/drivers/staging/rts5208/rtsx.c ++++ b/drivers/staging/rts5208/rtsx.c +@@ -490,7 +490,7 @@ static int rtsx_polling_thread(void *__dev) + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL)); ++ schedule_msec_hrtimeout((POLLING_INTERVAL)); + + /* lock the device pointers */ + mutex_lock(&dev->dev_mutex); +diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c +index c94328a5bd4a..6e7d4671aa69 100644 +--- a/drivers/staging/speakup/speakup_acntpc.c ++++ b/drivers/staging/speakup/speakup_acntpc.c +@@ -198,7 +198,7 @@ static void do_catch_up(struct spk_synth *synth) + full_time_val = full_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (synth_full()) { +- schedule_timeout(msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout((full_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +@@ -226,7 +226,7 @@ static void do_catch_up(struct spk_synth *synth) + jiffy_delta_val = jiffy_delta->u.n.value; + delay_time_val = delay_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout(delay_time_val); + jiff_max = jiffies + jiffy_delta_val; + } + } +diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c +index 0877b4044c28..627102d048c1 100644 +--- a/drivers/staging/speakup/speakup_apollo.c ++++ b/drivers/staging/speakup/speakup_apollo.c +@@ -165,7 +165,7 @@ static void do_catch_up(struct spk_synth *synth) + if (!synth->io_ops->synth_out(synth, ch)) { + synth->io_ops->tiocmset(0, UART_MCR_RTS); + synth->io_ops->tiocmset(UART_MCR_RTS, 0); +- schedule_timeout(msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout(full_time_val); + continue; + } + if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) { +diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c +index ddbb7e97d118..f9502addc765 100644 +--- a/drivers/staging/speakup/speakup_decext.c ++++ b/drivers/staging/speakup/speakup_decext.c +@@ -176,7 +176,7 @@ static void do_catch_up(struct spk_synth *synth) + if (ch == '\n') + ch = 0x0D; + if (synth_full() || !synth->io_ops->synth_out(synth, ch)) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ 
schedule_msec_hrtimeout(delay_time_val); + continue; + } + set_current_state(TASK_RUNNING); +diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c +index 459ee0c0bd57..52b539544c98 100644 +--- a/drivers/staging/speakup/speakup_decpc.c ++++ b/drivers/staging/speakup/speakup_decpc.c +@@ -394,7 +394,7 @@ static void do_catch_up(struct spk_synth *synth) + if (ch == '\n') + ch = 0x0D; + if (dt_sendchar(ch)) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c +index dccb4ea29d37..8ecead307d04 100644 +--- a/drivers/staging/speakup/speakup_dectlk.c ++++ b/drivers/staging/speakup/speakup_dectlk.c +@@ -244,7 +244,7 @@ static void do_catch_up(struct spk_synth *synth) + if (ch == '\n') + ch = 0x0D; + if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout(delay_time_val); + continue; + } + set_current_state(TASK_RUNNING); +diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c +index dbebed0eeeec..6d83c13ca4a6 100644 +--- a/drivers/staging/speakup/speakup_dtlk.c ++++ b/drivers/staging/speakup/speakup_dtlk.c +@@ -211,7 +211,7 @@ static void do_catch_up(struct spk_synth *synth) + delay_time_val = delay_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (synth_full()) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +@@ -227,7 +227,7 @@ static void do_catch_up(struct spk_synth *synth) + delay_time_val = delay_time->u.n.value; + jiffy_delta_val = jiffy_delta->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + jiff_max = jiffies + jiffy_delta_val; + } + } +diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c +index b788272da4f9..d5dac16c04d8 100644 +--- a/drivers/staging/speakup/speakup_keypc.c ++++ b/drivers/staging/speakup/speakup_keypc.c +@@ -199,7 +199,7 @@ static void do_catch_up(struct spk_synth *synth) + full_time_val = full_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (synth_full()) { +- schedule_timeout(msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout((full_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +@@ -232,7 +232,7 @@ static void do_catch_up(struct spk_synth *synth) + jiffy_delta_val = jiffy_delta->u.n.value; + delay_time_val = delay_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + jiff_max = jiffies+jiffy_delta_val; + } + } +diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c +index 3568bfb89912..0a80b3b098b2 100644 +--- a/drivers/staging/speakup/synth.c ++++ b/drivers/staging/speakup/synth.c +@@ -93,12 +93,8 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode) + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (ch == '\n') + ch = synth->procspeech; +- if (unicode) +- ret = synth->io_ops->synth_out_unicode(synth, ch); +- else +- ret = synth->io_ops->synth_out(synth, ch); +- if (!ret) { +- 
schedule_timeout(msecs_to_jiffies(full_time_val)); ++ if (!synth->io_ops->synth_out(synth, ch)) { ++ schedule_msec_hrtimeout(full_time_val); + continue; + } + if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) { +@@ -108,11 +104,9 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode) + full_time_val = full_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (synth->io_ops->synth_out(synth, synth->procspeech)) +- schedule_timeout( +- msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout(delay_time_val); + else +- schedule_timeout( +- msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout(full_time_val); + jiff_max = jiffies + jiffy_delta_val; + } + set_current_state(TASK_RUNNING); +diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c +index 1c1a470d2e50..cbac00e586b0 100644 +--- a/drivers/staging/unisys/visornic/visornic_main.c ++++ b/drivers/staging/unisys/visornic/visornic_main.c +@@ -549,7 +549,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev, + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&devdata->priv_lock, flags); +- wait += schedule_timeout(msecs_to_jiffies(10)); ++ wait += schedule_msec_hrtimeout((10)); + spin_lock_irqsave(&devdata->priv_lock, flags); + } + +@@ -560,7 +560,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev, + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&devdata->priv_lock, flags); +- schedule_timeout(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout((10)); + spin_lock_irqsave(&devdata->priv_lock, flags); + if (atomic_read(&devdata->usage)) + break; +@@ -714,7 +714,7 @@ static int visornic_enable_with_timeout(struct net_device *netdev, + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&devdata->priv_lock, flags); +- wait += schedule_timeout(msecs_to_jiffies(10)); ++ wait += schedule_msec_hrtimeout((10)); + spin_lock_irqsave(&devdata->priv_lock, flags); + } + +diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c +index 6199d4806193..7c7165f2dad4 100644 +--- a/drivers/video/fbdev/omap/hwa742.c ++++ b/drivers/video/fbdev/omap/hwa742.c +@@ -926,7 +926,7 @@ static void hwa742_resume(void) + if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7)) + break; + set_current_state(TASK_UNINTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(5)); ++ schedule_msec_hrtimeout((5)); + } + hwa742_set_update_mode(hwa742.update_mode_before_suspend); + } +diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c +index d59c8a59f582..e103cce28de7 100644 +--- a/drivers/video/fbdev/pxafb.c ++++ b/drivers/video/fbdev/pxafb.c +@@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg) + mutex_unlock(&fbi->ctrlr_lock); + + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(30)); ++ schedule_msec_hrtimeout((30)); + } + + pr_debug("%s(): task ending\n", __func__); +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index c5880329ae37..aad921814170 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -6206,7 +6206,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) + flush = BTRFS_RESERVE_FLUSH_LIMIT; + + if (btrfs_transaction_in_commit(fs_info)) +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + + if (delalloc_lock) +diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c +index ffca2abf13d0..89b2a7f7397e 100644 +--- 
a/fs/btrfs/inode-map.c ++++ b/fs/btrfs/inode-map.c +@@ -75,7 +75,7 @@ static int caching_kthread(void *data) + btrfs_release_path(path); + root->ino_cache_progress = last; + up_read(&fs_info->commit_root_sem); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + goto again; + } else + continue; +diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c +index 72c6f8e82a7e..46d8c2a148ad 100644 +--- a/sound/usb/line6/pcm.c ++++ b/sound/usb/line6/pcm.c +@@ -131,7 +131,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm, + if (!alive) + break; + set_current_state(TASK_UNINTERRUPTIBLE); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } while (--timeout > 0); + if (alive) + dev_err(line6pcm->line6->ifcdev, +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0009-Replace-all-calls-to-schedule_timeout_interruptible-.patch b/sys-kernel/linux-image-redcore/files/5.1-0009-Replace-all-calls-to-schedule_timeout_interruptible-.patch new file mode 100644 index 00000000..5e044ab3 --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0009-Replace-all-calls-to-schedule_timeout_interruptible-.patch @@ -0,0 +1,311 @@ +From ed9d6f38013a94e1ea56b33c02706a4079b31e92 Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Mon, 20 Feb 2017 13:30:07 +1100 +Subject: [PATCH 09/16] Replace all calls to schedule_timeout_interruptible of + potentially under 50ms to use schedule_msec_hrtimeout_interruptible. + +--- + drivers/hwmon/fam15h_power.c | 2 +- + drivers/iio/light/tsl2563.c | 6 +----- + drivers/media/i2c/msp3400-driver.c | 4 ++-- + drivers/media/pci/ivtv/ivtv-gpio.c | 6 +++--- + drivers/media/radio/radio-mr800.c | 2 +- + drivers/media/radio/radio-tea5777.c | 2 +- + drivers/media/radio/tea575x.c | 2 +- + drivers/parport/ieee1284.c | 2 +- + drivers/parport/ieee1284_ops.c | 2 +- + drivers/platform/x86/intel_ips.c | 8 ++++---- + net/core/pktgen.c | 2 +- + sound/soc/codecs/wm8350.c | 12 ++++++------ + sound/soc/codecs/wm8900.c | 2 +- + sound/soc/codecs/wm9713.c | 4 ++-- + 14 files changed, 26 insertions(+), 30 deletions(-) + +diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c +index 9545a346044f..c24cf1302ec7 100644 +--- a/drivers/hwmon/fam15h_power.c ++++ b/drivers/hwmon/fam15h_power.c +@@ -237,7 +237,7 @@ static ssize_t power1_average_show(struct device *dev, + prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu]; + } + +- leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period)); ++ leftover = schedule_msec_hrtimeout_interruptible((data->power_period)); + if (leftover) + return 0; + +diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c +index 6bbb0b1e6032..f4b83648c405 100644 +--- a/drivers/iio/light/tsl2563.c ++++ b/drivers/iio/light/tsl2563.c +@@ -282,11 +282,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip) + default: + delay = 402; + } +- /* +- * TODO: Make sure that we wait at least required delay but why we +- * have to extend it one tick more? 
+- */ +- schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2); ++ schedule_msec_hrtimeout_interruptible(delay + 1); + } + + static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc) +diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c +index 522fb1d561e7..47d1afee1d04 100644 +--- a/drivers/media/i2c/msp3400-driver.c ++++ b/drivers/media/i2c/msp3400-driver.c +@@ -179,7 +179,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr) + break; + dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err, + dev, addr); +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + } + if (err == 3) { + dev_warn(&client->dev, "resetting chip, sound will go off.\n"); +@@ -220,7 +220,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val) + break; + dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err, + dev, addr); +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + } + if (err == 3) { + dev_warn(&client->dev, "resetting chip, sound will go off.\n"); +diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c +index f752f3993687..23372af61ebf 100644 +--- a/drivers/media/pci/ivtv/ivtv-gpio.c ++++ b/drivers/media/pci/ivtv/ivtv-gpio.c +@@ -117,7 +117,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv) + curout = (curout & ~0xF) | 1; + write_reg(curout, IVTV_REG_GPIO_OUT); + /* We could use something else for smaller time */ +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible((1)); + curout |= 2; + write_reg(curout, IVTV_REG_GPIO_OUT); + curdir &= ~0x80; +@@ -137,11 +137,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value) + curout = read_reg(IVTV_REG_GPIO_OUT); + curout &= ~(1 << itv->card->xceive_pin); + write_reg(curout, IVTV_REG_GPIO_OUT); +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible((1)); + + curout |= 1 << itv->card->xceive_pin; + write_reg(curout, IVTV_REG_GPIO_OUT); +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible((1)); + return 0; + } + +diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c +index ab1324f68199..3fdb422a5caa 100644 +--- a/drivers/media/radio/radio-mr800.c ++++ b/drivers/media/radio/radio-mr800.c +@@ -378,7 +378,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv, + retval = -ENODATA; + break; + } +- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) { ++ if (schedule_msec_hrtimeout_interruptible((10))) { + retval = -ERESTARTSYS; + break; + } +diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c +index 61f751cf1aa4..7eb30468091e 100644 +--- a/drivers/media/radio/radio-tea5777.c ++++ b/drivers/media/radio/radio-tea5777.c +@@ -245,7 +245,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait) + } + + if (wait) { +- if (schedule_timeout_interruptible(msecs_to_jiffies(wait))) ++ if (schedule_msec_hrtimeout_interruptible((wait))) + return -ERESTARTSYS; + } + +diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c +index f89f83e04741..325987cd5997 100644 +--- a/drivers/media/radio/tea575x.c ++++ b/drivers/media/radio/tea575x.c +@@ -416,7 +416,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea, + for (;;) { + if 
(time_after(jiffies, timeout)) + break; +- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) { ++ if (schedule_msec_hrtimeout_interruptible((10))) { + /* some signal arrived, stop search */ + tea->val &= ~TEA575X_BIT_SEARCH; + snd_tea575x_set_freq(tea); +diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c +index f12b9da69255..6ca6eecbdb2d 100644 +--- a/drivers/parport/ieee1284.c ++++ b/drivers/parport/ieee1284.c +@@ -208,7 +208,7 @@ int parport_wait_peripheral(struct parport *port, + /* parport_wait_event didn't time out, but the + * peripheral wasn't actually ready either. + * Wait for another 10ms. */ +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + } + } + +diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c +index 5d41dda6da4e..34705f6b423f 100644 +--- a/drivers/parport/ieee1284_ops.c ++++ b/drivers/parport/ieee1284_ops.c +@@ -537,7 +537,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port, + /* Yield the port for a while. */ + if (count && dev->port->irq != PARPORT_IRQ_NONE) { + parport_release (dev); +- schedule_timeout_interruptible(msecs_to_jiffies(40)); ++ schedule_msec_hrtimeout_interruptible((40)); + parport_claim_or_block (dev); + } + else +diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c +index bffe548187ee..c2918ee3e100 100644 +--- a/drivers/platform/x86/intel_ips.c ++++ b/drivers/platform/x86/intel_ips.c +@@ -798,7 +798,7 @@ static int ips_adjust(void *data) + ips_gpu_lower(ips); + + sleep: +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD)); + } while (!kthread_should_stop()); + + dev_dbg(ips->dev, "ips-adjust thread stopped\n"); +@@ -974,7 +974,7 @@ static int ips_monitor(void *data) + seqno_timestamp = get_jiffies_64(); + + old_cpu_power = thm_readl(THM_CEC); +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD)); + + /* Collect an initial average */ + for (i = 0; i < IPS_SAMPLE_COUNT; i++) { +@@ -1001,7 +1001,7 @@ static int ips_monitor(void *data) + mchp_samples[i] = mchp; + } + +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD)); + if (kthread_should_stop()) + break; + } +@@ -1028,7 +1028,7 @@ static int ips_monitor(void *data) + * us to reduce the sample frequency if the CPU and GPU are idle. 
+ */ + old_cpu_power = thm_readl(THM_CEC); +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD)); + last_sample_period = IPS_SAMPLE_PERIOD; + + timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE); +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index f3f5a78cd062..edbed00a06ed 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -1901,7 +1901,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname) + mutex_unlock(&pktgen_thread_lock); + pr_debug("%s: waiting for %s to disappear....\n", + __func__, ifname); +- schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try)); ++ schedule_msec_hrtimeout_interruptible((msec_per_try)); + mutex_lock(&pktgen_thread_lock); + + if (++i >= max_tries) { +diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c +index e92ebe52d485..88791ebb6df0 100644 +--- a/sound/soc/codecs/wm8350.c ++++ b/sound/soc/codecs/wm8350.c +@@ -236,10 +236,10 @@ static void wm8350_pga_work(struct work_struct *work) + out2->ramp == WM8350_RAMP_UP) { + /* delay is longer over 0dB as increases are larger */ + if (i >= WM8350_OUTn_0dB) +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (2)); + else +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (1)); + } else + udelay(50); /* doesn't matter if we delay longer */ +@@ -1123,7 +1123,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component, + (platform->dis_out4 << 6)); + + /* wait for discharge */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform-> + cap_discharge_msecs)); + +@@ -1139,7 +1139,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component, + WM8350_VBUFEN); + + /* wait for vmid */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform-> + vmid_charge_msecs)); + +@@ -1190,7 +1190,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component, + wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1); + + /* wait */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform-> + vmid_discharge_msecs)); + +@@ -1208,7 +1208,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component, + pm1 | WM8350_OUTPUT_DRAIN_EN); + + /* wait */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform->drain_msecs)); + + pm1 &= ~WM8350_BIASEN; +diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c +index 1a14e902949d..68f17d9877ec 100644 +--- a/sound/soc/codecs/wm8900.c ++++ b/sound/soc/codecs/wm8900.c +@@ -1112,7 +1112,7 @@ static int wm8900_set_bias_level(struct snd_soc_component *component, + /* Need to let things settle before stopping the clock + * to ensure that restart works, see "Stopping the + * master clock" in the datasheet. */ +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible(1); + snd_soc_component_write(component, WM8900_REG_POWER2, + WM8900_REG_POWER2_SYSCLK_ENA); + break; +diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c +index 5a2fdf4f69bf..aeb4e759de4c 100644 +--- a/sound/soc/codecs/wm9713.c ++++ b/sound/soc/codecs/wm9713.c +@@ -203,7 +203,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w, + + /* Gracefully shut down the voice interface. 
*/ + snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0200); +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible(1); + snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0f00); + snd_soc_component_update_bits(component, AC97_EXTENDED_MID, 0x1000, 0x1000); + +@@ -872,7 +872,7 @@ static int wm9713_set_pll(struct snd_soc_component *component, + wm9713->pll_in = freq_in; + + /* wait 10ms AC97 link frames for the link to stabilise */ +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + return 0; + } + +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0010-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch b/sys-kernel/linux-image-redcore/files/5.1-0010-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch new file mode 100644 index 00000000..ab601f4a --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0010-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch @@ -0,0 +1,160 @@ +From 4c8554b4da764ea564a9a2ef9faa481a61cb1a4b Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Mon, 20 Feb 2017 13:30:32 +1100 +Subject: [PATCH 10/16] Replace all calls to schedule_timeout_uninterruptible + of potentially under 50ms to use schedule_msec_hrtimeout_uninterruptible + +--- + drivers/media/pci/cx18/cx18-gpio.c | 4 ++-- + drivers/net/wireless/intel/ipw2x00/ipw2100.c | 4 ++-- + drivers/rtc/rtc-wm8350.c | 6 +++--- + drivers/scsi/lpfc/lpfc_scsi.c | 2 +- + sound/pci/maestro3.c | 4 ++-- + sound/soc/codecs/rt5631.c | 4 ++-- + sound/soc/soc-dapm.c | 2 +- + 7 files changed, 13 insertions(+), 13 deletions(-) + +diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c +index 012859e6dc7b..206bd08265a5 100644 +--- a/drivers/media/pci/cx18/cx18-gpio.c ++++ b/drivers/media/pci/cx18/cx18-gpio.c +@@ -90,11 +90,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi, + + /* Assert */ + gpio_update(cx, mask, ~active_lo); +- schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs)); ++ schedule_msec_hrtimeout_uninterruptible((assert_msecs)); + + /* Deassert */ + gpio_update(cx, mask, ~active_hi); +- schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs)); ++ schedule_msec_hrtimeout_uninterruptible((recovery_msecs)); + } + + /* +diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c +index 52e5ed2d3bc2..7d72a8b62700 100644 +--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c ++++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c +@@ -830,7 +830,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv, + * doesn't seem to have as many firmware restart cycles... + * + * As a test, we're sticking in a 1/100s delay here */ +- schedule_timeout_uninterruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_uninterruptible((10)); + + return 0; + +@@ -1281,7 +1281,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv) + IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n"); + i = 5000; + do { +- schedule_timeout_uninterruptible(msecs_to_jiffies(40)); ++ schedule_msec_hrtimeout_uninterruptible((40)); + /* Todo... wait for sync command ... 
*/ + + read_register(priv->net_dev, IPW_REG_INTA, &inta); +diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c +index 483c7993516b..fddbaa475066 100644 +--- a/drivers/rtc/rtc-wm8350.c ++++ b/drivers/rtc/rtc-wm8350.c +@@ -119,7 +119,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm) + /* Wait until confirmation of stopping */ + do { + rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); +- schedule_timeout_uninterruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_uninterruptible((1)); + } while (--retries && !(rtc_ctrl & WM8350_RTC_STS)); + + if (!retries) { +@@ -202,7 +202,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350) + /* Wait until confirmation of stopping */ + do { + rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); +- schedule_timeout_uninterruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_uninterruptible((1)); + } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS)); + + if (!(rtc_ctrl & WM8350_RTC_ALMSTS)) +@@ -225,7 +225,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350) + /* Wait until confirmation */ + do { + rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); +- schedule_timeout_uninterruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_uninterruptible((1)); + } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS); + + if (rtc_ctrl & WM8350_RTC_ALMSTS) +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c +index a497b2c0cb79..1ed8a04c5cdf 100644 +--- a/drivers/scsi/lpfc/lpfc_scsi.c ++++ b/drivers/scsi/lpfc/lpfc_scsi.c +@@ -5007,7 +5007,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, + tgt_id, lun_id, context); + later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + while (time_after(later, jiffies) && cnt) { +- schedule_timeout_uninterruptible(msecs_to_jiffies(20)); ++ schedule_msec_hrtimeout_uninterruptible((20)); + cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); + } + if (cnt) { +diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c +index 1a9468c14aaf..410ad89a3c7c 100644 +--- a/sound/pci/maestro3.c ++++ b/sound/pci/maestro3.c +@@ -2016,7 +2016,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip) + outw(0, io + GPIO_DATA); + outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION); + +- schedule_timeout_uninterruptible(msecs_to_jiffies(delay1)); ++ schedule_msec_hrtimeout_uninterruptible((delay1)); + + outw(GPO_PRIMARY_AC97, io + GPIO_DATA); + udelay(5); +@@ -2024,7 +2024,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip) + outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A); + outw(~0, io + GPIO_MASK); + +- schedule_timeout_uninterruptible(msecs_to_jiffies(delay2)); ++ schedule_msec_hrtimeout_uninterruptible((delay2)); + + if (! 
snd_m3_try_read_vendor(chip)) + break; +diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c +index 865f49ac38dd..3c1190dd114f 100644 +--- a/sound/soc/codecs/rt5631.c ++++ b/sound/soc/codecs/rt5631.c +@@ -419,7 +419,7 @@ static void onebit_depop_mute_stage(struct snd_soc_component *component, int ena + hp_zc = snd_soc_component_read32(component, RT5631_INT_ST_IRQ_CTRL_2); + snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff); + if (enable) { +- schedule_timeout_uninterruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_uninterruptible((10)); + /* config one-bit depop parameter */ + rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x307f); + snd_soc_component_update_bits(component, RT5631_HP_OUT_VOL, +@@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_component *component, int enable + hp_zc = snd_soc_component_read32(component, RT5631_INT_ST_IRQ_CTRL_2); + snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff); + if (enable) { +- schedule_timeout_uninterruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_uninterruptible((10)); + + /* config depop sequence parameter */ + rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x302f); +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 0382a47b30bd..0d805d3563ca 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -154,7 +154,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm) + static void pop_wait(u32 pop_time) + { + if (pop_time) +- schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time)); ++ schedule_msec_hrtimeout_uninterruptible((pop_time)); + } + + __printf(3, 4) +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0011-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch b/sys-kernel/linux-image-redcore/files/5.1-0011-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch new file mode 100644 index 00000000..a18d030a --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0011-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch @@ -0,0 +1,69 @@ +From 876598b9dbe9c1f27feae36c2e2deacdd4beaf9d Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Mon, 20 Feb 2017 13:32:58 +1100 +Subject: [PATCH 11/16] Don't use hrtimer overlay when pm_freezing since some + drivers still don't correctly use freezable timeouts. + +--- + kernel/time/hrtimer.c | 2 +- + kernel/time/timer.c | 9 +++++---- + 2 files changed, 6 insertions(+), 5 deletions(-) + +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index c6ea49693bca..17ad543fbbc4 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -2039,7 +2039,7 @@ long __sched schedule_msec_hrtimeout(long timeout) + * (yet) better than Hz, as would occur during startup, use regular + * timers. + */ +- if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ) ++ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing) + return schedule_timeout(jiffs); + + secs = timeout / 1000; +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index 3ab277ba0f44..28509c518461 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -43,6 +43,7 @@ + #include <linux/sched/debug.h> + #include <linux/slab.h> + #include <linux/compat.h> ++#include <linux/freezer.h> + + #include <linux/uaccess.h> + #include <asm/unistd.h> +@@ -1971,12 +1972,12 @@ void msleep(unsigned int msecs) + * Use high resolution timers where the resolution of tick based + * timers is inadequate. 
+ */ +- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) { + while (msecs) + msecs = schedule_msec_hrtimeout_uninterruptible(msecs); + return; + } +- timeout = msecs_to_jiffies(msecs) + 1; ++ timeout = jiffs + 1; + + while (timeout) + timeout = schedule_timeout_uninterruptible(timeout); +@@ -1993,12 +1994,12 @@ unsigned long msleep_interruptible(unsigned int msecs) + int jiffs = msecs_to_jiffies(msecs); + unsigned long timeout; + +- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) { + while (msecs && !signal_pending(current)) + msecs = schedule_msec_hrtimeout_interruptible(msecs); + return msecs; + } +- timeout = msecs_to_jiffies(msecs) + 1; ++ timeout = jiffs + 1; + + while (timeout && !signal_pending(current)) + timeout = schedule_timeout_interruptible(timeout); +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0012-Make-threaded-IRQs-optionally-the-default-which-can-.patch b/sys-kernel/linux-image-redcore/files/5.1-0012-Make-threaded-IRQs-optionally-the-default-which-can-.patch new file mode 100644 index 00000000..3e40227f --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0012-Make-threaded-IRQs-optionally-the-default-which-can-.patch @@ -0,0 +1,67 @@ +From 50ddde6e3c62abd39dc6a3cd5941febed7ad49c3 Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Wed, 7 Dec 2016 21:13:16 +1100 +Subject: [PATCH 13/16] Make threaded IRQs optionally the default which can be + disabled. + +--- + kernel/irq/Kconfig | 17 +++++++++++++++++ + kernel/irq/manage.c | 11 +++++++++++ + 2 files changed, 28 insertions(+) + +diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig +index 5f3e2baefca9..de3e5740679b 100644 +--- a/kernel/irq/Kconfig ++++ b/kernel/irq/Kconfig +@@ -107,6 +107,23 @@ config GENERIC_IRQ_RESERVATION_MODE + config IRQ_FORCED_THREADING + bool + ++config FORCE_IRQ_THREADING ++ bool "Make IRQ threading compulsory" ++ depends on IRQ_FORCED_THREADING ++ default n ++ ---help--- ++ ++ Make IRQ threading mandatory for any IRQ handlers that support it ++ instead of being optional and requiring the threadirqs kernel ++ parameter. Instead they can be optionally disabled with the ++ nothreadirqs kernel parameter. ++ ++ Enabling this may make some architectures not boot with runqueue ++ sharing and MuQSS. ++ ++ Enable if you are building for a desktop or low latency system, ++ otherwise say N. 
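
When force_irqthreads is set, whether by this option or by the existing threadirqs boot parameter, handlers registered with plain request_irq() are run in kernel threads. Mainline already gives drivers both an explicitly threaded registration and an opt-out flag, so a timing-critical handler can still refuse threading. A minimal sketch of the two registration styles, using a made-up "foo" device purely for illustration (nothing below is from the patch itself):

    #include <linux/interrupt.h>

    /* Illustrative handlers for a hypothetical "foo" device. */
    static irqreturn_t foo_primary(int irq, void *dev)
    {
            /* Hard-IRQ context: acknowledge the hardware, defer real work. */
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t foo_thread_fn(int irq, void *dev)
    {
            /* Kernel-thread context: sleeping is allowed here. */
            return IRQ_HANDLED;
    }

    static int foo_setup_threaded(int irq, void *dev)
    {
            /* Explicitly threaded; behaves the same whether or not
             * threading is forced. */
            return request_threaded_irq(irq, foo_primary, foo_thread_fn,
                                        0, "foo", dev);
    }

    static irqreturn_t foo_timing_critical(int irq, void *dev)
    {
            /* Does all of its work in hard-IRQ context. */
            return IRQ_HANDLED;
    }

    static int foo_setup_raw(int irq, void *dev)
    {
            /* IRQF_NO_THREAD keeps this handler in hard-IRQ context even
             * when threading is forced. */
            return request_irq(irq, foo_timing_critical, IRQF_NO_THREAD,
                               "foo", dev);
    }

With this patch the opt-out also works at boot time: the nothreadirqs early_param added in kernel/irq/manage.c below restores unthreaded handlers as the default.
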
++ + config SPARSE_IRQ + bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ + ---help--- +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 1401afa0d58a..54394031b536 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -23,9 +23,20 @@ + #include "internals.h" + + #ifdef CONFIG_IRQ_FORCED_THREADING ++#ifdef CONFIG_FORCE_IRQ_THREADING ++__read_mostly bool force_irqthreads = true; ++#else + __read_mostly bool force_irqthreads; ++#endif + EXPORT_SYMBOL_GPL(force_irqthreads); + ++static int __init setup_noforced_irqthreads(char *arg) ++{ ++ force_irqthreads = false; ++ return 0; ++} ++early_param("nothreadirqs", setup_noforced_irqthreads); ++ + static int __init setup_forced_irqthreads(char *arg) + { + force_irqthreads = true; +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0013-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch b/sys-kernel/linux-image-redcore/files/5.1-0013-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch new file mode 100644 index 00000000..cd46a360 --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0013-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch @@ -0,0 +1,81 @@ +From f1b776971e6aa46347f035adfebfd71d5f0930bb Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Wed, 7 Dec 2016 21:23:01 +1100 +Subject: [PATCH 14/16] Reinstate default Hz of 100 in combination with MuQSS + and -ck patches. + +--- + kernel/Kconfig.hz | 25 ++++++++++++++++++------- + 1 file changed, 18 insertions(+), 7 deletions(-) + +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1806fcac8f14 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,8 @@ + + choice + prompt "Timer frequency" +- default HZ_250 ++ default HZ_100 if SCHED_MUQSS ++ default HZ_250_NODEF if !SCHED_MUQSS + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -19,11 +20,18 @@ choice + config HZ_100 + bool "100 HZ" + help ++ 100 Hz is a suitable choice in combination with MuQSS which does ++ not rely on ticks for rescheduling interrupts, and is not Hz limited ++ for timeouts and sleeps from both the kernel and userspace. ++ This allows us to benefit from the lower overhead and higher ++ throughput of fewer timer ticks. ++ ++ Non-MuQSS kernels: + 100 Hz is a typical choice for servers, SMP and NUMA systems + with lots of processors that may show reduced performance if + too many timer interrupts are occurring. + +- config HZ_250 ++ config HZ_250_NODEF + bool "250 HZ" + help + 250 Hz is a good compromise choice allowing server performance +@@ -31,7 +39,10 @@ choice + on SMP and NUMA systems. If you are going to be using NTSC video + or multimedia, selected 300Hz instead. + +- config HZ_300 ++ 250 Hz is the default choice for the mainline scheduler but not ++ advantageous in combination with MuQSS. ++ ++ config HZ_300_NODEF + bool "300 HZ" + help + 300 Hz is a good compromise choice allowing server performance +@@ -39,7 +50,7 @@ choice + on SMP and NUMA systems and exactly dividing by both PAL and + NTSC frame rates for video and multimedia work. 
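
The HZ_100 help text above leans on the hrtimeout helpers from earlier in this series: a tick-based timeout is rounded up to whole jiffies, so at 100HZ its resolution is 10ms, while the hrtimer-backed helpers are independent of HZ. A rough sketch of the before/after driver pattern, assuming the series is applied (schedule_msec_hrtimeout() does not exist in mainline, and the wrapper function here is invented for illustration):

    #include <linux/jiffies.h>
    #include <linux/sched.h>

    static void foo_wait_5ms(void)
    {
            /* Tick-based: at HZ=100, msecs_to_jiffies(5) rounds up to one
             * jiffy, so this sleep is quantised to 10ms ticks. */
            set_current_state(TASK_INTERRUPTIBLE);
            schedule_timeout(msecs_to_jiffies(5));

            /* hrtimer-backed helper used throughout this patch set: sleeps
             * close to the requested 5ms regardless of CONFIG_HZ. */
            set_current_state(TASK_INTERRUPTIBLE);
            schedule_msec_hrtimeout(5);
    }

This is why the series can afford a 100HZ default: the timer tick no longer bounds sleep resolution, and fewer ticks mean less overhead.
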
+ +- config HZ_1000 ++ config HZ_1000_NODEF + bool "1000 HZ" + help + 1000 Hz is the preferred choice for desktop systems and other +@@ -50,9 +61,9 @@ endchoice + config HZ + int + default 100 if HZ_100 +- default 250 if HZ_250 +- default 300 if HZ_300 +- default 1000 if HZ_1000 ++ default 250 if HZ_250_NODEF ++ default 300 if HZ_300_NODEF ++ default 1000 if HZ_1000_NODEF + + config SCHED_HRTICK + def_bool HIGH_RES_TIMERS +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-0014-Swap-sucks.patch b/sys-kernel/linux-image-redcore/files/5.1-0014-Swap-sucks.patch new file mode 100644 index 00000000..cca606c8 --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-0014-Swap-sucks.patch @@ -0,0 +1,25 @@ +From 2113c0b7c42ba961bcc409c1bf9aca9db747b2b0 Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Sat, 12 Aug 2017 12:02:04 +1000 +Subject: [PATCH 15/16] Swap sucks. + +--- + mm/vmscan.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/mm/vmscan.c b/mm/vmscan.c +index a815f73ee4d5..4bcbaec19859 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -164,7 +164,7 @@ struct scan_control { + /* + * From 0 .. 100. Higher means more swappy. + */ +-int vm_swappiness = 60; ++int vm_swappiness = 33; + /* + * The total number of pages which are beyond the high watermark within all + * zones. +-- +2.17.1 + diff --git a/sys-kernel/linux-image-redcore/files/5.1-Unknow-SSD-HFM128GDHTNG-8310B-QUIRK_NO_APST.patch b/sys-kernel/linux-image-redcore/files/5.1-Unknow-SSD-HFM128GDHTNG-8310B-QUIRK_NO_APST.patch new file mode 100644 index 00000000..073fb752 --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-Unknow-SSD-HFM128GDHTNG-8310B-QUIRK_NO_APST.patch @@ -0,0 +1,19 @@ +diff -Naur linux-4.20.6/drivers/nvme/host/core.c linux-4.20.6-p/drivers/nvme/host/core.c +--- linux-4.20.6/drivers/nvme/host/core.c 2019-01-31 08:15:47.000000000 +0100 ++++ linux-4.20.6-p/drivers/nvme/host/core.c 2019-02-04 22:32:28.182827035 +0100 +@@ -2046,6 +2046,15 @@ + .vid = 0x1179, + .mn = "THNSF5256GPUK TOSHIBA", + .quirks = NVME_QUIRK_NO_APST, ++ }, ++ ++ { ++ /* https://forum.openmandriva.org/t/nvme-ssd-m2-not-seen-by-omlx-4-0/2407 ++ * Unknow SSD .. 
Maybe ADATA/Hynix ( a similar mn from ADTA but vid seems to be Hynix) ++ */ ++ .vid = 0x1c5c, ++ .mn = "HFM128GDHTNG-8310B", ++ .quirks = NVME_QUIRK_NO_APST, + } + }; + diff --git a/sys-kernel/linux-image-redcore/files/5.1-acpi-use-kern_warning_even_when_error.patch b/sys-kernel/linux-image-redcore/files/5.1-acpi-use-kern_warning_even_when_error.patch new file mode 100644 index 00000000..64c773ab --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-acpi-use-kern_warning_even_when_error.patch @@ -0,0 +1,18 @@ +diff -Naur linux-5.1/include/acpi/platform/aclinux.h linux-5.1-p/include/acpi/platform/aclinux.h +--- linux-5.1/include/acpi/platform/aclinux.h 2019-05-06 02:42:58.000000000 +0200 ++++ linux-5.1-p/include/acpi/platform/aclinux.h 2019-05-07 09:49:23.980444601 +0200 +@@ -153,12 +153,12 @@ + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory + +-#define ACPI_MSG_ERROR KERN_ERR "ACPI Error: " ++#define ACPI_MSG_ERROR KERN_WARNING "ACPI Error: " + #define ACPI_MSG_EXCEPTION KERN_ERR "ACPI Exception: " + #define ACPI_MSG_WARNING KERN_WARNING "ACPI Warning: " + #define ACPI_MSG_INFO KERN_INFO "ACPI: " + +-#define ACPI_MSG_BIOS_ERROR KERN_ERR "ACPI BIOS Error (bug): " ++#define ACPI_MSG_BIOS_ERROR KERN_WARNING "ACPI BIOS Error (bug): " + #define ACPI_MSG_BIOS_WARNING KERN_WARNING "ACPI BIOS Warning (bug): " + + /* diff --git a/sys-kernel/linux-image-redcore/files/redcore-amd64.config b/sys-kernel/linux-image-redcore/files/5.1-amd64.config index 288c1279..40628bb6 100644 --- a/sys-kernel/linux-image-redcore/files/redcore-amd64.config +++ b/sys-kernel/linux-image-redcore/files/5.1-amd64.config @@ -1,54 +1,16 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 4.18.5-redcore Kernel Configuration +# Linux/x86 5.1.15-redcore Kernel Configuration # # -# Compiler: gcc (Gentoo Hardened 7.3.0-r3 p1.4) 7.3.0 +# Compiler: gcc (Gentoo Hardened 8.2.0-r1337 p1.6) 8.2.0 # -CONFIG_64BIT=y -CONFIG_X86_64=y -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_MMU=y -CONFIG_ARCH_MMAP_RND_BITS_MIN=28 -CONFIG_ARCH_MMAP_RND_BITS_MAX=32 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_ARCH_HAS_FILTER_PGPROT=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ZONE_DMA32=y -CONFIG_AUDIT_ARCH=y -CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_HAVE_INTEL_TXT=y -CONFIG_X86_64_SMP=y -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_DYNAMIC_PHYSICAL_MASK=y -CONFIG_PGTABLE_LEVELS=4 CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=70300 +CONFIG_GCC_VERSION=80200 CONFIG_CLANG_VERSION=0 +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y CONFIG_IRQ_WORK=y CONFIG_BUILDTIME_EXTABLE_SORT=y CONFIG_THREAD_INFO_IN_TASK=y @@ -56,22 +18,24 @@ CONFIG_THREAD_INFO_IN_TASK=y # # General setup # +CONFIG_SCHED_MUQSS=y 
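
Stepping back to the NVMe patch further up: it follows the driver's usual quirk-table pattern, matching a device by reported vendor ID and model string and applying quirk flags before APST is configured. The entry has the shape sketched below, reusing the values the patch adds (0x1c5c is SK hynix's PCI vendor ID, which is presumably why the patch comment guesses at Hynix):

    /* Entry format of the core_quirks[] table in drivers/nvme/host/core.c. */
    {
            .vid = 0x1c5c,                /* vendor ID from Identify data */
            .mn = "HFM128GDHTNG-8310B",   /* model number string */
            .quirks = NVME_QUIRK_NO_APST, /* never enable autonomous power
                                           * state transitions */
    },
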
CONFIG_INIT_ENV_ARG_LIMIT=32 # CONFIG_COMPILE_TEST is not set CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_BUILD_SALT="" CONFIG_HAVE_KERNEL_GZIP=y CONFIG_HAVE_KERNEL_BZIP2=y CONFIG_HAVE_KERNEL_LZMA=y CONFIG_HAVE_KERNEL_XZ=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_HAVE_KERNEL_LZ4=y -CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_GZIP is not set # CONFIG_KERNEL_BZIP2 is not set # CONFIG_KERNEL_LZMA is not set # CONFIG_KERNEL_XZ is not set # CONFIG_KERNEL_LZO is not set -# CONFIG_KERNEL_LZ4 is not set +CONFIG_KERNEL_LZ4=y CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SWAP=y CONFIG_SYSVIPC=y @@ -83,8 +47,6 @@ CONFIG_CROSS_MEMORY_ATTACH=y CONFIG_AUDIT=y CONFIG_HAVE_ARCH_AUDITSYSCALL=y CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y # # IRQ subsystem @@ -103,10 +65,12 @@ CONFIG_GENERIC_MSI_IRQ_DOMAIN=y CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y CONFIG_GENERIC_IRQ_RESERVATION_MODE=y CONFIG_IRQ_FORCED_THREADING=y +# CONFIG_FORCE_IRQ_THREADING is not set CONFIG_SPARSE_IRQ=y # CONFIG_GENERIC_IRQ_DEBUGFS is not set CONFIG_CLOCKSOURCE_WATCHDOG=y CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y CONFIG_GENERIC_TIME_VSYSCALL=y CONFIG_GENERIC_CLOCKEVENTS=y @@ -118,25 +82,33 @@ CONFIG_GENERIC_CMOS_UPDATE=y # Timers subsystem # CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set +CONFIG_HZ_PERIODIC=y # CONFIG_NO_HZ_IDLE is not set -CONFIG_NO_HZ_FULL=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_PREEMPT_COUNT=y # # CPU/Task time and stats accounting # CONFIG_VIRT_CPU_ACCOUNTING=y +# CONFIG_TICK_CPU_ACCOUNTING is not set CONFIG_VIRT_CPU_ACCOUNTING_GEN=y CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y CONFIG_BSD_PROCESS_ACCT=y # CONFIG_BSD_PROCESS_ACCT_V3 is not set CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_PSI is not set CONFIG_CPU_ISOLATION=y # @@ -149,9 +121,6 @@ CONFIG_TREE_SRCU=y CONFIG_TASKS_RCU=y CONFIG_RCU_STALL_COMMON=y CONFIG_RCU_NEED_SEGCBLIST=y -CONFIG_CONTEXT_TRACKING=y -# CONFIG_CONTEXT_TRACKING_FORCE is not set -CONFIG_RCU_NOCB_CPU=y CONFIG_BUILD_BIN2C=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -162,20 +131,16 @@ CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_NUMA_BALANCING=y -CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y CONFIG_CGROUPS=y CONFIG_PAGE_COUNTER=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y CONFIG_BLK_CGROUP=y # CONFIG_DEBUG_BLK_CGROUP is not set CONFIG_CGROUP_WRITEBACK=y CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y @@ -183,7 +148,6 @@ CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y # CONFIG_CGROUP_DEBUG is not set @@ -194,7 +158,7 @@ CONFIG_IPC_NS=y CONFIG_USER_NS=y CONFIG_PID_NS=y CONFIG_NET_NS=y -CONFIG_SCHED_AUTOGROUP=y +# CONFIG_CHECKPOINT_RESTORE is not set # CONFIG_SYSFS_DEPRECATED is not set CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y @@ -235,6 +199,7 @@ CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y CONFIG_AIO=y +CONFIG_IO_URING=y CONFIG_ADVISE_SYSCALLS=y 
CONFIG_MEMBARRIER=y CONFIG_KALLSYMS=y @@ -269,203 +234,47 @@ CONFIG_SLUB_CPU_PARTIAL=y CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y -CONFIG_HOTPLUG_SMT=y -# CONFIG_OPROFILE is not set -CONFIG_HAVE_OPROFILE=y -CONFIG_OPROFILE_NMI_TIMER=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -CONFIG_OPTPROBES=y -CONFIG_KPROBES_ON_FTRACE=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_KRETPROBES=y -CONFIG_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_KPROBES_ON_FTRACE=y -CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y -CONFIG_HAVE_NMI=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_RCU_TABLE_FREE=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -CONFIG_PLUGIN_HOSTCC="g++" -CONFIG_HAVE_GCC_PLUGINS=y -# CONFIG_GCC_PLUGINS is not set -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_CC_HAS_STACKPROTECTOR_NONE=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_HAVE_ARCH_SOFT_DIRTY=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_HAVE_EXIT_THREAD=y -CONFIG_ARCH_MMAP_RND_BITS=32 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 -CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y -CONFIG_HAVE_COPY_THREAD_TLS=y -CONFIG_HAVE_STACK_VALIDATION=y -CONFIG_ISA_BUS_API=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y -CONFIG_COMPAT_32BIT_TIME=y -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_REFCOUNT=y -CONFIG_REFCOUNT_FULL=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_MODULE_SIG=y -# CONFIG_MODULE_SIG_FORCE is not set -CONFIG_MODULE_SIG_ALL=y -# CONFIG_MODULE_SIG_SHA1 is not set -# CONFIG_MODULE_SIG_SHA224 is not set -# CONFIG_MODULE_SIG_SHA256 is not set -# CONFIG_MODULE_SIG_SHA384 is not set -CONFIG_MODULE_SIG_SHA512=y -CONFIG_MODULE_SIG_HASH="sha512" -CONFIG_MODULE_COMPRESS=y 
-CONFIG_MODULE_COMPRESS_GZIP=y -# CONFIG_MODULE_COMPRESS_XZ is not set -# CONFIG_TRIM_UNUSED_KSYMS is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLK_SCSI_REQUEST=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_ZONED=y -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -CONFIG_BLK_CMDLINE_PARSER=y -CONFIG_BLK_WBT=y -CONFIG_BLK_WBT_SQ=y -CONFIG_BLK_WBT_MQ=y -CONFIG_BLK_DEBUG_FS=y -# CONFIG_BLK_SED_OPAL is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -CONFIG_ACORN_PARTITION=y -CONFIG_ACORN_PARTITION_CUMANA=y -CONFIG_ACORN_PARTITION_EESOX=y -CONFIG_ACORN_PARTITION_ICS=y -CONFIG_ACORN_PARTITION_ADFS=y -CONFIG_ACORN_PARTITION_POWERTEC=y -CONFIG_ACORN_PARTITION_RISCIX=y -CONFIG_AIX_PARTITION=y -CONFIG_OSF_PARTITION=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_LDM_PARTITION=y -CONFIG_LDM_DEBUG=y -CONFIG_SGI_PARTITION=y -CONFIG_ULTRIX_PARTITION=y -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -CONFIG_SYSV68_PARTITION=y -CONFIG_CMDLINE_PARTITION=y -CONFIG_BLOCK_COMPAT=y -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_MQ_RDMA=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_MQ_IOSCHED_DEADLINE=y -# CONFIG_MQ_IOSCHED_KYBER is not set -CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y -CONFIG_UNINLINE_SPIN_UNLOCK=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y -CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=4 CONFIG_CC_HAS_SANE_STACKPROTECTOR=y -CONFIG_FREEZER=y # # Processor type and features @@ -477,7 +286,7 @@ CONFIG_X86_X2APIC=y CONFIG_X86_MPPARSE=y # CONFIG_GOLDFISH is not 
set CONFIG_RETPOLINE=y -CONFIG_INTEL_RDT=y +CONFIG_X86_CPU_RESCTRL=y # CONFIG_X86_EXTENDED_PLATFORM is not set CONFIG_X86_INTEL_LPSS=y CONFIG_X86_AMD_PLATFORM_DEVICE=y @@ -491,11 +300,11 @@ CONFIG_PARAVIRT=y # CONFIG_PARAVIRT_SPINLOCKS is not set # CONFIG_XEN is not set CONFIG_KVM_GUEST=y +# CONFIG_PVH is not set # CONFIG_KVM_DEBUG_FS is not set # CONFIG_PARAVIRT_TIME_ACCOUNTING is not set CONFIG_PARAVIRT_CLOCK=y CONFIG_JAILHOUSE_GUEST=y -CONFIG_NO_BOOTMEM=y # CONFIG_MK8 is not set # CONFIG_MPSC is not set # CONFIG_MCORE2 is not set @@ -510,6 +319,7 @@ CONFIG_X86_MINIMUM_CPU_FAMILY=64 CONFIG_X86_DEBUGCTLMSR=y CONFIG_CPU_SUP_INTEL=y CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y CONFIG_CPU_SUP_CENTAUR=y CONFIG_HPET_TIMER=y CONFIG_HPET_EMULATE_RTC=y @@ -522,12 +332,15 @@ CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=8192 CONFIG_NR_CPUS=8192 CONFIG_SCHED_SMT=y +CONFIG_SMT_NICE=y CONFIG_SCHED_MC=y CONFIG_SCHED_MC_PRIO=y -# CONFIG_PREEMPT_NONE is not set -# CONFIG_PREEMPT_VOLUNTARY is not set -CONFIG_PREEMPT=y -CONFIG_PREEMPT_COUNT=y +# CONFIG_RQ_NONE is not set +# CONFIG_RQ_SMT is not set +CONFIG_RQ_MC=y +# CONFIG_RQ_SMP is not set +# CONFIG_RQ_ALL is not set +CONFIG_SHARERQ=2 CONFIG_X86_LOCAL_APIC=y CONFIG_X86_IO_APIC=y CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y @@ -558,10 +371,10 @@ CONFIG_X86_MSR=m CONFIG_X86_CPUID=m # CONFIG_X86_5LEVEL is not set CONFIG_X86_DIRECT_GBPAGES=y +# CONFIG_X86_CPA_STATISTICS is not set CONFIG_ARCH_HAS_MEM_ENCRYPT=y CONFIG_AMD_MEM_ENCRYPT=y # CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set -CONFIG_ARCH_USE_MEMREMAP_PROT=y CONFIG_NUMA=y CONFIG_AMD_NUMA=y CONFIG_X86_64_ACPI_NUMA=y @@ -574,74 +387,6 @@ CONFIG_ARCH_SELECT_MEMORY_MODEL=y CONFIG_ARCH_MEMORY_PROBE=y CONFIG_ARCH_PROC_KCORE_TEXT=y CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_NEED_MULTIPLE_NODES=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_HAVE_MEMBLOCK_NODE_MAP=y -CONFIG_HAVE_GENERIC_GUP=y -CONFIG_ARCH_DISCARD_MEMBLOCK=y -CONFIG_MEMORY_ISOLATION=y -CONFIG_HAVE_BOOTMEM_INFO_NODE=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_SPARSE=y -# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y -CONFIG_ARCH_ENABLE_THP_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_BOUNCE=y -CONFIG_VIRT_TO_BUS=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_UKSM=y -# CONFIG_KSM_LEGACY is not set -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -# CONFIG_HWPOISON_INJECT is not set -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -CONFIG_ARCH_WANTS_THP_SWAP=y -CONFIG_THP_SWAP=y -CONFIG_TRANSPARENT_HUGE_PAGECACHE=y -CONFIG_CLEANCACHE=y -CONFIG_FRONTSWAP=y -CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -CONFIG_CMA_AREAS=7 -# CONFIG_ZSWAP is not set -CONFIG_ZPOOL=m -CONFIG_ZBUD=m -CONFIG_Z3FOLD=m -CONFIG_ZSMALLOC=y -# CONFIG_PGTABLE_MAPPING is not set -# CONFIG_ZSMALLOC_STAT is not set -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set -# CONFIG_IDLE_PAGE_TRACKING is not set 
-CONFIG_ARCH_HAS_ZONE_DEVICE=y -# CONFIG_ZONE_DEVICE is not set -CONFIG_FRAME_VECTOR=y -CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y -CONFIG_ARCH_HAS_PKEYS=y -# CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_BENCHMARK is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y CONFIG_X86_PMEM_LEGACY_DEVICE=y CONFIG_X86_PMEM_LEGACY=m CONFIG_X86_CHECK_BIOS_CORRUPTION=y @@ -662,13 +407,14 @@ CONFIG_EFI=y CONFIG_EFI_STUB=y CONFIG_EFI_MIXED=y CONFIG_SECCOMP=y -# CONFIG_HZ_100 is not set -# CONFIG_HZ_250 is not set -# CONFIG_HZ_300 is not set -CONFIG_HZ_1000=y -CONFIG_HZ=1000 +CONFIG_HZ_100=y +# CONFIG_HZ_250_NODEF is not set +# CONFIG_HZ_300_NODEF is not set +# CONFIG_HZ_1000_NODEF is not set +CONFIG_HZ=100 CONFIG_SCHED_HRTICK=y CONFIG_KEXEC=y +# CONFIG_KEXEC_FILE is not set # CONFIG_CRASH_DUMP is not set CONFIG_KEXEC_JUMP=y CONFIG_PHYSICAL_START=0x1000000 @@ -693,6 +439,9 @@ CONFIG_ARCH_HAS_ADD_PAGES=y CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y # # Power management and ACPI options @@ -715,6 +464,8 @@ CONFIG_PM_CLK=y CONFIG_PM_GENERIC_DOMAINS=y # CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y CONFIG_ACPI=y CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y @@ -755,6 +506,7 @@ CONFIG_ACPI_HED=y # CONFIG_ACPI_CUSTOM_METHOD is not set CONFIG_ACPI_BGRT=y CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set CONFIG_HAVE_ACPI_APEI=y CONFIG_HAVE_ACPI_APEI_NMI=y CONFIG_ACPI_APEI=y @@ -766,6 +518,7 @@ CONFIG_ACPI_APEI_MEMORY_FAILURE=y CONFIG_DPTF_POWER=m CONFIG_ACPI_WATCHDOG=y CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_ADXL=y CONFIG_PMIC_OPREGION=y # CONFIG_XPOWER_PMIC_OPREGION is not set # CONFIG_BXT_WC_PMIC_OPREGION is not set @@ -816,119 +569,306 @@ CONFIG_X86_AMD_FREQ_SENSITIVITY=m CONFIG_CPU_IDLE=y CONFIG_CPU_IDLE_GOV_LADDER=y CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set CONFIG_INTEL_IDLE=y # # Bus options (PCI etc.) 
# -CONFIG_PCI=y CONFIG_PCI_DIRECT=y CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_DOMAINS=y CONFIG_MMCONF_FAM10H=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -CONFIG_PCIEAER_INJECT=m -CONFIG_PCIE_ECRC=y -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -CONFIG_PCIE_DPC=y -CONFIG_PCIE_PTM=y -CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y -CONFIG_PCI_QUIRKS=y -# CONFIG_PCI_DEBUG is not set -CONFIG_PCI_REALLOC_ENABLE_AUTO=y -CONFIG_PCI_STUB=m -CONFIG_PCI_PF_STUB=m -CONFIG_PCI_ATS=y -CONFIG_PCI_LOCKLESS_CONFIG=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PRI=y -CONFIG_PCI_PASID=y -CONFIG_PCI_LABEL=y -CONFIG_PCI_HYPERV=m -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -CONFIG_HOTPLUG_PCI_CPCI=y -CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m -CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m -# CONFIG_HOTPLUG_PCI_SHPC is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +CONFIG_X86_SYSFB=y # -# PCI controller drivers +# Binary Emulations # +CONFIG_IA32_EMULATION=y +CONFIG_X86_X32=y +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_X86_DEV_DMA_OPS=y +CONFIG_HAVE_GENERIC_GUP=y # -# Cadence PCIe controllers support +# Firmware Drivers # -CONFIG_VMD=m +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=m +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m +CONFIG_FW_CFG_SYSFS=m +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_GOOGLE_FIRMWARE=y +CONFIG_GOOGLE_SMI=m +CONFIG_GOOGLE_COREBOOT_TABLE=m +CONFIG_GOOGLE_MEMCONSOLE=m +CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY=m +CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT=m +CONFIG_GOOGLE_MEMCONSOLE_COREBOOT=m +CONFIG_GOOGLE_VPD=m # -# DesignWare PCI Core Support +# EFI (Extensible Firmware Interface) Support # -# CONFIG_PCIE_DW_PLAT_HOST is not set -# CONFIG_PCIE_DW_PLAT_EP is not set +CONFIG_EFI_VARS=m +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_BOOTLOADER_CONTROL=m +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +CONFIG_APPLE_PROPERTIES=y +CONFIG_RESET_ATTACK_MITIGATION=y +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y +CONFIG_EFI_DEV_PATH_PARSER=y +CONFIG_EFI_EARLYCON=y # -# PCI Endpoint +# Tegra firmware driver # -CONFIG_PCI_ENDPOINT=y -CONFIG_PCI_ENDPOINT_CONFIGFS=y -# CONFIG_PCI_EPF_TEST is not set +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +CONFIG_KVM_AMD_SEV=y +# CONFIG_KVM_MMU_AUDIT is not set +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set # -# PCI switch controller drivers +# General architecture-dependent options # -CONFIG_PCI_SW_SWITCHTEC=m -CONFIG_ISA_DMA_API=y -CONFIG_AMD_NB=y -CONFIG_PCCARD=m -CONFIG_PCMCIA=m -CONFIG_PCMCIA_LOAD_CIS=y -CONFIG_CARDBUS=y +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HOTPLUG_SMT=y +# CONFIG_OPROFILE is not set 
+CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_HAVE_RCU_TABLE_INVALIDATE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=32 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_ISA_BUS_API=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_64BIT_TIME=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +CONFIG_REFCOUNT_FULL=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y # -# PC-card bridges +# GCOV-based kernel profiling # -CONFIG_YENTA=m -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y -CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y -CONFIG_PD6729=m -CONFIG_I82092=m -CONFIG_PCCARD_NONSTATIC=y -CONFIG_RAPIDIO=y -CONFIG_RAPIDIO_TSI721=y -CONFIG_RAPIDIO_DISC_TIMEOUT=30 -CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y -CONFIG_RAPIDIO_DMA_ENGINE=y -# CONFIG_RAPIDIO_DEBUG is not set -CONFIG_RAPIDIO_ENUM_BASIC=m -CONFIG_RAPIDIO_CHMAN=m -CONFIG_RAPIDIO_MPORT_CDEV=m +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="g++" +CONFIG_HAVE_GCC_PLUGINS=y +# CONFIG_GCC_PLUGINS is not set +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 
+CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +# CONFIG_MODULE_SIG_SHA256 is not set +# CONFIG_MODULE_SIG_SHA384 is not set +CONFIG_MODULE_SIG_SHA512=y +CONFIG_MODULE_SIG_HASH="sha512" +CONFIG_MODULE_COMPRESS=y +CONFIG_MODULE_COMPRESS_GZIP=y +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +CONFIG_BLK_CMDLINE_PARSER=y +CONFIG_BLK_WBT=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set # -# RapidIO Switch drivers +# Partition Types # -CONFIG_RAPIDIO_TSI57X=y -CONFIG_RAPIDIO_CPS_XX=y -CONFIG_RAPIDIO_TSI568=y -CONFIG_RAPIDIO_CPS_GEN2=y -CONFIG_RAPIDIO_RXS_GEN3=m -CONFIG_X86_SYSFB=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_ACORN_PARTITION=y +CONFIG_ACORN_PARTITION_CUMANA=y +CONFIG_ACORN_PARTITION_EESOX=y +CONFIG_ACORN_PARTITION_ICS=y +CONFIG_ACORN_PARTITION_ADFS=y +CONFIG_ACORN_PARTITION_POWERTEC=y +CONFIG_ACORN_PARTITION_RISCIX=y +CONFIG_AIX_PARTITION=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_ATARI_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_LDM_DEBUG=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +CONFIG_SYSV68_PARTITION=y +CONFIG_CMDLINE_PARTITION=y +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y +CONFIG_BLK_PM=y # -# Executable file formats / Emulations +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +# CONFIG_MQ_IOSCHED_KYBER is not set +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats # CONFIG_BINFMT_ELF=y CONFIG_COMPAT_BINFMT_ELF=y @@ -937,18 +877,77 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_BINFMT_SCRIPT=y CONFIG_BINFMT_MISC=y CONFIG_COREDUMP=y -CONFIG_IA32_EMULATION=y -CONFIG_IA32_AOUT=y -CONFIG_X86_X32=y -CONFIG_COMPAT_32=y -CONFIG_COMPAT=y -CONFIG_COMPAT_FOR_U64_ALIGNMENT=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_X86_DEV_DMA_OPS=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# 
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_UKSM=y +# CONFIG_KSM_LEGACY is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +# CONFIG_HWPOISON_INJECT is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +# CONFIG_ZSWAP is not set +CONFIG_ZPOOL=m +CONFIG_ZBUD=m +CONFIG_Z3FOLD=m +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +# CONFIG_ZSMALLOC_STAT is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_ZONE_DEVICE=y +# CONFIG_ZONE_DEVICE is not set +CONFIG_FRAME_VECTOR=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y CONFIG_NET=y CONFIG_COMPAT_NETLINK_MESSAGES=y CONFIG_NET_INGRESS=y CONFIG_NET_EGRESS=y +CONFIG_SKB_EXTENSIONS=y # # Networking options @@ -956,6 +955,7 @@ CONFIG_NET_EGRESS=y CONFIG_PACKET=m CONFIG_PACKET_DIAG=m CONFIG_UNIX=m +CONFIG_UNIX_SCM=y CONFIG_UNIX_DIAG=m CONFIG_TLS=m # CONFIG_TLS_DEVICE is not set @@ -963,6 +963,7 @@ CONFIG_XFRM=y CONFIG_XFRM_OFFLOAD=y CONFIG_XFRM_ALGO=m CONFIG_XFRM_USER=m +# CONFIG_XFRM_INTERFACE is not set CONFIG_XFRM_SUB_POLICY=y CONFIG_XFRM_MIGRATE=y CONFIG_XFRM_STATISTICS=y @@ -972,6 +973,7 @@ CONFIG_NET_KEY_MIGRATE=y CONFIG_SMC=m CONFIG_SMC_DIAG=m CONFIG_XDP_SOCKETS=y +# CONFIG_XDP_SOCKETS_DIAG is not set CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y @@ -1082,6 +1084,7 @@ CONFIG_NETFILTER_FAMILY_ARP=y CONFIG_NETFILTER_NETLINK_ACCT=m CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m CONFIG_NF_CONNTRACK=m CONFIG_NF_LOG_COMMON=m CONFIG_NF_LOG_NETDEV=m @@ -1095,7 +1098,7 @@ CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y CONFIG_NF_CONNTRACK_LABELS=y CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_GRE=y CONFIG_NF_CT_PROTO_SCTP=y CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m @@ -1115,17 +1118,14 @@ CONFIG_NF_CT_NETLINK_HELPER=m CONFIG_NETFILTER_NETLINK_GLUE_CT=y CONFIG_NF_NAT=m CONFIG_NF_NAT_NEEDED=y -CONFIG_NF_NAT_PROTO_DCCP=y -CONFIG_NF_NAT_PROTO_UDPLITE=y -CONFIG_NF_NAT_PROTO_SCTP=y CONFIG_NF_NAT_AMANDA=m CONFIG_NF_NAT_FTP=m CONFIG_NF_NAT_IRC=m CONFIG_NF_NAT_SIP=m CONFIG_NF_NAT_TFTP=m CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y CONFIG_NETFILTER_SYNPROXY=m -CONFIG_NF_OSF=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_SET=m CONFIG_NF_TABLES_INET=y @@ -1140,6 +1140,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +# CONFIG_NFT_TUNNEL is not set CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -1149,7 +1150,10 @@ CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB=m CONFIG_NFT_FIB_INET=m +# CONFIG_NFT_XFRM is not set CONFIG_NFT_SOCKET=m +# CONFIG_NFT_OSF is not set +# CONFIG_NFT_TPROXY is not set CONFIG_NF_DUP_NETDEV=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m @@ -1315,7 
+1319,6 @@ CONFIG_IP_VS_PE_SIP=m # IP: Netfilter Configuration # CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NF_SOCKET_IPV4=m CONFIG_NF_TPROXY_IPV4=m CONFIG_NF_TABLES_IPV4=y @@ -1329,13 +1332,7 @@ CONFIG_NF_DUP_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NF_LOG_IPV4=m CONFIG_NF_REJECT_IPV4=m -CONFIG_NF_NAT_IPV4=m -CONFIG_NFT_CHAIN_NAT_IPV4=m -CONFIG_NF_NAT_MASQUERADE_IPV4=y -CONFIG_NFT_MASQ_IPV4=m -CONFIG_NFT_REDIR_IPV4=m CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PROTO_GRE=m CONFIG_NF_NAT_PPTP=m CONFIG_NF_NAT_H323=m CONFIG_IP_NF_IPTABLES=m @@ -1363,15 +1360,10 @@ CONFIG_IP_NF_ARP_MANGLE=m # # IPv6: Netfilter Configuration # -CONFIG_NF_DEFRAG_IPV6=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NF_TPROXY_IPV6=m CONFIG_NF_TABLES_IPV6=y CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_CHAIN_NAT_IPV6=m -CONFIG_NFT_MASQ_IPV6=m -CONFIG_NFT_REDIR_IPV6=m CONFIG_NFT_REJECT_IPV6=m CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_FIB_IPV6=m @@ -1379,8 +1371,6 @@ CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NF_DUP_IPV6=m CONFIG_NF_REJECT_IPV6=m CONFIG_NF_LOG_IPV6=m -CONFIG_NF_NAT_IPV6=m -CONFIG_NF_NAT_MASQUERADE_IPV6=y CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -1402,6 +1392,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_DEFRAG_IPV6=m # # DECnet: Netfilter Configuration @@ -1489,7 +1480,9 @@ CONFIG_NET_DSA_TAG_BRCM=y CONFIG_NET_DSA_TAG_BRCM_PREPEND=y CONFIG_NET_DSA_TAG_DSA=y CONFIG_NET_DSA_TAG_EDSA=y +CONFIG_NET_DSA_TAG_GSWIP=y CONFIG_NET_DSA_TAG_KSZ=y +CONFIG_NET_DSA_TAG_KSZ9477=y CONFIG_NET_DSA_TAG_LAN9303=y CONFIG_NET_DSA_TAG_MTK=y CONFIG_NET_DSA_TAG_TRAILER=y @@ -1546,15 +1539,19 @@ CONFIG_NET_SCH_SFQ=m CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_CBS=m +# CONFIG_NET_SCH_ETF is not set +# CONFIG_NET_SCH_TAPRIO is not set CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_DSMARK=m CONFIG_NET_SCH_NETEM=m CONFIG_NET_SCH_DRR=m CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set CONFIG_NET_SCH_CHOKE=m CONFIG_NET_SCH_QFQ=m CONFIG_NET_SCH_CODEL=m CONFIG_NET_SCH_FQ_CODEL=m +# CONFIG_NET_SCH_CAKE is not set CONFIG_NET_SCH_FQ=m CONFIG_NET_SCH_HHF=m CONFIG_NET_SCH_PIE=m @@ -1623,6 +1620,7 @@ CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_BATMAN_ADV_DEBUGFS=y # CONFIG_BATMAN_ADV_DEBUG is not set +# CONFIG_BATMAN_ADV_TRACING is not set CONFIG_OPENVSWITCH=m CONFIG_OPENVSWITCH_GRE=m CONFIG_OPENVSWITCH_VXLAN=m @@ -1692,7 +1690,6 @@ CONFIG_CAN_VXCAN=m CONFIG_CAN_SLCAN=m CONFIG_CAN_DEV=m CONFIG_CAN_CALC_BITTIMING=y -CONFIG_CAN_LEDS=y CONFIG_CAN_JANZ_ICAN3=m CONFIG_CAN_C_CAN=m CONFIG_CAN_C_CAN_PLATFORM=m @@ -1725,13 +1722,14 @@ CONFIG_CAN_MCP251X=m # # CAN USB interfaces # +CONFIG_CAN_8DEV_USB=m CONFIG_CAN_EMS_USB=m CONFIG_CAN_ESD_USB2=m CONFIG_CAN_GS_USB=m CONFIG_CAN_KVASER_USB=m -CONFIG_CAN_PEAK_USB=m -CONFIG_CAN_8DEV_USB=m CONFIG_CAN_MCBA_USB=m +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set # CONFIG_CAN_DEBUG_DEVICES is not set CONFIG_BT=m CONFIG_BT_BREDR=y @@ -1770,6 +1768,7 @@ CONFIG_BT_HCIUART_ATH3K=y CONFIG_BT_HCIUART_LL=y CONFIG_BT_HCIUART_3WIRE=y CONFIG_BT_HCIUART_INTEL=y +# CONFIG_BT_HCIUART_RTL is not set CONFIG_BT_HCIUART_QCA=y CONFIG_BT_HCIUART_AG6XX=y CONFIG_BT_HCIUART_MRVL=y @@ -1784,6 +1783,7 @@ CONFIG_BT_MRVL=m CONFIG_BT_MRVL_SDIO=m CONFIG_BT_ATH3K=m CONFIG_BT_WILINK=m +# CONFIG_BT_MTKUART is not set CONFIG_BT_HCIRSI=m CONFIG_AF_RXRPC=m CONFIG_AF_RXRPC_IPV6=y @@ -1817,7 +1817,6 @@ CONFIG_LIB80211_CRYPT_TKIP=m CONFIG_MAC80211=m CONFIG_MAC80211_HAS_RC=y CONFIG_MAC80211_RC_MINSTREL=y 
-CONFIG_MAC80211_RC_MINSTREL_HT=y CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" CONFIG_MAC80211_MESH=y @@ -1890,14 +1889,118 @@ CONFIG_LWTUNNEL=y CONFIG_LWTUNNEL_BPF=y CONFIG_DST_CACHE=y CONFIG_GRO_CELLS=y -CONFIG_NET_DEVLINK=m -CONFIG_MAY_USE_DEVLINK=m +CONFIG_NET_SOCK_MSG=y +# CONFIG_NET_DEVLINK is not set CONFIG_FAILOVER=m CONFIG_HAVE_EBPF_JIT=y # # Device Drivers # +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +CONFIG_PCIE_PTM=y +# CONFIG_PCIE_BW is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +CONFIG_PCI_REALLOC_ENABLE_AUTO=y +CONFIG_PCI_STUB=m +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_PCI_HYPERV=m +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m +CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +CONFIG_VMD=m + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCIE_DW_PLAT_EP is not set +# CONFIG_PCI_MESON is not set + +# +# PCI Endpoint +# +CONFIG_PCI_ENDPOINT=y +CONFIG_PCI_ENDPOINT_CONFIGFS=y +# CONFIG_PCI_EPF_TEST is not set + +# +# PCI switch controller drivers +# +CONFIG_PCI_SW_SWITCHTEC=m +CONFIG_PCCARD=m +CONFIG_PCMCIA=m +CONFIG_PCMCIA_LOAD_CIS=y +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +CONFIG_PD6729=m +CONFIG_I82092=m +CONFIG_PCCARD_NONSTATIC=y +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_DISC_TIMEOUT=30 +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +CONFIG_RAPIDIO_DMA_ENGINE=y +# CONFIG_RAPIDIO_DEBUG is not set +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m + +# +# RapidIO Switch drivers +# +CONFIG_RAPIDIO_TSI57X=y +CONFIG_RAPIDIO_CPS_XX=y +CONFIG_RAPIDIO_TSI568=y +CONFIG_RAPIDIO_CPS_GEN2=y +CONFIG_RAPIDIO_RXS_GEN3=m # # Generic Driver Options @@ -1926,6 +2029,7 @@ CONFIG_GENERIC_CPU_AUTOPROBE=y CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SLIMBUS=m CONFIG_REGMAP_SPI=y CONFIG_REGMAP_SPMI=m CONFIG_REGMAP_W1=m @@ -1934,24 +2038,24 @@ CONFIG_REGMAP_IRQ=y CONFIG_REGMAP_SOUNDWIRE=m CONFIG_DMA_SHARED_BUFFER=y # CONFIG_DMA_FENCE_TRACE is not set -# CONFIG_DMA_CMA is not set # # Bus devices # CONFIG_CONNECTOR=m +# CONFIG_GNSS is not set CONFIG_MTD=m CONFIG_MTD_TESTS=m -CONFIG_MTD_REDBOOT_PARTS=m -CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 -CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y -CONFIG_MTD_REDBOOT_PARTS_READONLY=y CONFIG_MTD_CMDLINE_PARTS=m CONFIG_MTD_AR7_PARTS=m # # Partition parsers # +CONFIG_MTD_REDBOOT_PARTS=m +CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 +CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y +CONFIG_MTD_REDBOOT_PARTS_READONLY=y # # User Modules And Translation Layers @@ -1996,6 +2100,7 @@ CONFIG_MTD_ABSENT=m CONFIG_MTD_COMPLEX_MAPPINGS=y 
CONFIG_MTD_PHYSMAP=m # CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PHYSMAP_GPIO_ADDR is not set CONFIG_MTD_SBC_GXX=m CONFIG_MTD_AMD76XROM=m CONFIG_MTD_ICHXROM=m @@ -2007,10 +2112,8 @@ CONFIG_MTD_L440GX=m CONFIG_MTD_PCI=m CONFIG_MTD_PCMCIA=m # CONFIG_MTD_PCMCIA_ANONYMOUS is not set -CONFIG_MTD_GPIO_ADDR=m CONFIG_MTD_INTEL_VR_NOR=m CONFIG_MTD_PLATRAM=m -CONFIG_MTD_LATCH_ADDR=m # # Self-contained MTD device drivers @@ -2057,10 +2160,10 @@ CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED=y CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0x0 CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH=y CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y -CONFIG_MTD_NAND_DOCG4=m CONFIG_MTD_NAND_CAFE=m CONFIG_MTD_NAND_NANDSIM=m CONFIG_MTD_NAND_PLATFORM=m +# CONFIG_MTD_SPI_NAND is not set # # LPDDR & LPDDR2 PCM memory drivers @@ -2068,8 +2171,8 @@ CONFIG_MTD_NAND_PLATFORM=m CONFIG_MTD_LPDDR=m CONFIG_MTD_QINFO_PROBE=m CONFIG_MTD_SPI_NOR=m -CONFIG_MTD_MT81xx_NOR=m CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +CONFIG_SPI_MTK_QUADSPI=m CONFIG_SPI_INTEL_SPI=m CONFIG_SPI_INTEL_SPI_PCI=m CONFIG_SPI_INTEL_SPI_PLATFORM=m @@ -2134,7 +2237,6 @@ CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m CONFIG_ZRAM=m CONFIG_ZRAM_WRITEBACK=y # CONFIG_ZRAM_MEMORY_TRACKING is not set -CONFIG_BLK_DEV_DAC960=m CONFIG_BLK_DEV_UMEM=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 @@ -2165,11 +2267,13 @@ CONFIG_BLK_DEV_NVME=y CONFIG_NVME_FABRICS=m CONFIG_NVME_RDMA=m CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m CONFIG_NVME_TARGET=m CONFIG_NVME_TARGET_LOOP=m CONFIG_NVME_TARGET_RDMA=m CONFIG_NVME_TARGET_FC=m CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m # # Misc devices @@ -2201,6 +2305,7 @@ CONFIG_LATTICE_ECP3_CONFIG=m CONFIG_SRAM=y CONFIG_PCI_ENDPOINT_TEST=m CONFIG_MISC_RTSX=m +CONFIG_PVPANIC=m CONFIG_C2PORT=m CONFIG_C2PORT_DURAMAR_2150=m @@ -2214,6 +2319,7 @@ CONFIG_EEPROM_MAX6875=m CONFIG_EEPROM_93CX6=m CONFIG_EEPROM_93XX46=m CONFIG_EEPROM_IDT_89HPESX=m +CONFIG_EEPROM_EE1004=m CONFIG_CB710_CORE=m # CONFIG_CB710_DEBUG is not set CONFIG_CB710_DEBUG_ASSUMPTIONS=y @@ -2231,6 +2337,7 @@ CONFIG_ALTERA_STAPL=m CONFIG_INTEL_MEI=y CONFIG_INTEL_MEI_ME=y CONFIG_INTEL_MEI_TXE=m +CONFIG_INTEL_MEI_HDCP=m CONFIG_VMWARE_VMCI=m # @@ -2280,8 +2387,10 @@ CONFIG_VHOST_RING=m CONFIG_GENWQE=m CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0 CONFIG_ECHO=m +CONFIG_MISC_ALCOR_PCI=m CONFIG_MISC_RTSX_PCI=m CONFIG_MISC_RTSX_USB=m +CONFIG_HABANA_AI=m CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -2293,7 +2402,6 @@ CONFIG_RAID_ATTRS=m CONFIG_SCSI=m CONFIG_SCSI_DMA=y CONFIG_SCSI_NETLINK=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_SCSI_PROC_FS=y # @@ -2372,10 +2480,14 @@ CONFIG_SCSI_UFSHCD=m CONFIG_SCSI_UFSHCD_PCI=m CONFIG_SCSI_UFS_DWC_TC_PCI=m CONFIG_SCSI_UFSHCD_PLATFORM=m +CONFIG_SCSI_UFS_CDNS_PLATFORM=m CONFIG_SCSI_UFS_DWC_TC_PLATFORM=m +# CONFIG_SCSI_UFS_BSG is not set CONFIG_SCSI_HPTIOP=m CONFIG_SCSI_BUSLOGIC=m CONFIG_SCSI_FLASHPOINT=y +CONFIG_SCSI_MYRB=m +CONFIG_SCSI_MYRS=m CONFIG_VMWARE_PVSCSI=m CONFIG_HYPERV_STORAGE=m CONFIG_LIBFC=m @@ -2430,10 +2542,6 @@ CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m CONFIG_SCSI_DH_ALUA=m -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m -CONFIG_SCSI_OSD_DPRINT_SENSE=1 -# CONFIG_SCSI_OSD_DEBUG is not set CONFIG_ATA=m CONFIG_ATA_VERBOSE_ERROR=y CONFIG_ATA_ACPI=y @@ -2545,7 +2653,6 @@ CONFIG_BCACHE=m # CONFIG_BCACHE_CLOSURES_DEBUG is not set CONFIG_BLK_DEV_DM_BUILTIN=y CONFIG_BLK_DEV_DM=m -# CONFIG_DM_MQ_DEFAULT is not set # CONFIG_DM_DEBUG is not set CONFIG_DM_BUFIO=m # CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set @@ -2620,6 +2727,7 @@ 
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y CONFIG_IPVLAN=m CONFIG_IPVTAP=m CONFIG_VXLAN=m @@ -2702,15 +2810,20 @@ CONFIG_B53_SPI_DRIVER=m CONFIG_B53_MDIO_DRIVER=m CONFIG_B53_MMAP_DRIVER=m CONFIG_B53_SRAB_DRIVER=m +CONFIG_B53_SERDES=m +# CONFIG_NET_DSA_BCM_SF2 is not set CONFIG_NET_DSA_LOOP=m +CONFIG_NET_DSA_LANTIQ_GSWIP=m CONFIG_NET_DSA_MT7530=m CONFIG_NET_DSA_MV88E6060=m -CONFIG_MICROCHIP_KSZ=m -CONFIG_MICROCHIP_KSZ_SPI_DRIVER=m +CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON=m +CONFIG_NET_DSA_MICROCHIP_KSZ9477=m +CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m CONFIG_NET_DSA_MV88E6XXX=m CONFIG_NET_DSA_MV88E6XXX_GLOBAL2=y CONFIG_NET_DSA_MV88E6XXX_PTP=y CONFIG_NET_DSA_QCA8K=m +# CONFIG_NET_DSA_REALTEK_SMI is not set CONFIG_NET_DSA_SMSC_LAN9303=m CONFIG_NET_DSA_SMSC_LAN9303_I2C=m CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m @@ -2756,19 +2869,22 @@ CONFIG_B44=m CONFIG_B44_PCI_AUTOSELECT=y CONFIG_B44_PCICORE_AUTOSELECT=y CONFIG_B44_PCI=y +# CONFIG_BCMGENET is not set CONFIG_BNX2=m CONFIG_CNIC=m CONFIG_TIGON3=m CONFIG_TIGON3_HWMON=y CONFIG_BNX2X=m CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set CONFIG_BNXT=m CONFIG_BNXT_SRIOV=y CONFIG_BNXT_FLOWER_OFFLOAD=y CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y CONFIG_NET_VENDOR_BROCADE=y CONFIG_BNA=m -CONFIG_NET_CADENCE=y +CONFIG_NET_VENDOR_CADENCE=y CONFIG_MACB=m CONFIG_MACB_USE_HWSTAMP=y CONFIG_MACB_PCI=m @@ -2815,6 +2931,10 @@ CONFIG_SUNDANCE=m CONFIG_NET_VENDOR_EMULEX=y CONFIG_BE2NET=m CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y CONFIG_NET_VENDOR_EZCHIP=y CONFIG_NET_VENDOR_FUJITSU=y CONFIG_PCMCIA_FMVJ18X=m @@ -2837,16 +2957,16 @@ CONFIG_IXGBE=m CONFIG_IXGBE_HWMON=y CONFIG_IXGBE_DCA=y CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y CONFIG_I40E=m CONFIG_I40E_DCB=y +CONFIG_IAVF=m CONFIG_I40EVF=m CONFIG_ICE=m CONFIG_FM10K=m -CONFIG_NET_VENDOR_EXAR=y -CONFIG_S2IO=m -CONFIG_VXGE=m -# CONFIG_VXGE_DEBUG_TRACE_ALL is not set +CONFIG_IGC=m CONFIG_JME=m CONFIG_NET_VENDOR_MARVELL=y CONFIG_MVMDIO=m @@ -2888,7 +3008,6 @@ CONFIG_ENCX24J600=m CONFIG_LAN743X=m CONFIG_NET_VENDOR_MICROSEMI=y CONFIG_MSCC_OCELOT_SWITCH=m -CONFIG_MSCC_OCELOT_SWITCH_OCELOT=m CONFIG_NET_VENDOR_MYRI=y CONFIG_MYRI10GE=m CONFIG_MYRI10GE_DCA=y @@ -2896,12 +3015,17 @@ CONFIG_FEALNX=m CONFIG_NET_VENDOR_NATSEMI=y CONFIG_NATSEMI=m CONFIG_NS83820=m +CONFIG_NET_VENDOR_NETERION=y +CONFIG_S2IO=m +CONFIG_VXGE=m +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set CONFIG_NET_VENDOR_NETRONOME=y CONFIG_NFP=m # CONFIG_NFP_APP_FLOWER is not set CONFIG_NFP_APP_ABM_NIC=y # CONFIG_NFP_DEBUG is not set CONFIG_NET_VENDOR_NI=y +CONFIG_NI_XGE_MANAGEMENT_ENET=m CONFIG_NET_VENDOR_8390=y CONFIG_PCMCIA_AXNET=m CONFIG_NE2K_PCI=m @@ -2910,7 +3034,7 @@ CONFIG_NET_VENDOR_NVIDIA=y CONFIG_FORCEDETH=m CONFIG_NET_VENDOR_OKI=y CONFIG_ETHOC=m -CONFIG_NET_PACKET_ENGINE=y +CONFIG_NET_VENDOR_PACKET_ENGINES=y CONFIG_HAMACHI=m CONFIG_YELLOWFIN=m CONFIG_NET_VENDOR_QLOGIC=y @@ -2984,6 +3108,7 @@ CONFIG_DWC_XLGMAC_PCI=m CONFIG_NET_VENDOR_TEHUTI=y CONFIG_TEHUTI=m CONFIG_NET_VENDOR_TI=y +# CONFIG_TI_CPSW_PHY_SEL is not set CONFIG_TI_CPSW_ALE=m CONFIG_TLAN=m CONFIG_NET_VENDOR_VIA=y @@ -3009,6 +3134,7 @@ CONFIG_ROADRUNNER_LARGE_RINGS=y CONFIG_NET_SB1000=m CONFIG_MDIO_DEVICE=m CONFIG_MDIO_BUS=m +# CONFIG_MDIO_BCM_UNIMAC is not set CONFIG_MDIO_BITBANG=m CONFIG_MDIO_CAVIUM=m CONFIG_MDIO_GPIO=m @@ -3127,6 +3253,7 @@ CONFIG_USB_IPHETH=m CONFIG_USB_SIERRA_NET=m CONFIG_USB_VL600=m CONFIG_USB_NET_CH9200=m 
+CONFIG_USB_NET_AQC111=m CONFIG_WLAN=y CONFIG_WLAN_VENDOR_ADMTEK=y CONFIG_ADM8211=m @@ -3299,7 +3426,15 @@ CONFIG_MWL8K=m CONFIG_WLAN_VENDOR_MEDIATEK=y CONFIG_MT7601U=m CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76x02_LIB=m +CONFIG_MT76x0_COMMON=m +# CONFIG_MT76x0U is not set +CONFIG_MT76x0E=m +CONFIG_MT76x2_COMMON=m CONFIG_MT76x2E=m +# CONFIG_MT76x2U is not set +CONFIG_MT7603E=m CONFIG_WLAN_VENDOR_RALINK=y CONFIG_RT2X00=m CONFIG_RT2400PCI=m @@ -3377,11 +3512,12 @@ CONFIG_ZD1211RW=m # CONFIG_ZD1211RW_DEBUG is not set CONFIG_WLAN_VENDOR_QUANTENNA=y CONFIG_QTNFMAC=m -CONFIG_QTNFMAC_PEARL_PCIE=m +CONFIG_QTNFMAC_PCIE=m CONFIG_PCMCIA_RAYCS=m CONFIG_PCMCIA_WL3501=m # CONFIG_MAC80211_HWSIM is not set CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_VIRT_WIFI=m # # WiMAX Wireless Broadband devices @@ -3422,6 +3558,7 @@ CONFIG_IEEE802154_ADF7242=m CONFIG_IEEE802154_CA8210=m # CONFIG_IEEE802154_CA8210_DEBUGFS is not set CONFIG_IEEE802154_MCR20A=m +# CONFIG_IEEE802154_HWSIM is not set CONFIG_VMXNET3=m CONFIG_FUJITSU_ES=m CONFIG_THUNDERBOLT_NET=m @@ -3520,13 +3657,6 @@ CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m CONFIG_ISDN_DRV_AVMB1_AVM_CS=m CONFIG_ISDN_DRV_AVMB1_T1PCI=m CONFIG_ISDN_DRV_AVMB1_C4=m -CONFIG_CAPI_EICON=y -CONFIG_ISDN_DIVAS=m -CONFIG_ISDN_DIVAS_BRIPCI=y -CONFIG_ISDN_DIVAS_PRIPCI=y -CONFIG_ISDN_DIVAS_DIVACAPI=m -CONFIG_ISDN_DIVAS_USERIDI=m -CONFIG_ISDN_DIVAS_MAINT=m CONFIG_ISDN_DRV_GIGASET=m CONFIG_GIGASET_CAPI=y CONFIG_GIGASET_BASE=m @@ -3554,8 +3684,8 @@ CONFIG_MISDN_IPAC=m CONFIG_MISDN_ISAR=m CONFIG_ISDN_HDLC=m CONFIG_NVM=y -# CONFIG_NVM_DEBUG is not set CONFIG_NVM_PBLK=m +# CONFIG_NVM_PBLK_DEBUG is not set # # Input device support @@ -3686,10 +3816,12 @@ CONFIG_TOUCHSCREEN_AD7877=m CONFIG_TOUCHSCREEN_AD7879=m CONFIG_TOUCHSCREEN_AD7879_I2C=m CONFIG_TOUCHSCREEN_AD7879_SPI=m +# CONFIG_TOUCHSCREEN_ADC is not set CONFIG_TOUCHSCREEN_ATMEL_MXT=m # CONFIG_TOUCHSCREEN_ATMEL_MXT_T37 is not set CONFIG_TOUCHSCREEN_AUO_PIXCIR=m CONFIG_TOUCHSCREEN_BU21013=m +# CONFIG_TOUCHSCREEN_BU21029 is not set CONFIG_TOUCHSCREEN_CHIPONE_ICN8505=m CONFIG_TOUCHSCREEN_CY8CTMG110=m CONFIG_TOUCHSCREEN_CYTTSP_CORE=m @@ -3783,6 +3915,7 @@ CONFIG_INPUT_AD714X_SPI=m CONFIG_INPUT_ARIZONA_HAPTICS=m CONFIG_INPUT_BMA150=m CONFIG_INPUT_E3X0_BUTTON=m +CONFIG_INPUT_MSM_VIBRATOR=m CONFIG_INPUT_PCSPKR=m CONFIG_INPUT_MAX77693_HAPTIC=m CONFIG_INPUT_MC13783_PWRBUTTON=m @@ -3853,6 +3986,7 @@ CONFIG_SERIO_RAW=m CONFIG_SERIO_ALTERA_PS2=m CONFIG_SERIO_PS2MULT=m CONFIG_SERIO_ARC_PS2=m +CONFIG_SERIO_OLPC_APSP=m CONFIG_HYPERV_KEYBOARD=m CONFIG_SERIO_GPIO_PS2=m CONFIG_USERIO=m @@ -3890,6 +4024,7 @@ CONFIG_N_HDLC=m CONFIG_N_GSM=m CONFIG_TRACE_ROUTER=m CONFIG_TRACE_SINK=m +CONFIG_LDISC_AUTOLOAD=y CONFIG_DEVMEM=y # CONFIG_DEVKMEM is not set @@ -3954,6 +4089,7 @@ CONFIG_HVC_DRIVER=y CONFIG_VIRTIO_CONSOLE=m CONFIG_IPMI_HANDLER=m CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y CONFIG_IPMI_PANIC_EVENT=y CONFIG_IPMI_PANIC_STRING=y CONFIG_IPMI_DEVICE_INTERFACE=m @@ -3968,7 +4104,6 @@ CONFIG_HW_RANDOM_AMD=m CONFIG_HW_RANDOM_VIA=m CONFIG_HW_RANDOM_VIRTIO=m CONFIG_NVRAM=m -CONFIG_R3964=m CONFIG_APPLICOM=m # @@ -4006,6 +4141,7 @@ CONFIG_TELCLOCK=m CONFIG_DEVPORT=y CONFIG_XILLYBUS=m CONFIG_XILLYBUS_PCIE=m +# CONFIG_RANDOM_TRUST_CPU is not set # # I2C support @@ -4049,6 +4185,7 @@ CONFIG_I2C_ISMT=m CONFIG_I2C_PIIX4=m CONFIG_I2C_NFORCE2=m CONFIG_I2C_NFORCE2_S4985=m +CONFIG_I2C_NVIDIA_GPU=m CONFIG_I2C_SIS5595=m CONFIG_I2C_SIS630=m CONFIG_I2C_SIS96X=m @@ -4101,6 +4238,9 @@ CONFIG_I2C_SLAVE_EEPROM=m # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set # 
CONFIG_I2C_DEBUG_BUS is not set +CONFIG_I3C=m +CONFIG_CDNS_I3C_MASTER=m +CONFIG_DW_I3C_MASTER=m CONFIG_SPI=y # CONFIG_SPI_DEBUG is not set CONFIG_SPI_MASTER=y @@ -4119,6 +4259,7 @@ CONFIG_SPI_DW_PCI=m CONFIG_SPI_DW_MID_DMA=y CONFIG_SPI_DW_MMIO=m CONFIG_SPI_DLN2=m +CONFIG_SPI_NXP_FLEXSPI=m CONFIG_SPI_GPIO=m CONFIG_SPI_LM70_LLP=m CONFIG_SPI_OC_TINY=m @@ -4126,6 +4267,8 @@ CONFIG_SPI_PXA2XX=m CONFIG_SPI_PXA2XX_PCI=m CONFIG_SPI_ROCKCHIP=m CONFIG_SPI_SC18IS602=m +CONFIG_SPI_SIFIVE=m +CONFIG_SPI_MXIC=m CONFIG_SPI_XCOMM=m CONFIG_SPI_XILINX=m CONFIG_SPI_ZYNQMP_GQSPI=m @@ -4153,6 +4296,7 @@ CONFIG_HSI_BOARDINFO=y CONFIG_HSI_CHAR=m CONFIG_PPS=y # CONFIG_PPS_DEBUG is not set +# CONFIG_NTP_PPS is not set # # PPS clients support @@ -4187,6 +4331,7 @@ CONFIG_PINCTRL_CANNONLAKE=m CONFIG_PINCTRL_CEDARFORK=m CONFIG_PINCTRL_DENVERTON=m CONFIG_PINCTRL_GEMINILAKE=m +# CONFIG_PINCTRL_ICELAKE is not set CONFIG_PINCTRL_LEWISBURG=m CONFIG_PINCTRL_SUNRISEPOINT=m CONFIG_GPIOLIB=y @@ -4210,7 +4355,9 @@ CONFIG_GPIO_LYNXPOINT=y CONFIG_GPIO_MB86S7X=m CONFIG_GPIO_MENZ127=m CONFIG_GPIO_MOCKUP=m +CONFIG_GPIO_SIOX=m CONFIG_GPIO_VX855=m +CONFIG_GPIO_AMD_FCH=m # # Port-mapped I/O GPIO drivers @@ -4245,6 +4392,7 @@ CONFIG_GPIO_LP3943=m CONFIG_GPIO_LP873X=m CONFIG_GPIO_TPS65086=m CONFIG_GPIO_TPS65912=m +CONFIG_GPIO_TQMX86=m CONFIG_GPIO_UCB1400=m CONFIG_GPIO_WHISKEY_COVE=m CONFIG_GPIO_WM831X=m @@ -4300,7 +4448,6 @@ CONFIG_W1_SLAVE_DS2431=m CONFIG_W1_SLAVE_DS2433=m CONFIG_W1_SLAVE_DS2433_CRC=y CONFIG_W1_SLAVE_DS2438=m -CONFIG_W1_SLAVE_DS2760=m CONFIG_W1_SLAVE_DS2780=m CONFIG_W1_SLAVE_DS2781=m CONFIG_W1_SLAVE_DS28E04=m @@ -4315,6 +4462,7 @@ CONFIG_GENERIC_ADC_BATTERY=m CONFIG_WM831X_BACKUP=m CONFIG_WM831X_POWER=m # CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set CONFIG_BATTERY_DS2760=m CONFIG_BATTERY_DS2780=m CONFIG_BATTERY_DS2781=m @@ -4355,6 +4503,7 @@ CONFIG_CHARGER_SMB347=m CONFIG_BATTERY_GAUGE_LTC2941=m CONFIG_BATTERY_RT5033=m CONFIG_CHARGER_RT9455=m +# CONFIG_CHARGER_CROS_USBPD is not set CONFIG_HWMON=m CONFIG_HWMON_VID=m # CONFIG_HWMON_DEBUG_CHIP is not set @@ -4434,6 +4583,7 @@ CONFIG_SENSORS_MAX6650=m CONFIG_SENSORS_MAX6697=m CONFIG_SENSORS_MAX31790=m CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_MLXREG_FAN is not set CONFIG_SENSORS_TC654=m CONFIG_SENSORS_MENF21BMC_HWMON=m CONFIG_SENSORS_ADCXX=m @@ -4460,6 +4610,9 @@ CONFIG_SENSORS_NCT6683=m CONFIG_SENSORS_NCT6775=m CONFIG_SENSORS_NCT7802=m CONFIG_SENSORS_NCT7904=m +# CONFIG_SENSORS_NPCM7XX is not set +CONFIG_SENSORS_OCC_P8_I2C=m +CONFIG_SENSORS_OCC=y CONFIG_SENSORS_PCF8591=m CONFIG_PMBUS=m CONFIG_SENSORS_PMBUS=m @@ -4551,6 +4704,10 @@ CONFIG_THERMAL_GOV_USER_SPACE=y CONFIG_CLOCK_THERMAL=y CONFIG_DEVFREQ_THERMAL=y # CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# CONFIG_INTEL_POWERCLAMP=m CONFIG_X86_PKG_TEMP_THERMAL=m CONFIG_INTEL_SOC_DTS_IOSF_CORE=m @@ -4580,11 +4737,13 @@ CONFIG_DA9052_WATCHDOG=m CONFIG_DA9063_WATCHDOG=m CONFIG_DA9062_WATCHDOG=m CONFIG_MENF21BMC_WATCHDOG=m +# CONFIG_MENZ069_WATCHDOG is not set CONFIG_WDAT_WDT=m CONFIG_WM831X_WATCHDOG=m CONFIG_XILINX_WATCHDOG=m CONFIG_ZIIRAVE_WATCHDOG=m CONFIG_RAVE_SP_WATCHDOG=m +CONFIG_MLX_WDT=m CONFIG_CADENCE_WATCHDOG=m CONFIG_DW_WATCHDOG=m CONFIG_MAX63XX_WATCHDOG=m @@ -4617,6 +4776,7 @@ CONFIG_60XX_WDT=m CONFIG_CPU5_WDT=m CONFIG_SMSC_SCH311X_WDT=m CONFIG_SMSC37B787_WDT=m +CONFIG_TQMX86_WDT=m CONFIG_VIA_WDT=m CONFIG_W83627HF_WDT=m CONFIG_W83877F_WDT=m @@ -4658,7 +4818,6 @@ CONFIG_SSB_PCMCIAHOST_POSSIBLE=y CONFIG_SSB_PCMCIAHOST=y CONFIG_SSB_SDIOHOST_POSSIBLE=y CONFIG_SSB_SDIOHOST=y 
-# CONFIG_SSB_DEBUG is not set CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y CONFIG_SSB_DRIVER_PCICORE=y CONFIG_SSB_DRIVER_GPIO=y @@ -4683,9 +4842,8 @@ CONFIG_MFD_BD9571MWV=m CONFIG_MFD_AXP20X=m CONFIG_MFD_AXP20X_I2C=m CONFIG_MFD_CROS_EC=m -CONFIG_MFD_CROS_EC_I2C=m -CONFIG_MFD_CROS_EC_SPI=m # CONFIG_MFD_CROS_EC_CHARDEV is not set +# CONFIG_MFD_MADERA is not set CONFIG_PMIC_DA9052=y CONFIG_MFD_DA9052_SPI=y CONFIG_MFD_DA9062=m @@ -4741,6 +4899,7 @@ CONFIG_MFD_TPS65912_I2C=m CONFIG_MFD_TPS65912_SPI=y CONFIG_MFD_WL1273_CORE=m CONFIG_MFD_LM3533=m +CONFIG_MFD_TQMX86=m CONFIG_MFD_VX855=m CONFIG_MFD_ARIZONA=y CONFIG_MFD_ARIZONA_I2C=m @@ -4836,6 +4995,7 @@ CONFIG_IR_SHARP_DECODER=m CONFIG_IR_MCE_KBD_DECODER=m CONFIG_IR_XMP_DECODER=m CONFIG_IR_IMON_DECODER=m +CONFIG_IR_RCMM_DECODER=m CONFIG_RC_DEVICES=y CONFIG_RC_ATI_REMOTE=m CONFIG_IR_ENE=m @@ -4855,6 +5015,7 @@ CONFIG_RC_LOOPBACK=m CONFIG_IR_SERIAL=m CONFIG_IR_SERIAL_TRANSMITTER=y CONFIG_IR_SIR=m +CONFIG_RC_XBOX_DVD=m CONFIG_MEDIA_SUPPORT=m # @@ -4870,12 +5031,12 @@ CONFIG_MEDIA_CEC_RC=y # CONFIG_CEC_PIN_ERROR_INJ is not set CONFIG_MEDIA_CONTROLLER=y CONFIG_MEDIA_CONTROLLER_DVB=y +CONFIG_MEDIA_CONTROLLER_REQUEST_API=y CONFIG_VIDEO_DEV=m CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_VIDEO_V4L2=m # CONFIG_VIDEO_ADV_DEBUG is not set # CONFIG_VIDEO_FIXED_MINOR_RANGES is not set -CONFIG_VIDEO_PCI_SKELETON=m CONFIG_VIDEO_TUNER=m CONFIG_V4L2_MEM2MEM_DEV=m CONFIG_V4L2_FLASH_LED_CLASS=m @@ -5019,7 +5180,6 @@ CONFIG_DVB_USB_PCTV452E=m CONFIG_DVB_USB_DW2102=m CONFIG_DVB_USB_CINERGY_T2=m CONFIG_DVB_USB_DTV5100=m -CONFIG_DVB_USB_FRIIO=m CONFIG_DVB_USB_AZ6027=m CONFIG_DVB_USB_TECHNISAT_USB2=m CONFIG_DVB_USB_V2=m @@ -5082,6 +5242,7 @@ CONFIG_VIDEO_IVTV=m # CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set CONFIG_VIDEO_IVTV_ALSA=m CONFIG_VIDEO_FB_IVTV=m +# CONFIG_VIDEO_FB_IVTV_FORCE_PAT is not set CONFIG_VIDEO_HEXIUM_GEMINI=m CONFIG_VIDEO_HEXIUM_ORION=m CONFIG_VIDEO_MXB=m @@ -5144,15 +5305,17 @@ CONFIG_VIDEO_CAFE_CCIC=m CONFIG_VIDEO_CADENCE=y CONFIG_VIDEO_CADENCE_CSI2RX=m CONFIG_VIDEO_CADENCE_CSI2TX=m -CONFIG_SOC_CAMERA=m -CONFIG_SOC_CAMERA_PLATFORM=m +CONFIG_VIDEO_ASPEED=m CONFIG_V4L_MEM2MEM_DRIVERS=y CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m CONFIG_VIDEO_SH_VEU=m # CONFIG_V4L_TEST_DRIVERS is not set CONFIG_DVB_PLATFORM_DRIVERS=y CONFIG_CEC_PLATFORM_DRIVERS=y +# CONFIG_VIDEO_CROS_EC_CEC is not set CONFIG_CEC_GPIO=m +CONFIG_VIDEO_SECO_CEC=m +# CONFIG_VIDEO_SECO_RC is not set CONFIG_SDR_PLATFORM_DRIVERS=y # @@ -5279,7 +5442,6 @@ CONFIG_VIDEO_ADV7175=m CONFIG_VIDEO_OV2640=m CONFIG_VIDEO_OV7640=m CONFIG_VIDEO_OV7670=m -CONFIG_VIDEO_MT9M111=m CONFIG_VIDEO_MT9V011=m # @@ -5307,24 +5469,6 @@ CONFIG_VIDEO_SAA6752HS=m CONFIG_VIDEO_M52790=m # -# Sensors used on soc_camera driver -# - -# -# soc_camera sensor drivers -# -CONFIG_SOC_CAMERA_MT9M001=m -CONFIG_SOC_CAMERA_MT9M111=m -CONFIG_SOC_CAMERA_MT9T112=m -CONFIG_SOC_CAMERA_MT9V022=m -CONFIG_SOC_CAMERA_OV5642=m -CONFIG_SOC_CAMERA_OV772X=m -CONFIG_SOC_CAMERA_OV9640=m -CONFIG_SOC_CAMERA_OV9740=m -CONFIG_SOC_CAMERA_RJ54N1=m -CONFIG_SOC_CAMERA_TW9910=m - -# # Media SPI Adapters # CONFIG_CXD2880_SPI_DRV=m @@ -5547,6 +5691,7 @@ CONFIG_DRM_KMS_FB_HELPER=y CONFIG_DRM_FBDEV_EMULATION=y CONFIG_DRM_FBDEV_OVERALLOC=100 # CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +# CONFIG_DRM_DP_CEC is not set CONFIG_DRM_TTM=m CONFIG_DRM_GEM_CMA_HELPER=y CONFIG_DRM_KMS_CMA_HELPER=y @@ -5560,6 +5705,10 @@ CONFIG_DRM_I2C_CH7006=m CONFIG_DRM_I2C_SIL164=m CONFIG_DRM_I2C_NXP_TDA998X=m CONFIG_DRM_I2C_NXP_TDA9950=m + +# +# ARM devices +# CONFIG_DRM_RADEON=m # CONFIG_DRM_RADEON_USERPTR 
is not set CONFIG_DRM_AMDGPU=m @@ -5577,9 +5726,10 @@ CONFIG_DRM_AMD_ACP=y # Display Engine Configuration # CONFIG_DRM_AMD_DC=y -# CONFIG_DRM_AMD_DC_FBC is not set CONFIG_DRM_AMD_DC_DCN1_0=y +CONFIG_DRM_AMD_DC_DCN1_01=y # CONFIG_DEBUG_KERNEL_DC is not set +CONFIG_HSA_AMD=y # # AMD Library routines @@ -5588,6 +5738,7 @@ CONFIG_CHASH=m # CONFIG_CHASH_STATS is not set # CONFIG_CHASH_SELFTEST is not set CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y CONFIG_NOUVEAU_DEBUG=5 CONFIG_NOUVEAU_DEBUG_DEFAULT=3 # CONFIG_NOUVEAU_DEBUG_MMU is not set @@ -5600,6 +5751,7 @@ CONFIG_DRM_I915_USERPTR=y CONFIG_DRM_I915_GVT=y CONFIG_DRM_I915_GVT_KVMGT=m CONFIG_DRM_VGEM=m +# CONFIG_DRM_VKMS is not set CONFIG_DRM_VMWGFX=m CONFIG_DRM_VMWGFX_FBCON=y CONFIG_DRM_GMA500=m @@ -5625,11 +5777,14 @@ CONFIG_DRM_PANEL_BRIDGE=y # Display Interface Bridges # CONFIG_DRM_ANALOGIX_ANX78XX=m -CONFIG_HSA_AMD=m +CONFIG_DRM_ETNAVIV=m +CONFIG_DRM_ETNAVIV_THERMAL=y CONFIG_DRM_HISI_HIBMC=m CONFIG_DRM_TINYDRM=m CONFIG_TINYDRM_MIPI_DBI=m +CONFIG_TINYDRM_HX8357D=m CONFIG_TINYDRM_ILI9225=m +CONFIG_TINYDRM_ILI9341=m CONFIG_TINYDRM_MI0283QT=m CONFIG_TINYDRM_REPAPER=m CONFIG_TINYDRM_ST7586=m @@ -5641,10 +5796,10 @@ CONFIG_DRM_LIB_RANDOM=y # # Frame buffer Devices # -CONFIG_FB=y -CONFIG_FIRMWARE_EDID=y CONFIG_FB_CMDLINE=y CONFIG_FB_NOTIFY=y +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y CONFIG_FB_BOOT_VESA_SUPPORT=y CONFIG_FB_CFB_FILLRECT=y CONFIG_FB_CFB_COPYAREA=y @@ -5658,7 +5813,7 @@ CONFIG_FB_BOTH_ENDIAN=y # CONFIG_FB_LITTLE_ENDIAN is not set CONFIG_FB_SYS_FOPS=m CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_BACKLIGHT=y +CONFIG_FB_BACKLIGHT=m CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y @@ -5707,7 +5862,6 @@ CONFIG_FB_EFI=y # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set # CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set # CONFIG_FB_HYPERV is not set CONFIG_FB_SIMPLE=y # CONFIG_FB_SM712 is not set @@ -5721,8 +5875,6 @@ CONFIG_LCD_ILI9320=m CONFIG_LCD_TDO24M=m CONFIG_LCD_VGG2432A4=m CONFIG_LCD_PLATFORM=m -CONFIG_LCD_S6E63M0=m -CONFIG_LCD_LD9040=m CONFIG_LCD_AMS369FG06=m CONFIG_LCD_LMS501KF03=m CONFIG_LCD_HX8357=m @@ -5763,6 +5915,7 @@ CONFIG_DUMMY_CONSOLE_ROWS=25 CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set CONFIG_LOGO=y CONFIG_LOGO_LINUX_MONO=y CONFIG_LOGO_LINUX_VGA16=y @@ -5923,6 +6076,7 @@ CONFIG_SND_HDA_GENERIC=m CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 CONFIG_SND_HDA_CORE=m CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y CONFIG_SND_HDA_I915=y CONFIG_SND_HDA_EXT_CORE=m CONFIG_SND_HDA_PREALLOC_SIZE=64 @@ -5964,6 +6118,7 @@ CONFIG_SND_SOC_ACPI=m CONFIG_SND_SOC_AMD_ACP=m CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH=m CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m +CONFIG_SND_SOC_AMD_ACP3x=m CONFIG_SND_ATMEL_SOC=m CONFIG_SND_DESIGNWARE_I2S=m CONFIG_SND_DESIGNWARE_PCM=y @@ -5980,6 +6135,7 @@ CONFIG_SND_SOC_FSL_SAI=m CONFIG_SND_SOC_FSL_SSI=m CONFIG_SND_SOC_FSL_SPDIF=m CONFIG_SND_SOC_FSL_ESAI=m +CONFIG_SND_SOC_FSL_MICFIL=m CONFIG_SND_SOC_IMX_AUDMUX=m CONFIG_SND_I2S_HI6210_I2S=m CONFIG_SND_SOC_IMG=y @@ -6000,8 +6156,17 @@ CONFIG_SND_SOC_INTEL_HASWELL=m CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI=m CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=m -CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m CONFIG_SND_SOC_INTEL_SKYLAKE=m +CONFIG_SND_SOC_INTEL_SKL=m +CONFIG_SND_SOC_INTEL_APL=m +CONFIG_SND_SOC_INTEL_KBL=m +CONFIG_SND_SOC_INTEL_GLK=m +CONFIG_SND_SOC_INTEL_CNL=m +CONFIG_SND_SOC_INTEL_CFL=m 
+CONFIG_SND_SOC_INTEL_SKYLAKE_FAMILY=m +CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m +# CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC is not set +CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON=m CONFIG_SND_SOC_ACPI_INTEL_MATCH=m CONFIG_SND_SOC_INTEL_MACH=y CONFIG_SND_SOC_INTEL_HASWELL_MACH=m @@ -6024,10 +6189,17 @@ CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH=m +CONFIG_SND_SOC_INTEL_KBL_RT5660_MACH=m +# CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH is not set +CONFIG_SND_SOC_MTK_BTCVSD=m # # STMicroelectronics STM32 SOC audio support # +CONFIG_SND_SOC_XILINX_I2S=m +CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=m +CONFIG_SND_SOC_XILINX_SPDIF=m CONFIG_SND_SOC_XTFPGA_I2S=m CONFIG_ZX_TDM=m CONFIG_SND_SOC_I2C_AND_SPI=m @@ -6044,6 +6216,7 @@ CONFIG_SND_SOC_ADAU1761_I2C=m CONFIG_SND_SOC_ADAU1761_SPI=m CONFIG_SND_SOC_ADAU7002=m CONFIG_SND_SOC_AK4104=m +CONFIG_SND_SOC_AK4118=m CONFIG_SND_SOC_AK4458=m CONFIG_SND_SOC_AK4554=m CONFIG_SND_SOC_AK4613=m @@ -6053,10 +6226,12 @@ CONFIG_SND_SOC_AK5558=m CONFIG_SND_SOC_ALC5623=m CONFIG_SND_SOC_BD28623=m # CONFIG_SND_SOC_BT_SCO is not set +CONFIG_SND_SOC_CROS_EC_CODEC=m CONFIG_SND_SOC_CS35L32=m CONFIG_SND_SOC_CS35L33=m CONFIG_SND_SOC_CS35L34=m CONFIG_SND_SOC_CS35L35=m +CONFIG_SND_SOC_CS35L36=m CONFIG_SND_SOC_CS42L42=m CONFIG_SND_SOC_CS42L51=m CONFIG_SND_SOC_CS42L51_I2C=m @@ -6071,14 +6246,15 @@ CONFIG_SND_SOC_CS4271_SPI=m CONFIG_SND_SOC_CS42XX8=m CONFIG_SND_SOC_CS42XX8_I2C=m CONFIG_SND_SOC_CS43130=m +CONFIG_SND_SOC_CS4341=m CONFIG_SND_SOC_CS4349=m CONFIG_SND_SOC_CS53L30=m CONFIG_SND_SOC_DA7213=m CONFIG_SND_SOC_DA7219=m -CONFIG_SND_SOC_DIO2125=m CONFIG_SND_SOC_DMIC=m CONFIG_SND_SOC_HDMI_CODEC=m CONFIG_SND_SOC_ES7134=m +# CONFIG_SND_SOC_ES7241 is not set CONFIG_SND_SOC_ES8316=m CONFIG_SND_SOC_ES8328=m CONFIG_SND_SOC_ES8328_I2C=m @@ -6086,6 +6262,7 @@ CONFIG_SND_SOC_ES8328_SPI=m CONFIG_SND_SOC_GTM601=m CONFIG_SND_SOC_HDAC_HDMI=m CONFIG_SND_SOC_INNO_RK3036=m +CONFIG_SND_SOC_MAX98088=m CONFIG_SND_SOC_MAX98090=m CONFIG_SND_SOC_MAX98357A=m CONFIG_SND_SOC_MAX98504=m @@ -6104,12 +6281,16 @@ CONFIG_SND_SOC_PCM179X_SPI=m CONFIG_SND_SOC_PCM186X=m CONFIG_SND_SOC_PCM186X_I2C=m CONFIG_SND_SOC_PCM186X_SPI=m +CONFIG_SND_SOC_PCM3060=m +CONFIG_SND_SOC_PCM3060_I2C=m +CONFIG_SND_SOC_PCM3060_SPI=m CONFIG_SND_SOC_PCM3168A=m CONFIG_SND_SOC_PCM3168A_I2C=m CONFIG_SND_SOC_PCM3168A_SPI=m CONFIG_SND_SOC_PCM512x=m CONFIG_SND_SOC_PCM512x_I2C=m CONFIG_SND_SOC_PCM512x_SPI=m +CONFIG_SND_SOC_RK3328=m CONFIG_SND_SOC_RL6231=m CONFIG_SND_SOC_RL6347A=m CONFIG_SND_SOC_RT286=m @@ -6121,6 +6302,7 @@ CONFIG_SND_SOC_RT5631=m CONFIG_SND_SOC_RT5640=m CONFIG_SND_SOC_RT5645=m CONFIG_SND_SOC_RT5651=m +CONFIG_SND_SOC_RT5660=m CONFIG_SND_SOC_RT5663=m CONFIG_SND_SOC_RT5670=m CONFIG_SND_SOC_RT5677=m @@ -6130,6 +6312,7 @@ CONFIG_SND_SOC_SI476X=m CONFIG_SND_SOC_SIGMADSP=m CONFIG_SND_SOC_SIGMADSP_I2C=m CONFIG_SND_SOC_SIGMADSP_REGMAP=m +# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set CONFIG_SND_SOC_SIRF_AUDIO_CODEC=m CONFIG_SND_SOC_SPDIF=m CONFIG_SND_SOC_SSM2305=m @@ -6158,6 +6341,7 @@ CONFIG_SND_SOC_TLV320AIC3X=m CONFIG_SND_SOC_TS3A227E=m CONFIG_SND_SOC_TSCS42XX=m CONFIG_SND_SOC_TSCS454=m +CONFIG_SND_SOC_WCD9335=m CONFIG_SND_SOC_WM8510=m CONFIG_SND_SOC_WM8523=m CONFIG_SND_SOC_WM8524=m @@ -6176,6 +6360,7 @@ CONFIG_SND_SOC_WM8804=m CONFIG_SND_SOC_WM8804_I2C=m CONFIG_SND_SOC_WM8804_SPI=m CONFIG_SND_SOC_WM8903=m +CONFIG_SND_SOC_WM8904=m 
CONFIG_SND_SOC_WM8960=m CONFIG_SND_SOC_WM8962=m CONFIG_SND_SOC_WM8974=m @@ -6184,8 +6369,10 @@ CONFIG_SND_SOC_WM8985=m CONFIG_SND_SOC_ZX_AUD96P22=m CONFIG_SND_SOC_MAX9759=m CONFIG_SND_SOC_MT6351=m +CONFIG_SND_SOC_MT6358=m CONFIG_SND_SOC_NAU8540=m CONFIG_SND_SOC_NAU8810=m +CONFIG_SND_SOC_NAU8822=m CONFIG_SND_SOC_NAU8824=m CONFIG_SND_SOC_NAU8825=m CONFIG_SND_SOC_TPA6130A2=m @@ -6218,9 +6405,11 @@ CONFIG_HID_ASUS=m CONFIG_HID_AUREAL=m CONFIG_HID_BELKIN=m CONFIG_HID_BETOP_FF=m +CONFIG_HID_BIGBEN_FF=m CONFIG_HID_CHERRY=m CONFIG_HID_CHICONY=m CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set CONFIG_HID_PRODIKEYS=m CONFIG_HID_CMEDIA=m CONFIG_HID_CP2112=m @@ -6242,6 +6431,7 @@ CONFIG_HID_KEYTOUCH=m CONFIG_HID_KYE=m CONFIG_HID_UCLOGIC=m CONFIG_HID_WALTOP=m +CONFIG_HID_VIEWSONIC=m CONFIG_HID_GYRATION=m CONFIG_HID_ICADE=m CONFIG_HID_ITE=m @@ -6259,6 +6449,7 @@ CONFIG_LOGIRUMBLEPAD2_FF=y CONFIG_LOGIG940_FF=y CONFIG_LOGIWHEELS_FF=y CONFIG_HID_MAGICMOUSE=m +CONFIG_HID_MALTRON=m CONFIG_HID_MAYFLASH=m CONFIG_HID_REDRAGON=m CONFIG_HID_MICROSOFT=m @@ -6345,6 +6536,7 @@ CONFIG_USB_OTG=y # CONFIG_USB_OTG_BLACKLIST_HUB is not set CONFIG_USB_OTG_FSM=m CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 CONFIG_USB_MON=m CONFIG_USB_WUSB=m CONFIG_USB_WUSB_CBAF=m @@ -6362,6 +6554,7 @@ CONFIG_USB_EHCI_HCD=m CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_TT_NEWSCHED=y CONFIG_USB_EHCI_PCI=m +CONFIG_USB_EHCI_FSL=m CONFIG_USB_EHCI_HCD_PLATFORM=m CONFIG_USB_OXU210HP_HCD=m CONFIG_USB_ISP116X_HCD=m @@ -6451,6 +6644,7 @@ CONFIG_USB_DWC3_DUAL_ROLE=y # Platform Glue Driver Support # CONFIG_USB_DWC3_PCI=m +CONFIG_USB_DWC3_HAPS=m CONFIG_USB_DWC2=m # CONFIG_USB_DWC2_HOST is not set @@ -6685,8 +6879,11 @@ CONFIG_USB_G_HID=m CONFIG_USB_G_WEBCAM=m CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m CONFIG_TYPEC_FUSB302=m CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_CCG=m CONFIG_UCSI_ACPI=m CONFIG_TYPEC_TPS6598X=m @@ -6694,10 +6891,15 @@ CONFIG_TYPEC_TPS6598X=m # USB Type-C Multiplexer/DeMultiplexer Switch support # CONFIG_TYPEC_MUX_PI3USB30532=m + +# +# USB Type-C Alternate Mode drivers +# +# CONFIG_TYPEC_DP_ALTMODE is not set +CONFIG_USB_ROLE_SWITCH=m CONFIG_USB_ROLES_INTEL_XHCI=m CONFIG_USB_LED_TRIG=y CONFIG_USB_ULPI_BUS=m -CONFIG_USB_ROLE_SWITCH=m CONFIG_UWB=m CONFIG_UWB_HWA=m CONFIG_UWB_WHCI=m @@ -6719,6 +6921,7 @@ CONFIG_MMC_SDHCI_ACPI=m CONFIG_MMC_SDHCI_PLTFM=m CONFIG_MMC_SDHCI_F_SDH30=m CONFIG_MMC_WBSD=m +CONFIG_MMC_ALCOR=m CONFIG_MMC_TIFM_SD=m CONFIG_MMC_SPI=m CONFIG_MMC_SDRICOH_CS=m @@ -6825,6 +7028,8 @@ CONFIG_LEDS_TRIGGER_TRANSIENT=m CONFIG_LEDS_TRIGGER_CAMERA=m CONFIG_LEDS_TRIGGER_PANIC=y CONFIG_LEDS_TRIGGER_NETDEV=m +CONFIG_LEDS_TRIGGER_PATTERN=m +CONFIG_LEDS_TRIGGER_AUDIO=m CONFIG_ACCESSIBILITY=y CONFIG_A11Y_BRAILLE_CONSOLE=y CONFIG_INFINIBAND=m @@ -6849,6 +7054,13 @@ CONFIG_INFINIBAND_NES=m CONFIG_INFINIBAND_OCRDMA=m CONFIG_INFINIBAND_VMWARE_PVRDMA=m CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_HFI1=m +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set +# CONFIG_SDMA_VERBOSITY is not set +CONFIG_INFINIBAND_QEDR=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_IPOIB_CM=y CONFIG_INFINIBAND_IPOIB_DEBUG=y @@ -6858,13 +7070,6 @@ CONFIG_INFINIBAND_SRPT=m CONFIG_INFINIBAND_ISER=m CONFIG_INFINIBAND_ISERT=m CONFIG_INFINIBAND_OPA_VNIC=m -CONFIG_INFINIBAND_RDMAVT=m -CONFIG_RDMA_RXE=m -CONFIG_INFINIBAND_HFI1=m -# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set -# CONFIG_SDMA_VERBOSITY is not set -CONFIG_INFINIBAND_QEDR=m 
-CONFIG_INFINIBAND_BNXT_RE=m CONFIG_EDAC_ATOMIC_SCRUB=y CONFIG_EDAC_SUPPORT=y CONFIG_EDAC=y @@ -6887,6 +7092,7 @@ CONFIG_EDAC_I5100=m CONFIG_EDAC_I7300=m CONFIG_EDAC_SBRIDGE=m CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m CONFIG_EDAC_PND2=m CONFIG_RTC_LIB=y CONFIG_RTC_MC146818_LIB=y @@ -6912,9 +7118,9 @@ CONFIG_RTC_INTF_DEV_UIE_EMUL=y # CONFIG_RTC_DRV_88PM80X=m CONFIG_RTC_DRV_ABB5ZES3=m +CONFIG_RTC_DRV_ABEOZ9=m CONFIG_RTC_DRV_ABX80X=m CONFIG_RTC_DRV_DS1307=m -CONFIG_RTC_DRV_DS1307_HWMON=y # CONFIG_RTC_DRV_DS1307_CENTURY is not set CONFIG_RTC_DRV_DS1374=m CONFIG_RTC_DRV_DS1374_WDT=y @@ -6939,7 +7145,9 @@ CONFIG_RTC_DRV_RX8010=m CONFIG_RTC_DRV_RX8581=m CONFIG_RTC_DRV_RX8025=m CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV3028=m CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_SD3078=m # # SPI RTC drivers @@ -6983,8 +7191,6 @@ CONFIG_RTC_DRV_DS1685=y # CONFIG_RTC_DRV_DS17285 is not set # CONFIG_RTC_DRV_DS17485 is not set # CONFIG_RTC_DRV_DS17885 is not set -CONFIG_RTC_DS1685_PROC_REGS=y -CONFIG_RTC_DS1685_SYSFS_REGS=y CONFIG_RTC_DRV_DS1742=m CONFIG_RTC_DRV_DS2404=m CONFIG_RTC_DRV_DA9052=m @@ -7013,6 +7219,7 @@ CONFIG_RTC_DRV_MT6397=m # HID Sensor RTC drivers # CONFIG_RTC_DRV_HID_SENSOR_TIME=m +CONFIG_RTC_DRV_WILCO_EC=m CONFIG_DMADEVICES=y # CONFIG_DMADEVICES_DEBUG is not set @@ -7045,6 +7252,7 @@ CONFIG_DMA_ENGINE_RAID=y # CONFIG_SYNC_FILE=y # CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set CONFIG_DCA=m CONFIG_AUXDISPLAY=y CONFIG_HD44780=m @@ -7054,10 +7262,14 @@ CONFIG_KS0108_DELAY=2 CONFIG_CFAG12864B=m CONFIG_CFAG12864B_RATE=20 CONFIG_IMG_ASCII_LCD=m -CONFIG_PANEL=m +CONFIG_PARPORT_PANEL=m CONFIG_PANEL_PARPORT=0 CONFIG_PANEL_PROFILE=5 # CONFIG_PANEL_CHANGE_MESSAGE is not set +# CONFIG_CHARLCD_BL_OFF is not set +CONFIG_CHARLCD_BL_ON=y +# CONFIG_CHARLCD_BL_FLASH is not set +CONFIG_PANEL=m CONFIG_CHARLCD=m CONFIG_UIO=m CONFIG_UIO_CIF=m @@ -7110,7 +7322,6 @@ CONFIG_COMEDI_MISC_DRIVERS=y CONFIG_COMEDI_BOND=m CONFIG_COMEDI_TEST=m CONFIG_COMEDI_PARPORT=m -CONFIG_COMEDI_SERIAL2002=m CONFIG_COMEDI_ISA_DRIVERS=y CONFIG_COMEDI_PCL711=m CONFIG_COMEDI_PCL724=m @@ -7240,6 +7451,7 @@ CONFIG_COMEDI_ISADMA=m CONFIG_COMEDI_NI_LABPC=m CONFIG_COMEDI_NI_LABPC_ISADMA=m CONFIG_COMEDI_NI_TIO=m +CONFIG_COMEDI_NI_ROUTING=m CONFIG_RTL8192U=m CONFIG_RTLLIB=m CONFIG_RTLLIB_CRYPTO_CCMP=m @@ -7269,9 +7481,6 @@ CONFIG_ADIS16240=m # # Analog to digital converters # -CONFIG_AD7606=m -CONFIG_AD7606_IFACE_PARALLEL=m -CONFIG_AD7606_IFACE_SPI=m CONFIG_AD7780=m CONFIG_AD7816=m CONFIG_AD7192=m @@ -7288,7 +7497,6 @@ CONFIG_ADT7316_I2C=m # Capacitance to digital converters # CONFIG_AD7150=m -CONFIG_AD7152=m CONFIG_AD7746=m # @@ -7298,11 +7506,6 @@ CONFIG_AD9832=m CONFIG_AD9834=m # -# Digital gyroscope sensors -# -CONFIG_ADIS16060=m - -# # Network Analyzer, Impedance Converters # CONFIG_AD5933=m @@ -7317,10 +7520,8 @@ CONFIG_ADE7854_SPI=m # # Resolver to digital converters # -CONFIG_AD2S90=m CONFIG_AD2S1210=m CONFIG_FB_SM750=m -CONFIG_FB_XGI=m # # Speakup console speech @@ -7339,8 +7540,6 @@ CONFIG_SPEAKUP_SYNTH_TXPRT=m # CONFIG_SPEAKUP_SYNTH_DUMMY is not set CONFIG_STAGING_MEDIA=y CONFIG_I2C_BCM2048=m -CONFIG_SOC_CAMERA_IMX074=m -CONFIG_SOC_CAMERA_MT9T031=m CONFIG_VIDEO_ZORAN=m CONFIG_VIDEO_ZORAN_DC30=m CONFIG_VIDEO_ZORAN_ZR36060=m @@ -7349,6 +7548,11 @@ CONFIG_VIDEO_ZORAN_DC10=m CONFIG_VIDEO_ZORAN_LML33=m CONFIG_VIDEO_ZORAN_LML33R10=m CONFIG_VIDEO_ZORAN_AVS6EYES=m +CONFIG_VIDEO_IPU3_IMGU=m + +# +# soc_camera sensor drivers +# # # Android @@ -7357,11 +7561,7 @@ CONFIG_LTE_GDM724X=m CONFIG_FIREWIRE_SERIAL=m CONFIG_FWTTY_MAX_TOTAL_PORTS=64 
CONFIG_FWTTY_MAX_CARD_PORTS=32 -CONFIG_MTD_SPINAND_MT29F=m -CONFIG_MTD_SPINAND_ONDIEECC=y -CONFIG_DGNC=m CONFIG_GS_FPGABOOT=m -CONFIG_CRYPTO_SKEIN=m CONFIG_UNISYSSPAR=y CONFIG_FB_TFT=m CONFIG_FB_TFT_AGM1264K_FL=m @@ -7429,23 +7629,21 @@ CONFIG_GREYBUS_SDIO=m CONFIG_GREYBUS_SPI=m CONFIG_GREYBUS_UART=m CONFIG_GREYBUS_USB=m +CONFIG_DRM_VBOXVIDEO=m +CONFIG_PI433=m # -# USB Power Delivery and Type-C drivers +# Gasket devices # -CONFIG_TYPEC_TCPCI=m -CONFIG_TYPEC_RT1711H=m -# CONFIG_DRM_VBOXVIDEO is not set -CONFIG_PI433=m -CONFIG_MTK_MMC=m -# CONFIG_MTK_AEE_KDUMP is not set -# CONFIG_MTK_MMC_CD_POLL is not set +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# CONFIG_EROFS_FS is not set CONFIG_X86_PLATFORM_DEVICES=y CONFIG_ACER_WMI=m CONFIG_ACER_WIRELESS=m CONFIG_ACERHDF=m CONFIG_ALIENWARE_WMI=m CONFIG_ASUS_LAPTOP=m +CONFIG_DCDBAS=m CONFIG_DELL_SMBIOS=m CONFIG_DELL_SMBIOS_WMI=y CONFIG_DELL_SMBIOS_SMM=y @@ -7456,6 +7654,7 @@ CONFIG_DELL_WMI_AIO=m CONFIG_DELL_WMI_LED=m CONFIG_DELL_SMO8800=m CONFIG_DELL_RBTN=m +CONFIG_DELL_RBU=m CONFIG_FUJITSU_LAPTOP=m CONFIG_FUJITSU_TABLET=m CONFIG_AMILO_RFKILL=m @@ -7463,6 +7662,7 @@ CONFIG_GPD_POCKET_FAN=m CONFIG_HP_ACCEL=m CONFIG_HP_WIRELESS=m CONFIG_HP_WMI=m +CONFIG_LG_LAPTOP=m CONFIG_MSI_LAPTOP=m CONFIG_PANASONIC_LAPTOP=m CONFIG_COMPAL_LAPTOP=m @@ -7509,7 +7709,6 @@ CONFIG_SAMSUNG_Q10=m CONFIG_APPLE_GMUX=m CONFIG_INTEL_RST=m CONFIG_INTEL_SMARTCONNECT=m -CONFIG_PVPANIC=m CONFIG_INTEL_PMC_IPC=m CONFIG_INTEL_BXTWC_PMIC_TMU=m CONFIG_SURFACE_PRO3_BUTTON=m @@ -7519,17 +7718,26 @@ CONFIG_INTEL_TELEMETRY=m CONFIG_MLX_PLATFORM=m # CONFIG_INTEL_TURBO_MAX_3 is not set CONFIG_INTEL_CHTDC_TI_PWRBTN=m +# CONFIG_I2C_MULTI_INSTANTIATE is not set +CONFIG_INTEL_ATOMISP2_PM=m +CONFIG_HUAWEI_WMI=m +CONFIG_PCENGINES_APU2=m CONFIG_PMC_ATOM=y CONFIG_CHROME_PLATFORMS=y CONFIG_CHROMEOS_LAPTOP=m CONFIG_CHROMEOS_PSTORE=m CONFIG_CHROMEOS_TBMC=m +# CONFIG_CROS_EC_I2C is not set +# CONFIG_CROS_EC_SPI is not set CONFIG_CROS_EC_LPC=m CONFIG_CROS_EC_LPC_MEC=y CONFIG_CROS_EC_PROTO=y CONFIG_CROS_KBD_LED_BACKLIGHT=m +CONFIG_WILCO_EC=m +# CONFIG_WILCO_EC_DEBUGFS is not set CONFIG_MELLANOX_PLATFORM=y CONFIG_MLXREG_HOTPLUG=m +# CONFIG_MLXREG_IO is not set CONFIG_CLKDEV_LOOKUP=y CONFIG_HAVE_CLK_PREPARE=y CONFIG_COMMON_CLK=y @@ -7538,6 +7746,7 @@ CONFIG_COMMON_CLK=y # Common Clock Framework # CONFIG_COMMON_CLK_WM831X=m +# CONFIG_COMMON_CLK_MAX9485 is not set CONFIG_COMMON_CLK_SI5351=m CONFIG_COMMON_CLK_SI544=m CONFIG_COMMON_CLK_CDCE706=m @@ -7554,13 +7763,15 @@ CONFIG_CLKBLD_I8253=y CONFIG_MAILBOX=y CONFIG_PCC=y CONFIG_ALTERA_MBOX=m +CONFIG_IOMMU_IOVA=y CONFIG_IOMMU_API=y CONFIG_IOMMU_SUPPORT=y # # Generic IOMMU Pagetable Support # -CONFIG_IOMMU_IOVA=y +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_AMD_IOMMU=y CONFIG_AMD_IOMMU_V2=m CONFIG_DMAR_TABLE=y @@ -7569,6 +7780,7 @@ CONFIG_INTEL_IOMMU_SVM=y # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set CONFIG_INTEL_IOMMU_FLOPPY_WA=y CONFIG_IRQ_REMAP=y +CONFIG_HYPERV_IOMMU=y # # Remoteproc drivers @@ -7605,6 +7817,10 @@ CONFIG_SOUNDWIRE_INTEL=m # # +# NXP/Freescale QorIQ SoC drivers +# + +# # i.MX SoC drivers # @@ -7645,6 +7861,7 @@ CONFIG_EXTCON_GPIO=m CONFIG_EXTCON_MAX14577=m CONFIG_EXTCON_MAX3355=m CONFIG_EXTCON_MAX77693=m +CONFIG_EXTCON_PTN5150=m CONFIG_EXTCON_RT8973A=m CONFIG_EXTCON_SM5502=m # CONFIG_EXTCON_USB_GPIO is not set @@ -7668,6 +7885,9 @@ CONFIG_IIO_TRIGGERED_EVENT=m # CONFIG_ADIS16201=m CONFIG_ADIS16209=m +CONFIG_ADXL372=m +CONFIG_ADXL372_SPI=m +CONFIG_ADXL372_I2C=m CONFIG_BMA180=m CONFIG_BMA220=m 
CONFIG_BMC150_ACCEL=m @@ -7705,15 +7925,21 @@ CONFIG_STK8BA50=m # Analog to digital converters # CONFIG_AD_SIGMA_DELTA=m +CONFIG_AD7124=m CONFIG_AD7266=m CONFIG_AD7291=m CONFIG_AD7298=m CONFIG_AD7476=m +CONFIG_AD7606=m +CONFIG_AD7606_IFACE_PARALLEL=m +CONFIG_AD7606_IFACE_SPI=m CONFIG_AD7766=m +CONFIG_AD7768_1=m CONFIG_AD7791=m CONFIG_AD7793=m CONFIG_AD7887=m CONFIG_AD7923=m +CONFIG_AD7949=m CONFIG_AD799X=m CONFIG_AXP20X_ADC=m CONFIG_AXP288_ADC=m @@ -7733,11 +7959,13 @@ CONFIG_MAX1363=m CONFIG_MAX9611=m CONFIG_MCP320X=m CONFIG_MCP3422=m +CONFIG_MCP3911=m CONFIG_MEN_Z188_ADC=m CONFIG_NAU7802=m CONFIG_QCOM_VADC_COMMON=m CONFIG_QCOM_SPMI_IADC=m CONFIG_QCOM_SPMI_VADC=m +CONFIG_QCOM_SPMI_ADC5=m CONFIG_TI_ADC081C=m CONFIG_TI_ADC0832=m CONFIG_TI_ADC084S021=m @@ -7764,8 +7992,12 @@ CONFIG_AD8366=m # Chemical Sensors # CONFIG_ATLAS_PH_SENSOR=m +# CONFIG_BME680 is not set CONFIG_CCS811=m CONFIG_IAQCORE=m +CONFIG_PMS7003=m +CONFIG_SENSIRION_SGP30=m +CONFIG_SPS30=m CONFIG_VZ89X=m CONFIG_IIO_CROS_EC_SENSORS_CORE=m CONFIG_IIO_CROS_EC_SENSORS=m @@ -7804,11 +8036,13 @@ CONFIG_AD5592R=m CONFIG_AD5593R=m CONFIG_AD5504=m CONFIG_AD5624R_SPI=m +CONFIG_LTC1660=m CONFIG_LTC2632=m CONFIG_AD5686=m CONFIG_AD5686_SPI=m CONFIG_AD5696_I2C=m CONFIG_AD5755=m +# CONFIG_AD5758 is not set CONFIG_AD5761=m CONFIG_AD5764=m CONFIG_AD5791=m @@ -7821,6 +8055,8 @@ CONFIG_MCP4725=m CONFIG_MCP4922=m CONFIG_TI_DAC082S085=m CONFIG_TI_DAC5571=m +CONFIG_TI_DAC7311=m +CONFIG_TI_DAC7612=m # # IIO dummy driver @@ -7931,8 +8167,10 @@ CONFIG_SENSORS_LM3533=m CONFIG_LTR501=m CONFIG_LV0104CS=m CONFIG_MAX44000=m +CONFIG_MAX44009=m CONFIG_OPT3001=m CONFIG_PA12203001=m +# CONFIG_SI1133 is not set CONFIG_SI1145=m CONFIG_STK3310=m CONFIG_ST_UVIS25=m @@ -7946,6 +8184,7 @@ CONFIG_TSL2772=m CONFIG_TSL4531=m CONFIG_US5182D=m CONFIG_VCNL4000=m +CONFIG_VCNL4035=m CONFIG_VEML6070=m CONFIG_VL6180=m CONFIG_ZOPT2201=m @@ -7967,6 +8206,9 @@ CONFIG_IIO_ST_MAGN_SPI_3AXIS=m CONFIG_SENSORS_HMC5843=m CONFIG_SENSORS_HMC5843_I2C=m CONFIG_SENSORS_HMC5843_SPI=m +CONFIG_SENSORS_RM3100=m +CONFIG_SENSORS_RM3100_I2C=m +CONFIG_SENSORS_RM3100_SPI=m # # Multiplexers @@ -7996,6 +8238,7 @@ CONFIG_MAX5487=m CONFIG_MCP4018=m CONFIG_MCP4131=m CONFIG_MCP4531=m +CONFIG_MCP41010=m CONFIG_TPL0102=m # @@ -8038,15 +8281,18 @@ CONFIG_AS3935=m # # Proximity and distance sensors # +# CONFIG_ISL29501 is not set CONFIG_LIDAR_LITE_V2=m CONFIG_RFD77402=m CONFIG_SRF04=m CONFIG_SX9500=m CONFIG_SRF08=m +CONFIG_VL53L0X_I2C=m # # Resolver to digital converters # +CONFIG_AD2S90=m CONFIG_AD2S1200=m # @@ -8125,6 +8371,7 @@ CONFIG_PHY_SAMSUNG_USB2=m CONFIG_PHY_TUSB1210=m CONFIG_POWERCAP=y CONFIG_INTEL_RAPL=m +# CONFIG_IDLE_INJECT is not set CONFIG_MCB=m CONFIG_MCB_PCI=m CONFIG_MCB_LPC=m @@ -8146,9 +8393,11 @@ CONFIG_ND_BLK=m CONFIG_ND_CLAIM=y CONFIG_ND_BTT=m CONFIG_BTT=y +CONFIG_NVDIMM_KEYS=y CONFIG_DAX_DRIVER=y CONFIG_DAX=y CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_KMEM=m CONFIG_NVMEM=y CONFIG_RAVE_SP_EEPROM=m @@ -8156,6 +8405,8 @@ CONFIG_RAVE_SP_EEPROM=m # HW tracing support # CONFIG_STM=m +CONFIG_STM_PROTO_BASIC=m +CONFIG_STM_PROTO_SYS_T=m # CONFIG_STM_DUMMY is not set CONFIG_STM_SOURCE_CONSOLE=m CONFIG_STM_SOURCE_HEARTBEAT=m @@ -8175,67 +8426,23 @@ CONFIG_FPGA_MGR_ALTERA_CVP=m CONFIG_FPGA_MGR_XILINX_SPI=m CONFIG_FPGA_MGR_MACHXO2_SPI=m CONFIG_FPGA_BRIDGE=m +CONFIG_ALTERA_FREEZE_BRIDGE=m CONFIG_XILINX_PR_DECOUPLER=m CONFIG_FPGA_REGION=m +# CONFIG_FPGA_DFL is not set CONFIG_PM_OPP=y # CONFIG_UNISYS_VISORBUS is not set CONFIG_SIOX=m CONFIG_SIOX_BUS_GPIO=m CONFIG_SLIMBUS=m CONFIG_SLIM_QCOM_CTRL=m - -# -# 
Firmware Drivers -# -CONFIG_EDD=m -# CONFIG_EDD_OFF is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DELL_RBU=m -CONFIG_DCDBAS=m -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=m -CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y -CONFIG_ISCSI_IBFT_FIND=y -CONFIG_ISCSI_IBFT=m -CONFIG_FW_CFG_SYSFS=m -# CONFIG_FW_CFG_SYSFS_CMDLINE is not set -CONFIG_GOOGLE_FIRMWARE=y -CONFIG_GOOGLE_SMI=m -CONFIG_GOOGLE_COREBOOT_TABLE=m -CONFIG_GOOGLE_COREBOOT_TABLE_ACPI=m -CONFIG_GOOGLE_MEMCONSOLE=m -CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY=m -CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT=m -CONFIG_GOOGLE_MEMCONSOLE_COREBOOT=m -CONFIG_GOOGLE_VPD=m - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_VARS=m -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=m -CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y -CONFIG_EFI_RUNTIME_MAP=y -# CONFIG_EFI_FAKE_MEMMAP is not set -CONFIG_EFI_RUNTIME_WRAPPERS=y -CONFIG_EFI_BOOTLOADER_CONTROL=m -CONFIG_EFI_CAPSULE_LOADER=m -CONFIG_EFI_TEST=m -CONFIG_APPLE_PROPERTIES=y -CONFIG_RESET_ATTACK_MITIGATION=y -CONFIG_UEFI_CPER=y -CONFIG_UEFI_CPER_X86=y -CONFIG_EFI_DEV_PATH_PARSER=y - -# -# Tegra firmware driver -# +CONFIG_INTERCONNECT=m # # File systems # CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_VALIDATE_FS_PARSER=y CONFIG_FS_IOMAP=y CONFIG_EXT2_FS=m CONFIG_EXT2_FS_XATTR=y @@ -8247,8 +8454,6 @@ CONFIG_EXT3_FS_SECURITY=y CONFIG_EXT4_FS=m CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y # CONFIG_EXT4_DEBUG is not set CONFIG_JBD2=m # CONFIG_JBD2_DEBUG is not set @@ -8294,7 +8499,6 @@ CONFIG_F2FS_FS_XATTR=y CONFIG_F2FS_FS_POSIX_ACL=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_CHECK_FS=y -CONFIG_F2FS_FS_ENCRYPTION=y # CONFIG_F2FS_IO_TRACE is not set # CONFIG_F2FS_FAULT_INJECTION is not set CONFIG_FS_DAX=y @@ -8303,7 +8507,7 @@ CONFIG_EXPORTFS=y CONFIG_EXPORTFS_BLOCK_OPS=y CONFIG_FILE_LOCKING=y CONFIG_MANDATORY_FILE_LOCKING=y -CONFIG_FS_ENCRYPTION=m +CONFIG_FS_ENCRYPTION=y CONFIG_FSNOTIFY=y CONFIG_DNOTIFY=y CONFIG_INOTIFY_USER=y @@ -8328,6 +8532,7 @@ CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y CONFIG_OVERLAY_FS_INDEX=y CONFIG_OVERLAY_FS_NFS_EXPORT=y # CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set # # Caches @@ -8390,7 +8595,6 @@ CONFIG_ECRYPT_FS=m CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m -CONFIG_HFSPLUS_FS_POSIX_ACL=y CONFIG_BEFS_FS=m # CONFIG_BEFS_DEBUG is not set CONFIG_BFS_FS=m @@ -8417,8 +8621,9 @@ CONFIG_UBIFS_FS_ADVANCED_COMPR=y CONFIG_UBIFS_FS_LZO=y CONFIG_UBIFS_FS_ZLIB=y CONFIG_UBIFS_ATIME_SUPPORT=y -CONFIG_UBIFS_FS_ENCRYPTION=y +CONFIG_UBIFS_FS_XATTR=y CONFIG_UBIFS_FS_SECURITY=y +# CONFIG_UBIFS_FS_AUTHENTICATION is not set CONFIG_CRAMFS=m CONFIG_CRAMFS_BLOCKDEV=y CONFIG_CRAMFS_MTD=y @@ -8453,15 +8658,18 @@ CONFIG_ROMFS_ON_MTD=y CONFIG_PSTORE=y CONFIG_PSTORE_DEFLATE_COMPRESS=y CONFIG_PSTORE_LZO_COMPRESS=y -# CONFIG_PSTORE_LZ4_COMPRESS is not set -CONFIG_PSTORE_LZ4HC_COMPRESS=m +CONFIG_PSTORE_LZ4_COMPRESS=y +CONFIG_PSTORE_LZ4HC_COMPRESS=y CONFIG_PSTORE_842_COMPRESS=y +CONFIG_PSTORE_ZSTD_COMPRESS=y CONFIG_PSTORE_COMPRESS=y -CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +# CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT is not set # CONFIG_PSTORE_LZO_COMPRESS_DEFAULT is not set +CONFIG_PSTORE_LZ4_COMPRESS_DEFAULT=y # CONFIG_PSTORE_LZ4HC_COMPRESS_DEFAULT is not set # CONFIG_PSTORE_842_COMPRESS_DEFAULT is not set -CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT is not set +CONFIG_PSTORE_COMPRESS_DEFAULT="lz4" # CONFIG_PSTORE_CONSOLE is not set CONFIG_PSTORE_PMSG=y # CONFIG_PSTORE_FTRACE is not set @@ 
-8470,9 +8678,6 @@ CONFIG_SYSV_FS=m CONFIG_UFS_FS=m # CONFIG_UFS_FS_WRITE is not set # CONFIG_UFS_DEBUG is not set -CONFIG_EXOFS_FS=m -# CONFIG_EXOFS_DEBUG is not set -CONFIG_ORE=m CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=m CONFIG_NFS_V2=m @@ -8512,14 +8717,15 @@ CONFIG_SUNRPC_GSS=m CONFIG_SUNRPC_BACKCHANNEL=y CONFIG_SUNRPC_SWAP=y CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set # CONFIG_SUNRPC_DEBUG is not set CONFIG_SUNRPC_XPRT_RDMA=m CONFIG_CEPH_FS=m CONFIG_CEPH_FSCACHE=y CONFIG_CEPH_FS_POSIX_ACL=y CONFIG_CIFS=m -CONFIG_CIFS_STATS=y CONFIG_CIFS_STATS2=y +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y CONFIG_CIFS_WEAK_PW_HASH=y CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y @@ -8527,13 +8733,13 @@ CONFIG_CIFS_POSIX=y CONFIG_CIFS_ACL=y # CONFIG_CIFS_DEBUG is not set CONFIG_CIFS_DFS_UPCALL=y -CONFIG_CIFS_SMB311=y # CONFIG_CIFS_SMB_DIRECT is not set CONFIG_CIFS_FSCACHE=y CONFIG_CODA_FS=m CONFIG_AFS_FS=m # CONFIG_AFS_DEBUG is not set CONFIG_AFS_FSCACHE=y +# CONFIG_AFS_DEBUG_CURSOR is not set CONFIG_9P_FS=m CONFIG_9P_FSCACHE=y CONFIG_9P_FS_POSIX_ACL=y @@ -8593,254 +8799,6 @@ CONFIG_DLM=m # CONFIG_DLM_DEBUG is not set # -# Kernel hacking -# -CONFIG_TRACE_IRQFLAGS_SUPPORT=y - -# -# printk and dmesg options -# -# CONFIG_PRINTK_TIME is not set -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=1 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=1 -# CONFIG_BOOT_PRINTK_DELAY is not set -# CONFIG_DYNAMIC_DEBUG is not set - -# -# Compile-time checks and compiler options -# -# CONFIG_DEBUG_INFO is not set -# CONFIG_ENABLE_WARN_DEPRECATED is not set -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_FRAME_WARN=0 -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -# CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_PAGE_OWNER is not set -CONFIG_DEBUG_FS=y -# CONFIG_HEADERS_CHECK is not set -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_STACK_VALIDATION=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_DEBUG_KERNEL=y - -# -# Memory Debugging -# -# CONFIG_PAGE_EXTENSION is not set -# CONFIG_DEBUG_PAGEALLOC is not set -# CONFIG_PAGE_POISONING is not set -# CONFIG_DEBUG_PAGE_REF is not set -# CONFIG_DEBUG_RODATA_TEST is not set -# CONFIG_DEBUG_OBJECTS is not set -CONFIG_SLUB_DEBUG_ON=y -# CONFIG_SLUB_STATS is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_VM is not set -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_DEBUG_STACKOVERFLOW=y -# CONFIG_DEBUG_STACKOVERFLOW is not set -CONFIG_HAVE_ARCH_KASAN=y -# CONFIG_KASAN is not set -CONFIG_ARCH_HAS_KCOV=y -CONFIG_CC_HAS_SANCOV_TRACE_PC=y -# CONFIG_KCOV is not set -# CONFIG_DEBUG_SHIRQ is not set - -# -# Debug Lockups and Hangs -# -# CONFIG_SOFTLOCKUP_DETECTOR is not set -CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y -# CONFIG_HARDLOCKUP_DETECTOR is not set -# CONFIG_DETECT_HUNG_TASK is not set -# CONFIG_WQ_WATCHDOG is not set -# CONFIG_PANIC_ON_OOPS is not set -CONFIG_PANIC_ON_OOPS_VALUE=0 -CONFIG_PANIC_TIMEOUT=0 -CONFIG_SCHED_DEBUG=y -CONFIG_SCHED_INFO=y -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_STACK_END_CHECK=y -# CONFIG_DEBUG_TIMEKEEPING is not set -# CONFIG_DEBUG_PREEMPT is not set - -# -# Lock Debugging (spinlocks, mutexes, etc...) 
-# -CONFIG_LOCK_DEBUGGING_SUPPORT=y -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_RWSEMS is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -# CONFIG_WW_MUTEX_SELFTEST is not set -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_LIST=y -CONFIG_DEBUG_PI_LIST=y -CONFIG_DEBUG_SG=y -CONFIG_DEBUG_NOTIFIERS=y -CONFIG_DEBUG_CREDENTIALS=y - -# -# RCU Debugging -# -CONFIG_TORTURE_TEST=m -CONFIG_RCU_PERF_TEST=m -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -CONFIG_FUNCTION_ERROR_INJECTION=y -# CONFIG_FAULT_INJECTION is not set -CONFIG_LATENCYTOP=y -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_FENTRY=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -CONFIG_FUNCTION_TRACER=y -# CONFIG_FUNCTION_GRAPH_TRACER is not set -# CONFIG_PREEMPTIRQ_EVENTS is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_PREEMPT_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_HWLAT_TRACER is not set -CONFIG_FTRACE_SYSCALLS=y -# CONFIG_TRACER_SNAPSHOT is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -CONFIG_STACK_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_KPROBE_EVENTS=y -# CONFIG_UPROBE_EVENTS is not set -CONFIG_BPF_EVENTS=y -CONFIG_PROBE_EVENTS=y -CONFIG_DYNAMIC_FTRACE=y -CONFIG_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_FUNCTION_PROFILER=y -# CONFIG_BPF_KPROBE_OVERRIDE is not set -CONFIG_FTRACE_MCOUNT_RECORD=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_MMIOTRACE is not set -# CONFIG_HIST_TRIGGERS is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_TRACE_EVAL_MAP_FILE is not set -CONFIG_TRACING_EVENTS_GPIO=y -# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set -# CONFIG_DMA_API_DEBUG is not set -CONFIG_RUNTIME_TESTING_MENU=y -CONFIG_LKDTM=m -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_TEST_SORT is not set -# CONFIG_KPROBES_SANITY_TEST is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -# CONFIG_PERCPU_TEST is not set -# CONFIG_ATOMIC64_SELFTEST is not set -# CONFIG_ASYNC_RAID6_TEST is not set -# CONFIG_TEST_HEXDUMP is not set -# CONFIG_TEST_STRING_HELPERS is not set -# CONFIG_TEST_KSTRTOX is not set -# CONFIG_TEST_PRINTF is not set -# CONFIG_TEST_BITMAP is not set -# CONFIG_TEST_UUID is not set -# CONFIG_TEST_OVERFLOW is not set -# CONFIG_TEST_RHASHTABLE is not set -# CONFIG_TEST_HASH is not set -# CONFIG_TEST_PARMAN is not set -# 
CONFIG_TEST_LKM is not set -# CONFIG_TEST_USER_COPY is not set -# CONFIG_TEST_BPF is not set -# CONFIG_FIND_BIT_BENCHMARK is not set -# CONFIG_TEST_FIRMWARE is not set -# CONFIG_TEST_SYSCTL is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_TEST_STATIC_KEYS is not set -# CONFIG_TEST_KMOD is not set -CONFIG_MEMTEST=y -# CONFIG_BUG_ON_DATA_CORRUPTION is not set -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -# CONFIG_UBSAN is not set -CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y -CONFIG_STRICT_DEVMEM=y -CONFIG_IO_STRICT_DEVMEM=y -CONFIG_EARLY_PRINTK_USB=y -CONFIG_X86_VERBOSE_BOOTUP=y -CONFIG_EARLY_PRINTK=y -CONFIG_EARLY_PRINTK_DBGP=y -CONFIG_EARLY_PRINTK_EFI=y -CONFIG_EARLY_PRINTK_USB_XDBC=y -CONFIG_X86_PTDUMP_CORE=y -# CONFIG_X86_PTDUMP is not set -# CONFIG_EFI_PGT_DUMP is not set -CONFIG_DEBUG_WX=y -CONFIG_DOUBLEFAULT=y -# CONFIG_DEBUG_TLBFLUSH is not set -# CONFIG_IOMMU_DEBUG is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -# CONFIG_X86_DECODER_SELFTEST is not set -CONFIG_IO_DELAY_TYPE_0X80=0 -CONFIG_IO_DELAY_TYPE_0XED=1 -CONFIG_IO_DELAY_TYPE_UDELAY=2 -CONFIG_IO_DELAY_TYPE_NONE=3 -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEFAULT_IO_DELAY_TYPE=0 -# CONFIG_DEBUG_BOOT_PARAMS is not set -# CONFIG_CPA_DEBUG is not set -# CONFIG_OPTIMIZE_INLINING is not set -# CONFIG_DEBUG_ENTRY is not set -# CONFIG_DEBUG_NMI_SELFTEST is not set -CONFIG_X86_DEBUG_FPU=y -# CONFIG_PUNIT_ATOM_DEBUG is not set -CONFIG_UNWINDER_ORC=y -# CONFIG_UNWINDER_FRAME_POINTER is not set - -# # Security options # CONFIG_KEYS=y @@ -8872,16 +8830,16 @@ CONFIG_PAGE_SANITIZE_VERIFY=y # CONFIG_SECURITY_SMACK is not set # CONFIG_SECURITY_TOMOYO is not set CONFIG_SECURITY_APPARMOR=y -CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1 CONFIG_SECURITY_APPARMOR_HASH=y CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y # CONFIG_SECURITY_APPARMOR_DEBUG is not set # CONFIG_SECURITY_LOADPIN is not set CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set # CONFIG_INTEGRITY is not set CONFIG_DEFAULT_SECURITY_APPARMOR=y # CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_DEFAULT_SECURITY="apparmor" +CONFIG_LSM="apparmor" CONFIG_XOR_BLOCKS=m CONFIG_ASYNC_CORE=m CONFIG_ASYNC_MEMCPY=m @@ -8922,7 +8880,6 @@ CONFIG_CRYPTO_NULL2=y CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_WORKQUEUE=y CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_SIMD=m @@ -8954,15 +8911,20 @@ CONFIG_CRYPTO_ECHAINIV=m # # Block modes # -CONFIG_CRYPTO_CBC=m +CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_CTR=m -CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_CTS=y CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_XTS=y CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_NHPOLY1305=m +CONFIG_CRYPTO_NHPOLY1305_SSE2=m +CONFIG_CRYPTO_NHPOLY1305_AVX2=m +CONFIG_CRYPTO_ADIANTUM=m # # Hash modes @@ -8995,13 +8957,11 @@ CONFIG_CRYPTO_SHA1=y CONFIG_CRYPTO_SHA1_SSSE3=m CONFIG_CRYPTO_SHA256_SSSE3=m CONFIG_CRYPTO_SHA512_SSSE3=m -CONFIG_CRYPTO_SHA1_MB=m -CONFIG_CRYPTO_SHA256_MB=m -CONFIG_CRYPTO_SHA512_MB=m -CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m @@ -9040,7 +9000,6 @@ CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m CONFIG_CRYPTO_SERPENT_AVX_X86_64=m CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m 
CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_TWOFISH_COMMON=m @@ -9073,6 +9032,7 @@ CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m +# CONFIG_CRYPTO_STATS is not set CONFIG_CRYPTO_HASH_INFO=y CONFIG_CRYPTO_HW=y CONFIG_CRYPTO_DEV_PADLOCK=m @@ -9098,7 +9058,10 @@ CONFIG_CRYPTO_DEV_CHELSIO_TLS=m CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS8_PRIVATE_KEY_PARSER=y +CONFIG_TPM_KEY_PARSER=m CONFIG_PKCS7_MESSAGE_PARSER=y CONFIG_PKCS7_TEST_KEY=m CONFIG_SIGNED_PE_FILE_VERIFICATION=y @@ -9113,36 +9076,13 @@ CONFIG_SYSTEM_TRUSTED_KEYS="" CONFIG_SECONDARY_TRUSTED_KEYRING=y CONFIG_SYSTEM_BLACKLIST_KEYRING=y CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" -CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_KVM_ASYNC_PF=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_KVM_COMPAT=y -CONFIG_HAVE_KVM_IRQ_BYPASS=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m -CONFIG_KVM_INTEL=m -CONFIG_KVM_AMD=m -CONFIG_KVM_AMD_SEV=y -# CONFIG_KVM_MMU_AUDIT is not set -CONFIG_VHOST_NET=m -CONFIG_VHOST_SCSI=m -CONFIG_VHOST_VSOCK=m -CONFIG_VHOST=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set CONFIG_BINARY_PRINTF=y # # Library routines # CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y CONFIG_BITREVERSE=y CONFIG_RATIONAL=y CONFIG_GENERIC_STRNCPY_FROM_USER=y @@ -9163,6 +9103,7 @@ CONFIG_CRC32_SLICEBY8=y # CONFIG_CRC32_SLICEBY4 is not set # CONFIG_CRC32_SARWATE is not set # CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m CONFIG_CRC4=m CONFIG_CRC7=m CONFIG_LIBCRC32C=m @@ -9208,7 +9149,7 @@ CONFIG_TEXTSEARCH_BM=m CONFIG_TEXTSEARCH_FSM=m CONFIG_BTREE=y CONFIG_INTERVAL_TREE=y -CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_XARRAY_MULTI=y CONFIG_ASSOCIATIVE_ARRAY=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT_MAP=y @@ -9216,9 +9157,21 @@ CONFIG_HAS_DMA=y CONFIG_NEED_SG_DMA_LENGTH=y CONFIG_NEED_DMA_MAP_STATE=y CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_DMA_DIRECT_OPS=y +CONFIG_DMA_DECLARE_COHERENT=y CONFIG_DMA_VIRT_OPS=y CONFIG_SWIOTLB=y +CONFIG_DMA_CMA=y + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=0 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set CONFIG_SGL_ALLOC=y CONFIG_IOMMU_HELPER=y CONFIG_CHECK_SIGNATURE=y @@ -9249,8 +9202,8 @@ CONFIG_FONT_8x16=y # CONFIG_FONT_10x18 is not set # CONFIG_FONT_SUN8x16 is not set # CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_TER16x32 is not set CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_SG_CHAIN=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y CONFIG_ARCH_HAS_UACCESS_MCSAFE=y @@ -9258,3 +9211,265 @@ CONFIG_SBITMAP=y CONFIG_PARMAN=m CONFIG_PRIME_NUMBERS=m # CONFIG_STRING_SELFTEST is not set +CONFIG_OBJAGG=m + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +# CONFIG_PRINTK_TIME is not set +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=1 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=1 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +# 
CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=0 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_DEBUG_WRITABLE_FUNCTION_POINTERS_VERBOSE is not set +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +CONFIG_SLUB_DEBUG_ON=y +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_CC_HAS_KASAN_GENERIC=y +# CONFIG_KASAN is not set +CONFIG_KASAN_STACK=1 +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +CONFIG_DEBUG_PI_LIST=y +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +CONFIG_TORTURE_TEST=m +CONFIG_RCU_PERF_TEST=m +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_LATENCYTOP=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +# CONFIG_FUNCTION_GRAPH_TRACER is not set +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +# CONFIG_UPROBE_EVENTS is not set +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_MMIOTRACE is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_TRACING_EVENTS_GPIO=y +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set +CONFIG_RUNTIME_TESTING_MENU=y +CONFIG_LKDTM=m +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# 
CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_STACKINIT is not set +CONFIG_MEMTEST=y +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_UBSAN_ALIGNMENT=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +CONFIG_IO_STRICT_DEVMEM=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_EARLY_PRINTK_USB=y +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_USB_XDBC=y +CONFIG_X86_PTDUMP_CORE=y +# CONFIG_X86_PTDUMP is not set +# CONFIG_EFI_PGT_DUMP is not set +CONFIG_DEBUG_WX=y +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +# CONFIG_IOMMU_DEBUG is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +# CONFIG_X86_DECODER_SELFTEST is not set +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_UDELAY=2 +CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEFAULT_IO_DELAY_TYPE=0 +# CONFIG_DEBUG_BOOT_PARAMS is not set +# CONFIG_CPA_DEBUG is not set +# CONFIG_OPTIMIZE_INLINING is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +CONFIG_X86_DEBUG_FPU=y +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set diff --git a/sys-kernel/linux-image-redcore/files/ata-fix-NCQ-LOG-strings-and-move-to-debug.patch b/sys-kernel/linux-image-redcore/files/5.1-ata-fix-NCQ-LOG-strings-and-move-to-debug.patch index 344a8c4b..344a8c4b 100644 --- a/sys-kernel/linux-image-redcore/files/ata-fix-NCQ-LOG-strings-and-move-to-debug.patch +++ b/sys-kernel/linux-image-redcore/files/5.1-ata-fix-NCQ-LOG-strings-and-move-to-debug.patch diff --git a/sys-kernel/linux-image-redcore/files/drop_ancient-and-wrong-msg.patch b/sys-kernel/linux-image-redcore/files/5.1-drop_ancient-and-wrong-msg.patch index f184b08e..f184b08e 100644 --- a/sys-kernel/linux-image-redcore/files/drop_ancient-and-wrong-msg.patch +++ b/sys-kernel/linux-image-redcore/files/5.1-drop_ancient-and-wrong-msg.patch diff --git a/sys-kernel/linux-image-redcore/files/enable_alx_wol.patch b/sys-kernel/linux-image-redcore/files/5.1-enable_alx_wol.patch index 1b7f6e13..1b7f6e13 100644 --- a/sys-kernel/linux-image-redcore/files/enable_alx_wol.patch +++ b/sys-kernel/linux-image-redcore/files/5.1-enable_alx_wol.patch diff --git a/sys-kernel/linux-image-redcore/files/5.1-fix-acpi_dbg_level.patch b/sys-kernel/linux-image-redcore/files/5.1-fix-acpi_dbg_level.patch new file mode 100644 index 00000000..d7fb610f --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/5.1-fix-acpi_dbg_level.patch @@ -0,0 +1,12 @@ +diff -Naur linux-5.1/drivers/acpi/bus.c linux-5.1-p/drivers/acpi/bus.c +--- linux-5.1/drivers/acpi/bus.c 2019-05-06 02:42:58.000000000 +0200 ++++ linux-5.1-p/drivers/acpi/bus.c 2019-05-07 08:50:58.819866218 +0200 +@@ -1043,6 +1043,8 @@ 
+ + acpi_permanent_mmap = true; + ++ acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR; ++ + #ifdef CONFIG_X86 + /* + * If the machine falls into the DMI check table, diff --git a/sys-kernel/linux-image-redcore/files/linux-hardened.patch b/sys-kernel/linux-image-redcore/files/5.1-linux-hardened.patch index 7a46a91b..cfb24d72 100644 --- a/sys-kernel/linux-image-redcore/files/linux-hardened.patch +++ b/sys-kernel/linux-image-redcore/files/5.1-linux-hardened.patch @@ -1,8 +1,8 @@ diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index 1370b424a453..54d7125d6912 100644 +index c7937f379d22..6a9c38fdd2e9 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt -@@ -496,16 +496,6 @@ +@@ -505,16 +505,6 @@ nosocket -- Disable socket memory accounting. nokmem -- Disable kernel memory accounting. @@ -19,7 +19,7 @@ index 1370b424a453..54d7125d6912 100644 cio_ignore= [S390] See Documentation/s390/CommonIO for details. clk_ignore_unused -@@ -3066,6 +3056,11 @@ +@@ -3241,6 +3231,11 @@ the specified number of seconds. This is to be used if your oopses keep scrolling off the screen. @@ -32,18 +32,18 @@ index 1370b424a453..54d7125d6912 100644 pcd. [PARIDE] diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt -index eded671d55eb..0abfc33f101d 100644 +index aa058aa7bf28..228632fa5f66 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt -@@ -92,6 +92,7 @@ show up in /proc/sys/kernel: +@@ -97,6 +97,7 @@ show up in /proc/sys/kernel: - sysctl_writes_strict - - tainted + - tainted ==> Documentation/admin-guide/tainted-kernels.rst - threads-max +- tiocsti_restrict - unknown_nmi_panic - watchdog - watchdog_thresh -@@ -1016,6 +1017,26 @@ available RAM pages threads-max is reduced accordingly. +@@ -1082,6 +1083,26 @@ available RAM pages threads-max is reduced accordingly. ============================================================== @@ -71,13 +71,13 @@ index eded671d55eb..0abfc33f101d 100644 The value in this file affects behavior of handling NMI. When the diff --git a/Makefile b/Makefile -index a41692c5827a..ce817616d8a9 100644 +index d7b3c8e3ff3e..029b58e4e67f 100644 --- a/Makefile +++ b/Makefile -@@ -688,6 +688,9 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong +@@ -717,6 +717,9 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong KBUILD_CFLAGS += $(stackp-flags-y) - ifeq ($(cc-name),clang) + ifdef CONFIG_CC_IS_CLANG +ifdef CONFIG_LOCAL_INIT +KBUILD_CFLAGS += -fsanitize=local-init +endif @@ -85,22 +85,10 @@ index a41692c5827a..ce817616d8a9 100644 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) diff --git a/arch/Kconfig b/arch/Kconfig -index d1f2ed462ac8..19a595a47dae 100644 +index 9092e0ffe4d3..837a0297b720 100644 --- a/arch/Kconfig +++ b/arch/Kconfig -@@ -470,6 +470,11 @@ config GCC_PLUGIN_LATENT_ENTROPY - is some slowdown of the boot process (about 0.5%) and fork and - irq processing. - -+ When extra_latent_entropy is passed on the kernel command line, -+ entropy will be extracted from up to the first 4GB of RAM while the -+ runtime memory allocator is being initialized. This costs even more -+ slowdown of the boot process. -+ - Note that entropy extracted this way is not cryptographically - secure! 
- -@@ -731,7 +736,7 @@ config ARCH_MMAP_RND_BITS +@@ -631,7 +631,7 @@ config ARCH_MMAP_RND_BITS int "Number of bits to use for ASLR of mmap base address" if EXPERT range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT @@ -109,7 +97,7 @@ index d1f2ed462ac8..19a595a47dae 100644 depends on HAVE_ARCH_MMAP_RND_BITS help This value can be used to select the number of bits to use to -@@ -765,7 +770,7 @@ config ARCH_MMAP_RND_COMPAT_BITS +@@ -665,7 +665,7 @@ config ARCH_MMAP_RND_COMPAT_BITS int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT @@ -118,7 +106,7 @@ index d1f2ed462ac8..19a595a47dae 100644 depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS help This value can be used to select the number of bits to use to -@@ -967,6 +972,7 @@ config ARCH_HAS_REFCOUNT +@@ -873,6 +873,7 @@ config ARCH_HAS_REFCOUNT config REFCOUNT_FULL bool "Perform full reference count validation at the expense of speed" @@ -127,10 +115,10 @@ index d1f2ed462ac8..19a595a47dae 100644 Enabling this switches the refcounting infrastructure from a fast unchecked atomic_t implementation to a fully state checked diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 42c090cf0292..a6e2276009e4 100644 +index d218729ec852..d0e94e76885f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig -@@ -1005,6 +1005,7 @@ endif +@@ -1166,6 +1166,7 @@ endif config ARM64_SW_TTBR0_PAN bool "Emulate Privileged Access Never using TTBR0_EL1 switching" @@ -138,7 +126,7 @@ index 42c090cf0292..a6e2276009e4 100644 help Enabling this option prevents the kernel from accessing user-space memory directly by pointing TTBR0_EL1 to a reserved -@@ -1180,6 +1181,7 @@ config RANDOMIZE_BASE +@@ -1393,6 +1394,7 @@ config RANDOMIZE_BASE bool "Randomize the address of the kernel image" select ARM64_MODULE_PLTS if MODULES select RELOCATABLE @@ -147,10 +135,10 @@ index 42c090cf0292..a6e2276009e4 100644 Randomizes the virtual address at which the kernel image is loaded, as a security feature that deters exploit attempts diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug -index cc6bd559af85..01d5442d4722 100644 +index 69c9170bdd24..a786227db0e3 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug -@@ -45,6 +45,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET +@@ -42,6 +42,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET config DEBUG_WX bool "Warn on W+X mappings at boot" select ARM64_PTDUMP_CORE @@ -159,7 +147,7 @@ index cc6bd559af85..01d5442d4722 100644 Generate a warning if any W+X mappings are found at boot. diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig -index f9a186f6af8a..e628231a5a92 100644 +index 32fb03503b0b..228d3770d924 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1,4 +1,3 @@ @@ -168,10 +156,10 @@ index f9a186f6af8a..e628231a5a92 100644 CONFIG_AUDIT=y CONFIG_NO_HZ_IDLE=y diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h -index 433b9554c6a1..1f4b06317c9f 100644 +index 6adc1a90e7e6..1f4b06317c9f 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h -@@ -114,10 +114,10 @@ +@@ -114,14 +114,10 @@ /* * This is the base location for PIE (ET_DYN with INTERP) loads. 
On @@ -179,12 +167,16 @@ index 433b9554c6a1..1f4b06317c9f 100644 + * 64-bit, this is raised to 4GB to leave the entire 32-bit address * space open for things that want to use the area for 32-bit pointers. */ +-#ifdef CONFIG_ARM64_FORCE_52BIT -#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) +-#else +-#define ELF_ET_DYN_BASE (2 * DEFAULT_MAP_WINDOW_64 / 3) +-#endif /* CONFIG_ARM64_FORCE_52BIT */ +#define ELF_ET_DYN_BASE 0x100000000UL #ifndef __ASSEMBLY__ -@@ -171,10 +171,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, +@@ -175,10 +171,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, /* 1GB of VA */ #ifdef CONFIG_COMPAT #define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \ @@ -199,10 +191,10 @@ index 433b9554c6a1..1f4b06317c9f 100644 #ifdef __AARCH64EB__ diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c -index e10bc363f533..8e5701978b7c 100644 +index 3767fb21a5b8..776cf5d48f7d 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c -@@ -481,9 +481,9 @@ unsigned long arch_align_stack(unsigned long sp) +@@ -538,9 +538,9 @@ unsigned long arch_align_stack(unsigned long sp) unsigned long arch_randomize_brk(struct mm_struct *mm) { if (is_compat_task()) @@ -215,7 +207,7 @@ index e10bc363f533..8e5701978b7c 100644 /* diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 6b8065d718bd..347c8d1c3a45 100644 +index 62fc3fda1a05..fe5f612d7014 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1202,8 +1202,7 @@ config VM86 @@ -228,7 +220,7 @@ index 6b8065d718bd..347c8d1c3a45 100644 depends on MODIFY_LDT_SYSCALL ---help--- This option is required by programs like Wine to run 16-bit -@@ -2295,7 +2294,7 @@ config COMPAT_VDSO +@@ -2291,7 +2290,7 @@ config COMPAT_VDSO choice prompt "vsyscall table for legacy applications" depends on X86_64 @@ -237,7 +229,7 @@ index 6b8065d718bd..347c8d1c3a45 100644 help Legacy user code that does not know how to find the vDSO expects to be able to issue three syscalls by calling fixed addresses in -@@ -2376,8 +2375,7 @@ config CMDLINE_OVERRIDE +@@ -2372,8 +2371,7 @@ config CMDLINE_OVERRIDE be set to 'N' under normal conditions. config MODIFY_LDT_SYSCALL @@ -248,10 +240,10 @@ index 6b8065d718bd..347c8d1c3a45 100644 Linux can allow user programs to install a per-process x86 Local Descriptor Table (LDT) using the modify_ldt(2) system diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug -index c6dd1d980081..0acf0d1d67de 100644 +index 15d0fbe27872..5c32c9818bd4 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug -@@ -104,6 +104,7 @@ config EFI_PGT_DUMP +@@ -91,6 +91,7 @@ config EFI_PGT_DUMP config DEBUG_WX bool "Warn on W+X mappings at boot" select X86_PTDUMP_CORE @@ -260,7 +252,7 @@ index c6dd1d980081..0acf0d1d67de 100644 Generate a warning if any W+X mappings are found at boot. 
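The hardened hunks above widen userspace ASLR across the board: ARCH_MMAP_RND_BITS and ARCH_MMAP_RND_COMPAT_BITS default to their architecture maximum, ELF_ET_DYN_BASE moves up to 4GB, STACK_RND_MASK grows, and arch_randomize_brk() picks from a much larger window. A minimal userspace sketch can make the effect visible; this program is illustrative only (the file name and output layout are this note's, not part of the patch), and it assumes a PIE build on a kernel with these defaults. Run it a few times and compare how many high bits of each address class change between runs; under the hardened defaults the variation should be noticeably wider than on a stock kernel.

/* aslr-peek.c: print one address from each randomized region.
 * Build and run: cc -O2 -o aslr-peek aslr-peek.c && ./aslr-peek
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	int stack_var = 0;
	void *heap = malloc(16);
	void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (!heap || map == MAP_FAILED)
		return 1;

	printf("text  %p\n", (void *)main);	  /* PIE load base (ELF_ET_DYN_BASE) */
	printf("stack %p\n", (void *)&stack_var); /* stack top (STACK_RND_MASK) */
	printf("heap  %p\n", heap);		  /* small alloc follows randomized brk */
	printf("mmap  %p\n", map);		  /* mmap base (ARCH_MMAP_RND_BITS) */

	free(heap);
	munmap(map, 4096);
	return 0;
}
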
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig -index e32fc1f274d8..d08acc76502a 100644 +index 1d3badfda09e..bd67e1778ffa 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -1,5 +1,4 @@ @@ -270,10 +262,10 @@ index e32fc1f274d8..d08acc76502a 100644 CONFIG_BSD_PROCESS_ACCT=y CONFIG_TASKSTATS=y diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c -index 5b8b556dbb12..a569f08b4478 100644 +index babc4e7a519c..197b79a50bf5 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c -@@ -204,55 +204,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr) +@@ -198,55 +198,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr) } #ifdef CONFIG_X86_64 @@ -331,10 +323,10 @@ index 5b8b556dbb12..a569f08b4478 100644 #endif diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h -index 0d157d2a1e2a..770c8ae97f92 100644 +index 69c0f892e310..f9f7a85bb71e 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h -@@ -249,11 +249,11 @@ extern int force_personality32; +@@ -248,11 +248,11 @@ extern int force_personality32; /* * This is the base location for PIE (ET_DYN with INTERP) loads. On @@ -348,7 +340,7 @@ index 0d157d2a1e2a..770c8ae97f92 100644 /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, -@@ -313,8 +313,8 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len); +@@ -312,8 +312,8 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len); #ifdef CONFIG_X86_32 @@ -359,7 +351,7 @@ index 0d157d2a1e2a..770c8ae97f92 100644 #define ARCH_DLINFO ARCH_DLINFO_IA32 -@@ -323,7 +323,11 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len); +@@ -322,7 +322,11 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len); #else /* CONFIG_X86_32 */ /* 1GB for 64bit, 8MB for 32bit */ @@ -372,17 +364,17 @@ index 0d157d2a1e2a..770c8ae97f92 100644 #define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32()) #define ARCH_DLINFO \ -@@ -381,5 +385,4 @@ struct va_alignment { +@@ -380,5 +384,4 @@ struct va_alignment { } ____cacheline_aligned; extern struct va_alignment va_align; -extern unsigned long align_vdso_addr(unsigned long); #endif /* _ASM_X86_ELF_H */ diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h -index 6690cd3fc8b1..300a1c2819fb 100644 +index f4204bf377fc..8ccc7aa0ece0 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h -@@ -266,6 +266,7 @@ static inline void cr4_set_bits(unsigned long mask) +@@ -294,6 +294,7 @@ static inline void cr4_set_bits(unsigned long mask) local_irq_save(flags); cr4 = this_cpu_read(cpu_tlbstate.cr4); @@ -390,7 +382,7 @@ index 6690cd3fc8b1..300a1c2819fb 100644 if ((cr4 | mask) != cr4) __cr4_set(cr4 | mask); local_irq_restore(flags); -@@ -278,6 +279,7 @@ static inline void cr4_clear_bits(unsigned long mask) +@@ -306,6 +307,7 @@ static inline void cr4_clear_bits(unsigned long mask) local_irq_save(flags); cr4 = this_cpu_read(cpu_tlbstate.cr4); @@ -398,7 +390,7 @@ index 6690cd3fc8b1..300a1c2819fb 100644 if ((cr4 & ~mask) != cr4) __cr4_set(cr4 & ~mask); local_irq_restore(flags); -@@ -288,6 +290,7 @@ static inline void cr4_toggle_bits_irqsoff(unsigned long mask) +@@ -316,6 +318,7 @@ static inline void cr4_toggle_bits_irqsoff(unsigned long mask) unsigned long cr4; cr4 = this_cpu_read(cpu_tlbstate.cr4); @@ 
-406,7 +398,7 @@ index 6690cd3fc8b1..300a1c2819fb 100644 __cr4_set(cr4 ^ mask); } -@@ -394,6 +397,7 @@ static inline void __native_flush_tlb_global(void) +@@ -422,6 +425,7 @@ static inline void __native_flush_tlb_global(void) raw_local_irq_save(flags); cr4 = this_cpu_read(cpu_tlbstate.cr4); @@ -415,10 +407,10 @@ index 6690cd3fc8b1..300a1c2819fb 100644 native_write_cr4(cr4 ^ X86_CR4_PGE); /* write old PGE again and flush TLBs */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c -index b41b72bd8bb8..d54a3c30902a 100644 +index 132a63dc5a76..05c8de9b452e 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c -@@ -1726,7 +1726,6 @@ void cpu_init(void) +@@ -1768,7 +1768,6 @@ void cpu_init(void) wrmsrl(MSR_KERNEL_GS_BASE, 0); barrier(); @@ -427,19 +419,19 @@ index b41b72bd8bb8..d54a3c30902a 100644 /* diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c -index 30ca2d1a9231..bde0a18cd9f9 100644 +index 957eae13b370..01b7bb76bdbc 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c -@@ -39,6 +39,8 @@ - #include <asm/desc.h> +@@ -42,6 +42,8 @@ #include <asm/prctl.h> #include <asm/spec-ctrl.h> + #include <asm/proto.h> +#include <asm/elf.h> +#include <linux/sizes.h> - /* - * per-CPU TSS segments. Threads are completely 'soft' on Linux, -@@ -718,7 +720,10 @@ unsigned long arch_align_stack(unsigned long sp) + #include "process.h" + +@@ -798,7 +800,10 @@ unsigned long arch_align_stack(unsigned long sp) unsigned long arch_randomize_brk(struct mm_struct *mm) { @@ -452,7 +444,7 @@ index 30ca2d1a9231..bde0a18cd9f9 100644 /* diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c -index 6a78d4b36a79..715009f7a96c 100644 +index f7476ce23b6e..652169a2b23a 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -54,13 +54,6 @@ static unsigned long get_align_bits(void) @@ -473,7 +465,7 @@ index 6a78d4b36a79..715009f7a96c 100644 } *begin = get_mmap_base(1); -- if (in_compat_syscall()) +- if (in_32bit_syscall()) - *end = task_size_32bit(); - else - *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW); @@ -491,10 +483,10 @@ index 6a78d4b36a79..715009f7a96c 100644 /* diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c -index 979e0a02cbe1..d6ab882a0091 100644 +index 85c94f9a87f8..6b14ddb6c688 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c -@@ -560,9 +560,9 @@ static void __init pagetable_init(void) +@@ -559,9 +559,9 @@ static void __init pagetable_init(void) #define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL) /* Bits supported by the hardware: */ @@ -506,34 +498,11 @@ index 979e0a02cbe1..d6ab882a0091 100644 EXPORT_SYMBOL_GPL(__supported_pte_mask); /* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */ EXPORT_SYMBOL(__default_kernel_pte_mask); -@@ -873,7 +873,7 @@ int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) - #endif - #endif - --int kernel_set_to_readonly __read_mostly; -+int kernel_set_to_readonly __ro_after_init; - - void set_kernel_text_rw(void) - { -@@ -925,12 +925,11 @@ void mark_rodata_ro(void) - unsigned long start = PFN_ALIGN(_text); - unsigned long size = PFN_ALIGN(_etext) - start; - -+ kernel_set_to_readonly = 1; - set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Write protecting the kernel text: %luk\n", - size >> 10); - -- kernel_set_to_readonly = 1; -- - #ifdef CONFIG_CPA_DEBUG - printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", - start, start+size); diff --git a/arch/x86/mm/init_64.c 
b/arch/x86/mm/init_64.c -index 68c292cb1ebf..b81cd1f2d6df 100644 +index bccff68e3267..b4e3a62c2e50 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c -@@ -66,9 +66,9 @@ +@@ -65,9 +65,9 @@ */ /* Bits supported by the hardware: */ @@ -545,28 +514,8 @@ index 68c292cb1ebf..b81cd1f2d6df 100644 EXPORT_SYMBOL_GPL(__supported_pte_mask); /* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */ EXPORT_SYMBOL(__default_kernel_pte_mask); -@@ -1207,7 +1207,7 @@ void __init mem_init(void) - mem_init_print_info(NULL); - } - --int kernel_set_to_readonly; -+int kernel_set_to_readonly __ro_after_init; - - void set_kernel_text_rw(void) - { -@@ -1256,9 +1256,8 @@ void mark_rodata_ro(void) - - printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", - (end - start) >> 10); -- set_memory_ro(start, (end - start) >> PAGE_SHIFT); -- - kernel_set_to_readonly = 1; -+ set_memory_ro(start, (end - start) >> PAGE_SHIFT); - - /* - * The rodata/data/bss/brk section (but not the kernel text!) diff --git a/block/blk-softirq.c b/block/blk-softirq.c -index 15c1f5e12eb8..ff72cccec5b8 100644 +index 457d9ba3eb20..5f987fc1c0a0 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -20,7 +20,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done); @@ -579,10 +528,10 @@ index 15c1f5e12eb8..ff72cccec5b8 100644 struct list_head *cpu_list, local_list; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c -index cc71c63df381..44432ecffbcd 100644 +index 133fed8e4a8b..a04fccec45db 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c -@@ -5154,7 +5154,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) +@@ -5161,7 +5161,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) struct ata_port *ap; unsigned int tag; @@ -591,7 +540,7 @@ index cc71c63df381..44432ecffbcd 100644 ap = qc->ap; qc->flags = 0; -@@ -5171,7 +5171,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) +@@ -5178,7 +5178,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) struct ata_port *ap; struct ata_link *link; @@ -601,7 +550,7 @@ index cc71c63df381..44432ecffbcd 100644 ap = qc->ap; link = qc->dev->link; diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig -index 212f447938ae..0982c7ddd88c 100644 +index 466ebd84ad17..a093e3f158c9 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -9,7 +9,6 @@ source "drivers/tty/Kconfig" @@ -612,7 +561,7 @@ index 212f447938ae..0982c7ddd88c 100644 help Say Y here if you want to support the /dev/mem device. The /dev/mem device is used to access areas of physical -@@ -531,7 +530,6 @@ config TELCLOCK +@@ -536,7 +535,6 @@ config TELCLOCK config DEVPORT bool "/dev/port character device" depends on ISA || PCI @@ -621,7 +570,7 @@ index 212f447938ae..0982c7ddd88c 100644 Say Y here if you want to support the /dev/port device. The /dev/port device is similar to /dev/mem, but for I/O ports. diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig -index 0840d27381ea..ae292fcedaca 100644 +index e0a04bfc873e..ec93f827c599 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -122,7 +122,6 @@ config UNIX98_PTYS @@ -633,10 +582,10 @@ index 0840d27381ea..ae292fcedaca 100644 A pseudo terminal (PTY) is a software device consisting of two halves: a master and a slave. 
The slave device behaves identical to diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c -index aba59521ad48..90d4779683a3 100644 +index 5fa250157025..fabcb1ebd24b 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c -@@ -172,6 +172,7 @@ static void free_tty_struct(struct tty_struct *tty) +@@ -173,6 +173,7 @@ static void free_tty_struct(struct tty_struct *tty) put_device(tty->dev); kfree(tty->write_buf); tty->magic = 0xDEADDEAD; @@ -644,7 +593,7 @@ index aba59521ad48..90d4779683a3 100644 kfree(tty); } -@@ -2164,11 +2165,19 @@ static int tty_fasync(int fd, struct file *filp, int on) +@@ -2178,11 +2179,19 @@ static int tty_fasync(int fd, struct file *filp, int on) * FIXME: may race normal receive processing */ @@ -664,7 +613,7 @@ index aba59521ad48..90d4779683a3 100644 if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(ch, p)) -@@ -2851,6 +2860,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx) +@@ -3008,6 +3017,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx) tty->index = idx; tty_line_name(driver, idx, tty->name); tty->dev = tty_get_device(tty); @@ -672,11 +621,26 @@ index aba59521ad48..90d4779683a3 100644 return tty; } +diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c +index e38f104db174..8570a8514e46 100644 +--- a/drivers/tty/tty_ldisc.c ++++ b/drivers/tty/tty_ldisc.c +@@ -855,8 +855,8 @@ void tty_ldisc_deinit(struct tty_struct *tty) + tty->ldisc = NULL; + } + +-static int zero; +-static int one = 1; ++static int zero __read_only; ++static int one __read_only = 1; + static struct ctl_table tty_table[] = { + { + .procname = "ldisc_autoload", diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c -index 1fb266809966..db145cb734e4 100644 +index 310eef451db8..2c4c1f7045bf 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c -@@ -41,6 +41,8 @@ +@@ -42,6 +42,8 @@ #define USB_TP_TRANSMISSION_DELAY 40 /* ns */ #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */ @@ -685,7 +649,7 @@ index 1fb266809966..db145cb734e4 100644 /* Protect struct usb_device->state and ->children members * Note: Both are also protected by ->dev.sem, except that ->state can * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. 
*/ -@@ -4881,6 +4883,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, +@@ -4981,6 +4983,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, goto done; return; } @@ -699,7 +663,7 @@ index 1fb266809966..db145cb734e4 100644 unit_load = 150; else diff --git a/fs/exec.c b/fs/exec.c -index bdd0eacefdf5..20908a84550c 100644 +index 2e0033348d8e..003933540718 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -62,6 +62,7 @@ @@ -710,7 +674,7 @@ index bdd0eacefdf5..20908a84550c 100644 #include <linux/uaccess.h> #include <asm/mmu_context.h> -@@ -320,6 +321,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm) +@@ -275,6 +276,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm) arch_bprm_mm_init(mm, vma); up_write(&mm->mmap_sem); bprm->p = vma->vm_end - sizeof(void *); @@ -720,17 +684,21 @@ index bdd0eacefdf5..20908a84550c 100644 err: up_write(&mm->mmap_sem); diff --git a/fs/namei.c b/fs/namei.c -index 734cef54fdf8..8e3b3ae0cf30 100644 +index dede0147b3f6..c708a5887276 100644 --- a/fs/namei.c +++ b/fs/namei.c -@@ -885,8 +885,8 @@ static inline void put_link(struct nameidata *nd) +@@ -883,10 +883,10 @@ static inline void put_link(struct nameidata *nd) path_put(&last->link); } -int sysctl_protected_symlinks __read_mostly = 0; -int sysctl_protected_hardlinks __read_mostly = 0; +-int sysctl_protected_fifos __read_mostly; +-int sysctl_protected_regular __read_mostly; +int sysctl_protected_symlinks __read_mostly = 1; +int sysctl_protected_hardlinks __read_mostly = 1; ++int sysctl_protected_fifos __read_mostly = 2; ++int sysctl_protected_regular __read_mostly = 2; /** * may_follow_link - Check symlink following for unsafe situations @@ -744,10 +712,10 @@ index 5f93cfacb3d1..cea0d7d3b23e 100644 select CRC32 - default y diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig -index 0eaeb41453f5..8cd1e66aa408 100644 +index 817c02b13b1d..b8cd62b5cbc3 100644 --- a/fs/proc/Kconfig +++ b/fs/proc/Kconfig -@@ -39,7 +39,6 @@ config PROC_KCORE +@@ -40,7 +40,6 @@ config PROC_KCORE config PROC_VMCORE bool "/proc/vmcore support" depends on PROC_FS && CRASH_DUMP @@ -756,7 +724,7 @@ index 0eaeb41453f5..8cd1e66aa408 100644 Exports the dump image of crashed kernel in ELF format. 
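
Aside: a minimal userspace sketch (not part of the patch) of the TIOCSTI injection primitive that the tty_io.c hunk above restricts. Each ioctl(TIOCSTI) pushes one byte into a terminal's input queue as if it had been typed; the patch makes that fail with EPERM on anything but the caller's controlling tty unless CAP_SYS_ADMIN is held, with the policy toggled by the new kernel.tiocsti_restrict sysctl. The target path below is hypothetical.

/* tiocsti-demo.c -- illustrative only */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/dev/tty"; /* hypothetical target tty */
	const char *cmd = "id\n";
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (size_t i = 0; i < strlen(cmd); i++) {
		/* with tiocsti_restrict=1 this fails with EPERM unless fd is
		 * the caller's controlling tty or we have CAP_SYS_ADMIN */
		if (ioctl(fd, TIOCSTI, &cmd[i]) < 0) {
			perror("ioctl(TIOCSTI)");
			close(fd);
			return 1;
		}
	}
	close(fd);
	return 0;
}
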
diff --git a/fs/stat.c b/fs/stat.c -index f8e6fb2c3657..240c1432e18f 100644 +index c38e4c2e1221..6135fbaf7298 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -40,8 +40,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat) @@ -775,10 +743,10 @@ index f8e6fb2c3657..240c1432e18f 100644 stat->ctime = inode->i_ctime; stat->blksize = i_blocksize(inode); stat->blocks = inode->i_blocks; -@@ -75,9 +80,14 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat, - stat->result_mask |= STATX_BASIC_STATS; - request_mask &= STATX_ALL; - query_flags &= KSTAT_QUERY_FLAGS; +@@ -77,9 +82,14 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat, + if (IS_AUTOMOUNT(inode)) + stat->attributes |= STATX_ATTR_AUTOMOUNT; + - if (inode->i_op->getattr) - return inode->i_op->getattr(path, stat, request_mask, - query_flags); @@ -807,18 +775,18 @@ index 750621e41d1c..e7157c18c62c 100644 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) #endif diff --git a/include/linux/capability.h b/include/linux/capability.h -index f640dcbc880c..2b4f5d651f19 100644 +index ecce0f43c73a..e46306dd4401 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h -@@ -207,6 +207,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap); +@@ -208,6 +208,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap); extern bool has_ns_capability_noaudit(struct task_struct *t, struct user_namespace *ns, int cap); extern bool capable(int cap); +extern bool capable_noaudit(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); extern bool ns_capable_noaudit(struct user_namespace *ns, int cap); - #else -@@ -232,6 +233,10 @@ static inline bool capable(int cap) + extern bool ns_capable_setid(struct user_namespace *ns, int cap); +@@ -234,6 +235,10 @@ static inline bool capable(int cap) { return true; } @@ -830,12 +798,12 @@ index f640dcbc880c..2b4f5d651f19 100644 { return true; diff --git a/include/linux/fs.h b/include/linux/fs.h -index 805bf22898cf..e3a036f29e69 100644 +index dd28e7679089..cc0030c688d1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h -@@ -3438,4 +3438,15 @@ static inline bool dir_relax_shared(struct inode *inode) - extern bool path_noexec(const struct path *path); - extern void inode_nohighmem(struct inode *inode); +@@ -3534,4 +3534,15 @@ static inline struct sock *io_uring_get_socket(struct file *file) + } + #endif +extern int device_sidechannel_restrict; + @@ -850,11 +818,11 @@ index 805bf22898cf..e3a036f29e69 100644 + #endif /* _LINUX_FS_H */ diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h -index bdaf22582f6e..326ff15d4637 100644 +index e30d6132c633..d62017d489fa 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h -@@ -181,6 +181,9 @@ static inline void fsnotify_access(struct file *file) - struct inode *inode = path->dentry->d_inode; +@@ -207,6 +207,9 @@ static inline void fsnotify_access(struct file *file) + struct inode *inode = file_inode(file); __u32 mask = FS_ACCESS; + if (is_sidechannel_device(inode)) @@ -863,8 +831,8 @@ index bdaf22582f6e..326ff15d4637 100644 if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; -@@ -199,6 +202,9 @@ static inline void fsnotify_modify(struct file *file) - struct inode *inode = path->dentry->d_inode; +@@ -223,6 +226,9 @@ static inline void fsnotify_modify(struct file *file) + struct inode *inode = file_inode(file); __u32 mask = FS_MODIFY; + if (is_sidechannel_device(inode)) @@ -874,10 +842,10 @@ index bdaf22582f6e..326ff15d4637 100644 mask |= 
FS_ISDIR; diff --git a/include/linux/gfp.h b/include/linux/gfp.h -index a6afcec53795..dea3241398bb 100644 +index fdab7de7490d..13755aff72ab 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h -@@ -513,9 +513,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, +@@ -530,9 +530,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); @@ -890,10 +858,10 @@ index a6afcec53795..dea3241398bb 100644 #define __get_free_page(gfp_mask) \ __get_free_pages((gfp_mask), 0) diff --git a/include/linux/highmem.h b/include/linux/highmem.h -index 0690679832d4..b9394bc86fad 100644 +index ea5cdbd8c2c3..805b84d6bbca 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h -@@ -191,6 +191,13 @@ static inline void clear_highpage(struct page *page) +@@ -215,6 +215,13 @@ static inline void clear_highpage(struct page *page) kunmap_atomic(kaddr); } @@ -908,10 +876,10 @@ index 0690679832d4..b9394bc86fad 100644 unsigned start1, unsigned end1, unsigned start2, unsigned end2) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index eeceac3376fc..78ad558bce5f 100644 +index 690b238a44d5..06e831f45016 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h -@@ -490,7 +490,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS]; +@@ -535,7 +535,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS]; struct softirq_action { @@ -920,7 +888,7 @@ index eeceac3376fc..78ad558bce5f 100644 }; asmlinkage void do_softirq(void); -@@ -505,7 +505,7 @@ static inline void do_softirq_own_stack(void) +@@ -550,7 +550,7 @@ static inline void do_softirq_own_stack(void) } #endif @@ -943,10 +911,10 @@ index 069aa2ebef90..cb9e3637a620 100644 const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); diff --git a/include/linux/mm.h b/include/linux/mm.h -index 68a5121694ef..4f6c2e842744 100644 +index 6b10c21630f5..e992a6c03666 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h -@@ -570,7 +570,7 @@ static inline int is_vmalloc_or_module_addr(const void *x) +@@ -599,7 +599,7 @@ static inline int is_vmalloc_or_module_addr(const void *x) } #endif @@ -956,7 +924,7 @@ index 68a5121694ef..4f6c2e842744 100644 { return kvmalloc_node(size, flags, NUMA_NO_NODE); diff --git a/include/linux/percpu.h b/include/linux/percpu.h -index 296bbe49d5d1..b26652c9a98d 100644 +index 70b7123f38c7..09f3019489b2 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -129,7 +129,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, @@ -980,10 +948,10 @@ index 296bbe49d5d1..b26652c9a98d 100644 extern phys_addr_t per_cpu_ptr_to_phys(void *addr); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h -index 87f6db437e4a..bbcd76ec7d6e 100644 +index 1f678f023850..54a9effb6831 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h -@@ -1179,6 +1179,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, +@@ -1205,6 +1205,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, int perf_event_max_stack_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); @@ -996,10 +964,10 @@ index 87f6db437e4a..bbcd76ec7d6e 100644 { return sysctl_perf_event_paranoid > -1; diff --git a/include/linux/slab.h b/include/linux/slab.h -index 
14e3fe4bd6a1..2b1d16e90e75 100644 +index 9449b19c5f10..f7beb5b69b60 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h -@@ -178,8 +178,8 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *); +@@ -180,8 +180,8 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *); /* * Common kmalloc functions provided by all allocators */ @@ -1010,7 +978,7 @@ index 14e3fe4bd6a1..2b1d16e90e75 100644 void kfree(const void *); void kzfree(const void *); size_t ksize(const void *); -@@ -352,7 +352,7 @@ static __always_inline unsigned int kmalloc_index(size_t size) +@@ -385,7 +385,7 @@ static __always_inline unsigned int kmalloc_index(size_t size) } #endif /* !CONFIG_SLOB */ @@ -1019,7 +987,7 @@ index 14e3fe4bd6a1..2b1d16e90e75 100644 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc; void kmem_cache_free(struct kmem_cache *, void *); -@@ -376,7 +376,7 @@ static __always_inline void kfree_bulk(size_t size, void **p) +@@ -409,7 +409,7 @@ static __always_inline void kfree_bulk(size_t size, void **p) } #ifdef CONFIG_NUMA @@ -1028,16 +996,16 @@ index 14e3fe4bd6a1..2b1d16e90e75 100644 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc; #else static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) -@@ -498,7 +498,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) - * for general use, and so are not documented here. For a full list of - * potential flags, always refer to linux/gfp.h. +@@ -530,7 +530,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) + * Try really hard to succeed the allocation but fail + * eventually. */ -static __always_inline void *kmalloc(size_t size, gfp_t flags) +static __always_inline __attribute__((alloc_size(1))) void *kmalloc(size_t size, gfp_t flags) { if (__builtin_constant_p(size)) { - if (size > KMALLOC_MAX_CACHE_SIZE) -@@ -538,7 +538,7 @@ static __always_inline unsigned int kmalloc_size(unsigned int n) + #ifndef CONFIG_SLOB +@@ -572,7 +572,7 @@ static __always_inline unsigned int kmalloc_size(unsigned int n) return 0; } @@ -1047,7 +1015,7 @@ index 14e3fe4bd6a1..2b1d16e90e75 100644 #ifndef CONFIG_SLOB if (__builtin_constant_p(size) && diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h -index 3a1a1dbc6f49..ff38fec9eb76 100644 +index d2153789bd9f..97da977d6060 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -121,6 +121,11 @@ struct kmem_cache { @@ -1063,10 +1031,10 @@ index 3a1a1dbc6f49..ff38fec9eb76 100644 /* * Defragmentation by allocating from a remote node. 
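
Aside: the alloc_size() annotations added above to kmalloc(), kvmalloc(), __get_free_pages() and friends let the compiler propagate allocation sizes into __builtin_object_size(), which the fortified string helpers in the next hunks rely on. A userspace sketch of the mechanism, where my_alloc is a made-up stand-in for the annotated allocators; build with gcc -O2 so the builtin can fold:

/* alloc-size-demo.c -- illustrative only */
#include <stdio.h>
#include <stdlib.h>

static void *my_alloc(size_t n) __attribute__((alloc_size(1), malloc));
static void *my_alloc(size_t n) { return malloc(n); }

int main(void)
{
	char *p = my_alloc(16);

	/* with the attribute (and optimization on) the compiler knows the
	 * pointee is 16 bytes; without it this folds to (size_t)-1, i.e.
	 * "unknown", and fortified bounds checks cannot fire */
	printf("object size seen by the compiler: %zu\n",
	       __builtin_object_size(p, 0));

	free(p);
	return 0;
}
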
diff --git a/include/linux/string.h b/include/linux/string.h -index 4a5a0eb7df51..be86cf21d0ce 100644 +index 6ab0a6fa512e..d3c5b10a4102 100644 --- a/include/linux/string.h +++ b/include/linux/string.h -@@ -235,10 +235,16 @@ void __read_overflow2(void) __compiletime_error("detected read beyond size of ob +@@ -245,10 +245,16 @@ void __read_overflow2(void) __compiletime_error("detected read beyond size of ob void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter"); void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); @@ -1084,7 +1052,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 if (__builtin_constant_p(size) && p_size < size) __write_overflow(); if (p_size < size) -@@ -248,7 +254,7 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) +@@ -258,7 +264,7 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) __FORTIFY_INLINE char *strcat(char *p, const char *q) { @@ -1093,7 +1061,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 if (p_size == (size_t)-1) return __builtin_strcat(p, q); if (strlcat(p, q, p_size) >= p_size) -@@ -259,7 +265,7 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q) +@@ -269,7 +275,7 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q) __FORTIFY_INLINE __kernel_size_t strlen(const char *p) { __kernel_size_t ret; @@ -1102,7 +1070,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 /* Work around gcc excess stack consumption issue */ if (p_size == (size_t)-1 || -@@ -274,7 +280,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) +@@ -284,7 +290,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); __FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen) { @@ -1111,7 +1079,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? 
maxlen : p_size); if (p_size <= ret && maxlen != ret) fortify_panic(__func__); -@@ -286,8 +292,8 @@ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); +@@ -296,8 +302,8 @@ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) { size_t ret; @@ -1122,7 +1090,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 if (p_size == (size_t)-1 && q_size == (size_t)-1) return __real_strlcpy(p, q, size); ret = strlen(q); -@@ -307,8 +313,8 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) +@@ -317,8 +323,8 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) { size_t p_len, copy_len; @@ -1133,7 +1101,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 if (p_size == (size_t)-1 && q_size == (size_t)-1) return __builtin_strncat(p, q, count); p_len = strlen(p); -@@ -421,8 +427,8 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) +@@ -431,8 +437,8 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) /* defined after fortified strlen and memcpy to reuse them */ __FORTIFY_INLINE char *strcpy(char *p, const char *q) { @@ -1145,7 +1113,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 return __builtin_strcpy(p, q); memcpy(p, q, strlen(q) + 1); diff --git a/include/linux/tty.h b/include/linux/tty.h -index c56e3978b00f..1625c85f31f0 100644 +index bfa4e2ee94a9..3e18d583fc8d 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -14,6 +14,7 @@ @@ -1208,10 +1176,10 @@ index 398e9c95cd61..baab7195306a 100644 extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags); static inline void *__vmalloc_node_flags_caller(unsigned long size, int node, diff --git a/init/Kconfig b/init/Kconfig -index 041f3a022122..0ddcf8b083d4 100644 +index 4592bf7997c0..2a5dfc8ed04f 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -294,6 +294,7 @@ config USELIB +@@ -342,6 +342,7 @@ config USELIB config AUDIT bool "Auditing support" depends on NET @@ -1219,7 +1187,7 @@ index 041f3a022122..0ddcf8b083d4 100644 help Enable auditing infrastructure that can be used with another kernel subsystem, such as SELinux (which requires this for -@@ -1036,6 +1037,12 @@ config CC_OPTIMIZE_FOR_SIZE +@@ -1139,6 +1140,12 @@ config CC_OPTIMIZE_FOR_SIZE endchoice @@ -1232,7 +1200,7 @@ index 041f3a022122..0ddcf8b083d4 100644 config HAVE_LD_DEAD_CODE_DATA_ELIMINATION bool help -@@ -1319,8 +1326,7 @@ config SHMEM +@@ -1425,8 +1432,7 @@ config SHMEM which may be appropriate on small systems without swap. config AIO @@ -1242,7 +1210,7 @@ index 041f3a022122..0ddcf8b083d4 100644 help This option enables POSIX asynchronous I/O which may by used by some high performance threaded applications. Disabling -@@ -1549,7 +1555,7 @@ config VM_EVENT_COUNTERS +@@ -1652,7 +1658,7 @@ config VM_EVENT_COUNTERS config SLUB_DEBUG default y @@ -1251,7 +1219,7 @@ index 041f3a022122..0ddcf8b083d4 100644 depends on SLUB && SYSFS help SLUB has extensive debug support features. Disabling these can -@@ -1573,7 +1579,6 @@ config SLUB_MEMCG_SYSFS_ON +@@ -1676,7 +1682,6 @@ config SLUB_MEMCG_SYSFS_ON config COMPAT_BRK bool "Disable heap randomization" @@ -1259,7 +1227,7 @@ index 041f3a022122..0ddcf8b083d4 100644 help Randomizing heap placement makes heap exploits harder, but it also breaks ancient binaries (including anything libc5 based). 
-@@ -1620,7 +1625,6 @@ endchoice +@@ -1723,7 +1728,6 @@ endchoice config SLAB_MERGE_DEFAULT bool "Allow slab caches to be merged" @@ -1267,7 +1235,7 @@ index 041f3a022122..0ddcf8b083d4 100644 help For reduced kernel memory fragmentation, slab caches can be merged when they share the same size and other characteristics. -@@ -1633,9 +1637,9 @@ config SLAB_MERGE_DEFAULT +@@ -1736,9 +1740,9 @@ config SLAB_MERGE_DEFAULT command line. config SLAB_FREELIST_RANDOM @@ -1278,7 +1246,7 @@ index 041f3a022122..0ddcf8b083d4 100644 help Randomizes the freelist order used on creating new pages. This security feature reduces the predictability of the kernel slab -@@ -1644,12 +1648,56 @@ config SLAB_FREELIST_RANDOM +@@ -1747,12 +1751,56 @@ config SLAB_FREELIST_RANDOM config SLAB_FREELIST_HARDENED bool "Harden slab freelist metadata" depends on SLUB @@ -1336,10 +1304,10 @@ index 041f3a022122..0ddcf8b083d4 100644 default y depends on SLUB && SMP diff --git a/kernel/audit.c b/kernel/audit.c -index e7478cb58079..69be132956df 100644 +index c89ea48c70a6..eeac2e76739d 100644 --- a/kernel/audit.c +++ b/kernel/audit.c -@@ -1631,6 +1631,9 @@ static int __init audit_enable(char *str) +@@ -1641,6 +1641,9 @@ static int __init audit_enable(char *str) if (audit_default == AUDIT_OFF) audit_initialized = AUDIT_DISABLED; @@ -1350,23 +1318,23 @@ index e7478cb58079..69be132956df 100644 pr_err("audit: error setting audit state (%d)\n", audit_default); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c -index 1e5625d46414..71cac92b6629 100644 +index 06ba9c5f156b..135eaaeff0b5 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c -@@ -367,7 +367,7 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) +@@ -521,7 +521,7 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) #ifdef CONFIG_BPF_JIT /* All BPF JIT sysctl knobs here. 
*/ int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON); -int bpf_jit_harden __read_mostly; +int bpf_jit_harden __read_mostly = 2; int bpf_jit_kallsyms __read_mostly; + long bpf_jit_limit __read_mostly; - static __always_inline void diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c -index a31a1ba0f8ea..1d5093b51c63 100644 +index db6e825e2958..9b8d4b5368b1 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c -@@ -48,7 +48,7 @@ static DEFINE_SPINLOCK(prog_idr_lock); +@@ -47,7 +47,7 @@ static DEFINE_SPINLOCK(prog_idr_lock); static DEFINE_IDR(map_idr); static DEFINE_SPINLOCK(map_idr_lock); @@ -1376,10 +1344,10 @@ index a31a1ba0f8ea..1d5093b51c63 100644 static const struct bpf_map_ops * const bpf_map_types[] = { #define BPF_PROG_TYPE(_id, _ops) diff --git a/kernel/capability.c b/kernel/capability.c -index 1e1c0236f55b..452062fe45ce 100644 +index 1444f3954d75..8cc9dd7992f2 100644 --- a/kernel/capability.c +++ b/kernel/capability.c -@@ -431,6 +431,12 @@ bool capable(int cap) +@@ -449,6 +449,12 @@ bool capable(int cap) return ns_capable(&init_user_ns, cap); } EXPORT_SYMBOL(capable); @@ -1393,10 +1361,10 @@ index 1e1c0236f55b..452062fe45ce 100644 /** diff --git a/kernel/events/core.c b/kernel/events/core.c -index eec2d5fb676b..9040756bbb0a 100644 +index dc7dead2d2cc..e078b1fec819 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c -@@ -397,8 +397,13 @@ static cpumask_var_t perf_online_mask; +@@ -398,8 +398,13 @@ static cpumask_var_t perf_online_mask; * 0 - disallow raw tracepoint access for unpriv * 1 - disallow cpu events for unpriv * 2 - disallow kernel profiling for unpriv @@ -1410,7 +1378,7 @@ index eec2d5fb676b..9040756bbb0a 100644 /* Minimum for 512 kiB + 1 user control page */ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ -@@ -10408,6 +10413,9 @@ SYSCALL_DEFINE5(perf_event_open, +@@ -10748,6 +10753,9 @@ SYSCALL_DEFINE5(perf_event_open, if (flags & ~PERF_FLAG_ALL) return -EINVAL; @@ -1421,7 +1389,7 @@ index eec2d5fb676b..9040756bbb0a 100644 if (err) return err; diff --git a/kernel/fork.c b/kernel/fork.c -index 1b27babc4c78..a88dd3ccd31c 100644 +index 2628f3773ca8..a2da35b446a6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -103,6 +103,11 @@ @@ -1436,7 +1404,7 @@ index 1b27babc4c78..a88dd3ccd31c 100644 /* * Minimum number of threads to boot the kernel -@@ -1624,6 +1629,10 @@ static __latent_entropy struct task_struct *copy_process( +@@ -1719,6 +1724,10 @@ static __latent_entropy struct task_struct *copy_process( if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); @@ -1447,7 +1415,7 @@ index 1b27babc4c78..a88dd3ccd31c 100644 /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. 
-@@ -2420,6 +2429,12 @@ int ksys_unshare(unsigned long unshare_flags) +@@ -2554,6 +2563,12 @@ int ksys_unshare(unsigned long unshare_flags) if (unshare_flags & CLONE_NEWNS) unshare_flags |= CLONE_FS; @@ -1461,10 +1429,10 @@ index 1b27babc4c78..a88dd3ccd31c 100644 if (err) goto bad_unshare_out; diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c -index 3d37c279c090..0789ca413f09 100644 +index f08a1e4ee1d4..ece99ca24ed0 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c -@@ -1138,7 +1138,7 @@ void free_basic_memory_bitmaps(void) +@@ -1142,7 +1142,7 @@ void free_basic_memory_bitmaps(void) void clear_free_pages(void) { @@ -1473,7 +1441,7 @@ index 3d37c279c090..0789ca413f09 100644 struct memory_bitmap *bm = free_pages_map; unsigned long pfn; -@@ -1155,7 +1155,7 @@ void clear_free_pages(void) +@@ -1159,7 +1159,7 @@ void clear_free_pages(void) } memory_bm_position_reset(bm); pr_info("free pages cleared after restore\n"); @@ -1483,36 +1451,36 @@ index 3d37c279c090..0789ca413f09 100644 /** diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c -index a64eee0db39e..4d7de378fe4c 100644 +index 911bd9076d43..b65e2ee716c4 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c -@@ -164,7 +164,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) - } +@@ -74,7 +74,7 @@ void rcu_sched_clock_irq(int user) } + /* Invoke the RCU callbacks whose grace period has elapsed. */ -static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) +static __latent_entropy void rcu_process_callbacks(void) { - __rcu_process_callbacks(&rcu_sched_ctrlblk); - __rcu_process_callbacks(&rcu_bh_ctrlblk); + struct rcu_head *next, *list; + unsigned long flags; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c -index aa7cade1b9f3..d6be47e1c86f 100644 +index acd6ccf56faf..17db4829d2c0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -2791,7 +2791,7 @@ __rcu_process_callbacks(struct rcu_state *rsp) - /* - * Do RCU core processing for the current CPU. - */ --static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) -+static __latent_entropy void rcu_process_callbacks(void) - { - struct rcu_state *rsp; +@@ -2731,7 +2731,7 @@ void rcu_fwd_progress_check(unsigned long j) + EXPORT_SYMBOL_GPL(rcu_fwd_progress_check); + /* Perform RCU core processing work for the current CPU. */ +-static __latent_entropy void rcu_core(struct softirq_action *unused) ++static __latent_entropy void rcu_core(void) + { + unsigned long flags; + struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 9c219f7b0970..963a68e64593 100644 +index 232491e3ed0d..194d10702841 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -9862,7 +9862,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) +@@ -10117,7 +10117,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) * run_rebalance_domains is triggered when needed from the scheduler tick. * Also triggered for nohz idle balancing (with nohz_balancing_kick set). */ @@ -1522,7 +1490,7 @@ index 9c219f7b0970..963a68e64593 100644 struct rq *this_rq = this_rq(); enum cpu_idle_type idle = this_rq->idle_balance ? 
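
Aside: the kernel/fork.c hunks above gate user-namespace creation for unprivileged tasks behind the new kernel.unprivileged_userns_clone sysctl (its kern_table entry appears in the sysctl.c hunks further down). A quick userspace probe of that policy, illustrative only:

/* userns-probe.c -- illustrative only */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sched.h>

int main(void)
{
	if (unshare(CLONE_NEWUSER) == 0) {
		puts("user namespace created (sysctl enabled or CAP_SYS_ADMIN)");
	} else {
		/* EPERM is the expected result with
		 * kernel.unprivileged_userns_clone = 0 */
		printf("unshare(CLONE_NEWUSER): %s\n", strerror(errno));
	}
	return 0;
}
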
diff --git a/kernel/softirq.c b/kernel/softirq.c -index 6f584861d329..1943fe60f3b9 100644 +index 10277429ed84..d1323bdfc20c 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -53,7 +53,7 @@ DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); @@ -1534,7 +1502,7 @@ index 6f584861d329..1943fe60f3b9 100644 DEFINE_PER_CPU(struct task_struct *, ksoftirqd); -@@ -289,7 +289,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) +@@ -290,7 +290,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) kstat_incr_softirqs_this_cpu(vec_nr); trace_softirq_entry(vec_nr); @@ -1543,7 +1511,7 @@ index 6f584861d329..1943fe60f3b9 100644 trace_softirq_exit(vec_nr); if (unlikely(prev_count != preempt_count())) { pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", -@@ -451,7 +451,7 @@ void __raise_softirq_irqoff(unsigned int nr) +@@ -453,7 +453,7 @@ void __raise_softirq_irqoff(unsigned int nr) or_softirq_pending(1UL << nr); } @@ -1552,7 +1520,7 @@ index 6f584861d329..1943fe60f3b9 100644 { softirq_vec[nr].action = action; } -@@ -497,8 +497,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) +@@ -499,8 +499,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) } EXPORT_SYMBOL(__tasklet_hi_schedule); @@ -1562,7 +1530,7 @@ index 6f584861d329..1943fe60f3b9 100644 unsigned int softirq_nr) { struct tasklet_struct *list; -@@ -535,14 +534,14 @@ static void tasklet_action_common(struct softirq_action *a, +@@ -537,14 +536,14 @@ static void tasklet_action_common(struct softirq_action *a, } } @@ -1582,18 +1550,18 @@ index 6f584861d329..1943fe60f3b9 100644 void tasklet_init(struct tasklet_struct *t, diff --git a/kernel/sysctl.c b/kernel/sysctl.c -index 2d9837c0aff4..852fea68d574 100644 +index 387efbaf464a..238a6d0c25dc 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c -@@ -67,6 +67,7 @@ +@@ -66,6 +66,7 @@ + #include <linux/kexec.h> #include <linux/bpf.h> #include <linux/mount.h> - #include <linux/pipe_fs_i.h> +#include <linux/tty.h> - #include <linux/uaccess.h> - #include <asm/processor.h> -@@ -99,12 +100,19 @@ + #include "../lib/kstrtox.h" + +@@ -102,12 +103,19 @@ #if defined(CONFIG_SYSCTL) /* External variables not in a header file. 
*/ @@ -1613,7 +1581,7 @@ index 2d9837c0aff4..852fea68d574 100644 extern int pid_max; extern int pid_max_min, pid_max_max; extern int percpu_pagelist_fraction; -@@ -116,40 +124,43 @@ extern int sysctl_nr_trim_pages; +@@ -119,35 +127,35 @@ extern int sysctl_nr_trim_pages; /* Constants used for minimum and maximum */ #ifdef CONFIG_LOCKUP_DETECTOR @@ -1624,17 +1592,22 @@ index 2d9837c0aff4..852fea68d574 100644 -static int __maybe_unused neg_one = -1; +static int __maybe_unused neg_one __read_only = -1; - static int zero; +-static int zero; -static int __maybe_unused one = 1; -static int __maybe_unused two = 2; -static int __maybe_unused four = 4; +-static unsigned long zero_ul; -static unsigned long one_ul = 1; +-static unsigned long long_max = LONG_MAX; -static int one_hundred = 100; -static int one_thousand = 1000; ++static int zero __read_only; +static int __maybe_unused one __read_only = 1; +static int __maybe_unused two __read_only = 2; +static int __maybe_unused four __read_only = 4; ++static unsigned long zero_ul __read_only; +static unsigned long one_ul __read_only = 1; ++static unsigned long long_max __read_only = LONG_MAX; +static int one_hundred __read_only = 100; +static int one_thousand __read_only = 1000; #ifdef CONFIG_PRINTK @@ -1660,7 +1633,10 @@ index 2d9837c0aff4..852fea68d574 100644 +static int ngroups_max __read_only = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; - /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */ + /* +@@ -155,9 +163,12 @@ static const int cap_last_cap = CAP_LAST_CAP; + * and hung_task_check_interval_secs + */ #ifdef CONFIG_DETECT_HUNG_TASK -static unsigned long hung_task_timeout_max = (LONG_MAX/HZ); +static unsigned long hung_task_timeout_max __read_only = (LONG_MAX/HZ); @@ -1672,7 +1648,7 @@ index 2d9837c0aff4..852fea68d574 100644 #ifdef CONFIG_INOTIFY_USER #include <linux/inotify.h> #endif -@@ -293,19 +304,19 @@ static struct ctl_table sysctl_base_table[] = { +@@ -306,19 +317,19 @@ static struct ctl_table sysctl_base_table[] = { }; #ifdef CONFIG_SCHED_DEBUG @@ -1700,7 +1676,7 @@ index 2d9837c0aff4..852fea68d574 100644 #endif static struct ctl_table kern_table[] = { -@@ -519,6 +530,15 @@ static struct ctl_table kern_table[] = { +@@ -535,6 +546,15 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif @@ -1716,13 +1692,13 @@ index 2d9837c0aff4..852fea68d574 100644 #ifdef CONFIG_PROC_SYSCTL { .procname = "tainted", -@@ -867,6 +887,37 @@ static struct ctl_table kern_table[] = { +@@ -890,6 +910,37 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &two, }, +#endif +#if defined CONFIG_TTY -+ { ++ { + .procname = "tiocsti_restrict", + .data = &tiocsti_restrict, + .maxlen = sizeof(int), @@ -1730,7 +1706,7 @@ index 2d9837c0aff4..852fea68d574 100644 + .proc_handler = proc_dointvec_minmax_sysadmin, + .extra1 = &zero, + .extra2 = &one, -+ }, ++ }, +#endif + { + .procname = "device_sidechannel_restrict", @@ -1755,10 +1731,10 @@ index 2d9837c0aff4..852fea68d574 100644 { .procname = "ngroups_max", diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 3e93c54bd3a1..0fed811ed850 100644 +index 41dfff23c1f9..298a1554c3e4 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c -@@ -1462,7 +1462,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, +@@ -1453,7 +1453,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, } } @@ -1768,10 +1744,10 @@ index 3e93c54bd3a1..0fed811ed850 100644 struct 
hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); unsigned long flags; diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index cc2d23e6ff61..438be7646454 100644 +index 2fce056f8a49..acac240068c1 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c -@@ -1672,7 +1672,7 @@ static inline void __run_timers(struct timer_base *base) +@@ -1687,7 +1687,7 @@ static inline void __run_timers(struct timer_base *base) /* * This function runs timers and the timer-tq in bottom half context. */ @@ -1781,7 +1757,7 @@ index cc2d23e6ff61..438be7646454 100644 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c -index c3d7583fcd21..6ee37e516869 100644 +index 923414a246e9..6b9dbc257e34 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -26,6 +26,9 @@ @@ -1795,10 +1771,20 @@ index c3d7583fcd21..6ee37e516869 100644 static DEFINE_MUTEX(userns_state_mutex); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug -index 8838d1158d19..a208770a0e9e 100644 +index d5a4a4036d2f..b16d39c4c407 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug -@@ -945,6 +945,7 @@ endmenu # "Debug lockups and hangs" +@@ -350,6 +350,9 @@ config SECTION_MISMATCH_WARN_ONLY + + If unsure, say Y. + ++config DEBUG_WRITABLE_FUNCTION_POINTERS_VERBOSE ++ bool "Enable verbose reporting of writable function pointers" ++ + # + # Select this config option from the architecture Kconfig, if it + # is preferred to always offer frame pointers as a config +@@ -966,6 +969,7 @@ endmenu # "Debug lockups and hangs" config PANIC_ON_OOPS bool "Panic on Oops" @@ -1806,7 +1792,7 @@ index 8838d1158d19..a208770a0e9e 100644 help Say Y here to enable the kernel to panic when it oopses. This has the same effect as setting oops=panic on the kernel command -@@ -954,7 +955,7 @@ config PANIC_ON_OOPS +@@ -975,7 +979,7 @@ config PANIC_ON_OOPS anything erroneous after an oops which could result in data corruption or other issues. @@ -1815,7 +1801,7 @@ index 8838d1158d19..a208770a0e9e 100644 config PANIC_ON_OOPS_VALUE int -@@ -1323,6 +1324,7 @@ config DEBUG_BUGVERBOSE +@@ -1344,6 +1348,7 @@ config DEBUG_BUGVERBOSE config DEBUG_LIST bool "Debug linked list manipulation" depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION @@ -1823,7 +1809,7 @@ index 8838d1158d19..a208770a0e9e 100644 help Enable this to turn on extended checks in the linked-list walking routines. 
-@@ -1983,6 +1985,7 @@ config MEMTEST +@@ -2026,6 +2031,7 @@ config MEMTEST config BUG_ON_DATA_CORRUPTION bool "Trigger a BUG when data corruption is detected" select DEBUG_LIST @@ -1831,7 +1817,7 @@ index 8838d1158d19..a208770a0e9e 100644 help Select this option if the kernel should BUG when it encounters data corruption in kernel memory structures when they get checked -@@ -2022,6 +2025,7 @@ config STRICT_DEVMEM +@@ -2065,6 +2071,7 @@ config STRICT_DEVMEM config IO_STRICT_DEVMEM bool "Filter I/O access to /dev/mem" depends on STRICT_DEVMEM @@ -1840,7 +1826,7 @@ index 8838d1158d19..a208770a0e9e 100644 If this option is disabled, you allow userspace (root) access to all io-memory regardless of whether a driver is actively using that diff --git a/lib/irq_poll.c b/lib/irq_poll.c -index 86a709954f5a..6f15787fcb1b 100644 +index 2f17b488d58e..b6e7996a0058 100644 --- a/lib/irq_poll.c +++ b/lib/irq_poll.c @@ -75,7 +75,7 @@ void irq_poll_complete(struct irq_poll *iop) @@ -1853,10 +1839,10 @@ index 86a709954f5a..6f15787fcb1b 100644 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll); int rearm = 0, budget = irq_poll_budget; diff --git a/lib/kobject.c b/lib/kobject.c -index 18989b5b3b56..bd46da8243a6 100644 +index aa89edcd2b63..c505d13ba323 100644 --- a/lib/kobject.c +++ b/lib/kobject.c -@@ -952,9 +952,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add); +@@ -978,9 +978,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add); static DEFINE_SPINLOCK(kobj_ns_type_lock); @@ -1869,10 +1855,10 @@ index 18989b5b3b56..bd46da8243a6 100644 enum kobj_ns_type type = ops->type; int error; diff --git a/lib/nlattr.c b/lib/nlattr.c -index dfa55c873c13..c6b0436f473d 100644 +index d26de6156b97..ed11787fcfe7 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c -@@ -364,6 +364,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count) +@@ -539,6 +539,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count) { int minlen = min_t(int, count, nla_len(src)); @@ -1882,11 +1868,11 @@ index dfa55c873c13..c6b0436f473d 100644 if (count > minlen) memset(dest + minlen, 0, count - minlen); diff --git a/lib/vsprintf.c b/lib/vsprintf.c -index a48aaa79d352..a57213b70cad 100644 +index 791b6fa36905..6d6a3ad3bf0f 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c -@@ -1371,7 +1371,7 @@ char *pointer_string(char *buf, char *end, const void *ptr, - return number(buf, end, (unsigned long int)ptr, spec); +@@ -1476,7 +1476,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr, + return string(buf, end, uuid, spec); } -int kptr_restrict __read_mostly; @@ -1895,10 +1881,10 @@ index a48aaa79d352..a57213b70cad 100644 static noinline_for_stack char *restricted_pointer(char *buf, char *end, const void *ptr, diff --git a/mm/Kconfig b/mm/Kconfig -index ce95491abd6a..19b62893da3d 100644 +index 2e6d24d783f7..c378add17049 100644 --- a/mm/Kconfig +++ b/mm/Kconfig -@@ -312,7 +312,8 @@ config KSM +@@ -306,7 +306,8 @@ config KSM config DEFAULT_MMAP_MIN_ADDR int "Low address space to protect from user allocation" depends on MMU @@ -1909,10 +1895,10 @@ index ce95491abd6a..19b62893da3d 100644 This is the portion of low virtual memory which should be protected from userspace allocation. 
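
Aside: the mm/Kconfig hunk above raises the default vm.mmap_min_addr from 4K to 64K (32K on ARM), so an unprivileged mapping of page zero -- the classic aid for exploiting kernel NULL-pointer dereferences -- is refused. An illustrative probe, not part of the patch:

/* mmap-min-addr-demo.c -- illustrative only */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		/* EPERM/EACCES expected while vm.mmap_min_addr > 0
		 * and the caller lacks CAP_SYS_RAWIO */
		printf("mmap at address 0: %s\n", strerror(errno));
	else
		puts("mapped page zero (mmap_min_addr must be relaxed)");
	return 0;
}
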
Keeping a user from writing to low pages diff --git a/mm/mmap.c b/mm/mmap.c -index 17bbf4d3e24f..40ac799db35b 100644 +index 2d6a6662edb9..e154e7fc1d8a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c -@@ -229,6 +229,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) +@@ -233,6 +233,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) newbrk = PAGE_ALIGN(brk); oldbrk = PAGE_ALIGN(mm->brk); @@ -1923,24 +1909,24 @@ index 17bbf4d3e24f..40ac799db35b 100644 + if (mm->brk == min_brk) + oldbrk -= PAGE_SIZE; + } - if (oldbrk == newbrk) - goto set_brk; - + if (oldbrk == newbrk) { + mm->brk = brk; + goto success; diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index 3222193c46c6..b8e36bed196d 100644 +index 475ca5b1a824..87371ac3ad1e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -67,6 +67,7 @@ - #include <linux/ftrace.h> #include <linux/lockdep.h> #include <linux/nmi.h> + #include <linux/psi.h> +#include <linux/random.h> #include <asm/sections.h> #include <asm/tlbflush.h> -@@ -100,6 +101,15 @@ int _node_numa_mem_[MAX_NUMNODES]; +@@ -104,6 +105,15 @@ struct pcpu_drain { DEFINE_MUTEX(pcpu_drain_mutex); - DEFINE_PER_CPU(struct work_struct, pcpu_drain); + DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain); +bool __meminitdata extra_latent_entropy; + @@ -1954,7 +1940,7 @@ index 3222193c46c6..b8e36bed196d 100644 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY volatile unsigned long latent_entropy __latent_entropy; EXPORT_SYMBOL(latent_entropy); -@@ -1027,6 +1037,13 @@ static __always_inline bool free_pages_prepare(struct page *page, +@@ -1142,6 +1152,13 @@ static __always_inline bool free_pages_prepare(struct page *page, debug_check_no_obj_freed(page_address(page), PAGE_SIZE << order); } @@ -1968,10 +1954,13 @@ index 3222193c46c6..b8e36bed196d 100644 arch_free_page(page, order); kernel_poison_pages(page, 1 << order, 0); kernel_map_pages(page, 1 << order, 0); -@@ -1267,6 +1284,21 @@ static void __init __free_pages_boot_core(struct page *page, unsigned int order) - __ClearPageReserved(p); - set_page_count(p, 0); +@@ -1373,6 +1390,25 @@ static void __free_pages_ok(struct page *page, unsigned int order) + local_irq_restore(flags); + } ++static void __init __gather_extra_latent_entropy(struct page *page, ++ unsigned int nr_pages) ++{ + if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) { + unsigned long hash = 0; + size_t index, end = PAGE_SIZE * nr_pages / sizeof hash; @@ -1986,11 +1975,44 @@ index 3222193c46c6..b8e36bed196d 100644 + add_device_randomness((const void *)&hash, sizeof(hash)); +#endif + } ++} + - page_zone(page)->managed_pages += nr_pages; + void __free_pages_core(struct page *page, unsigned int order) + { + unsigned int nr_pages = 1 << order; +@@ -1387,7 +1423,6 @@ void __free_pages_core(struct page *page, unsigned int order) + } + __ClearPageReserved(p); + set_page_count(p, 0); +- + atomic_long_add(nr_pages, &page_zone(page)->managed_pages); set_page_refcounted(page); __free_pages(page, order); -@@ -1855,8 +1887,8 @@ static inline int check_new_page(struct page *page) +@@ -1452,6 +1487,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn, + { + if (early_page_uninitialised(pfn)) + return; ++ __gather_extra_latent_entropy(page, 1 << order); + __free_pages_core(page, order); + } + +@@ -1542,6 +1578,7 @@ static void __init deferred_free_range(unsigned long pfn, + if (nr_pages == pageblock_nr_pages && + (pfn & (pageblock_nr_pages - 1)) == 0) { + set_pageblock_migratetype(page, MIGRATE_MOVABLE); ++ __gather_extra_latent_entropy(page, 1 << pageblock_order); + 
__free_pages_core(page, pageblock_order); + return; + } +@@ -1549,6 +1586,7 @@ static void __init deferred_free_range(unsigned long pfn, + for (i = 0; i < nr_pages; i++, page++, pfn++) { + if ((pfn & (pageblock_nr_pages - 1)) == 0) + set_pageblock_migratetype(page, MIGRATE_MOVABLE); ++ __gather_extra_latent_entropy(page, 1); + __free_pages_core(page, 0); + } + } +@@ -1969,8 +2007,8 @@ static inline int check_new_page(struct page *page) static inline bool free_pages_prezeroed(void) { @@ -2001,7 +2023,7 @@ index 3222193c46c6..b8e36bed196d 100644 } #ifdef CONFIG_DEBUG_VM -@@ -1913,6 +1945,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags +@@ -2027,6 +2065,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags post_alloc_hook(page, order, gfp_flags); @@ -2014,10 +2036,10 @@ index 3222193c46c6..b8e36bed196d 100644 for (i = 0; i < (1 << order); i++) clear_highpage(page + i); diff --git a/mm/slab.h b/mm/slab.h -index 68bdf498da3b..079ff5df3b16 100644 +index 43ac818b8592..0f4fc66322c4 100644 --- a/mm/slab.h +++ b/mm/slab.h -@@ -313,7 +313,11 @@ static inline bool is_root_cache(struct kmem_cache *s) +@@ -310,7 +310,11 @@ static inline bool is_root_cache(struct kmem_cache *s) static inline bool slab_equal_or_root(struct kmem_cache *s, struct kmem_cache *p) { @@ -2029,7 +2051,7 @@ index 68bdf498da3b..079ff5df3b16 100644 } static inline const char *cache_name(struct kmem_cache *s) -@@ -365,18 +369,26 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) +@@ -362,18 +366,26 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) * to not do even the assignment. In that case, slab_equal_or_root * will also be a constant. */ @@ -2057,7 +2079,7 @@ index 68bdf498da3b..079ff5df3b16 100644 return s; } -@@ -401,7 +413,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s) +@@ -398,7 +410,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s) * back there or track user information then we can * only use the space before that information. 
*/ @@ -2067,7 +2089,7 @@ index 68bdf498da3b..079ff5df3b16 100644 /* * Else we can use all the padding etc for the allocation diff --git a/mm/slab_common.c b/mm/slab_common.c -index 2296caf87bfb..7abd5a11e12d 100644 +index 58251ba63e4a..fbaacef2acaf 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -27,10 +27,10 @@ @@ -2093,10 +2115,10 @@ index 2296caf87bfb..7abd5a11e12d 100644 static int __init setup_slab_nomerge(char *str) { diff --git a/mm/slub.c b/mm/slub.c -index 51258eff4178..075266bfbccf 100644 +index d30ede89f4a6..37db8891a099 100644 --- a/mm/slub.c +++ b/mm/slub.c -@@ -125,6 +125,16 @@ static inline int kmem_cache_debug(struct kmem_cache *s) +@@ -124,6 +124,16 @@ static inline int kmem_cache_debug(struct kmem_cache *s) #endif } @@ -2113,7 +2135,7 @@ index 51258eff4178..075266bfbccf 100644 void *fixup_red_left(struct kmem_cache *s, void *p) { if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) -@@ -299,6 +309,35 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) +@@ -308,6 +318,35 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr); } @@ -2149,7 +2171,7 @@ index 51258eff4178..075266bfbccf 100644 /* Loop over all objects in a slab */ #define for_each_object(__p, __s, __addr, __objects) \ for (__p = fixup_red_left(__s, __addr); \ -@@ -471,13 +510,13 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p) +@@ -475,13 +514,13 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p) * Debug settings: */ #if defined(CONFIG_SLUB_DEBUG_ON) @@ -2167,7 +2189,7 @@ index 51258eff4178..075266bfbccf 100644 /* * slub is about to manipulate internal object metadata. This memory lies -@@ -537,6 +576,9 @@ static struct track *get_track(struct kmem_cache *s, void *object, +@@ -542,6 +581,9 @@ static struct track *get_track(struct kmem_cache *s, void *object, else p = object + s->inuse; @@ -2177,7 +2199,7 @@ index 51258eff4178..075266bfbccf 100644 return p + alloc; } -@@ -676,6 +718,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) +@@ -681,6 +723,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) else off = s->inuse; @@ -2187,7 +2209,7 @@ index 51258eff4178..075266bfbccf 100644 if (s->flags & SLAB_STORE_USER) off += 2 * sizeof(struct track); -@@ -805,6 +850,9 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) +@@ -810,6 +855,9 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) /* Freepointer is placed after the object. 
*/ off += sizeof(void *); @@ -2197,18 +2219,18 @@ index 51258eff4178..075266bfbccf 100644 if (s->flags & SLAB_STORE_USER) /* We also have user information there */ off += 2 * sizeof(struct track); -@@ -1419,8 +1467,9 @@ static void setup_object(struct kmem_cache *s, struct page *page, +@@ -1475,8 +1523,9 @@ static void *setup_object(struct kmem_cache *s, struct page *page, void *object) { setup_object_debug(s, page, object); + set_canary(s, object, s->random_inactive); - kasan_init_slab_obj(s, object); + object = kasan_init_slab_obj(s, object); - if (unlikely(s->ctor)) { + if (unlikely(s->ctor) && !has_sanitize_verify(s)) { kasan_unpoison_object_data(s, object); s->ctor(object); kasan_poison_object_data(s, object); -@@ -2702,9 +2751,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, +@@ -2750,9 +2799,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, stat(s, ALLOC_FASTPATH); } @@ -2231,7 +2253,7 @@ index 51258eff4178..075266bfbccf 100644 slab_post_alloc_hook(s, gfpflags, 1, &object); return object; -@@ -2911,6 +2972,27 @@ static __always_inline void do_slab_free(struct kmem_cache *s, +@@ -2959,6 +3020,27 @@ static __always_inline void do_slab_free(struct kmem_cache *s, void *tail_obj = tail ? : head; struct kmem_cache_cpu *c; unsigned long tid; @@ -2259,7 +2281,7 @@ index 51258eff4178..075266bfbccf 100644 redo: /* * Determine the currently cpus per cpu slab. -@@ -3087,7 +3169,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, +@@ -3135,7 +3217,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p) { struct kmem_cache_cpu *c; @@ -2268,7 +2290,7 @@ index 51258eff4178..075266bfbccf 100644 /* memcg and kmem_cache debug support */ s = slab_pre_alloc_hook(s, flags); -@@ -3124,13 +3206,29 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, +@@ -3172,13 +3254,29 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, local_irq_enable(); /* Clear memory outside IRQ disabled fastpath loop */ @@ -2299,7 +2321,7 @@ index 51258eff4178..075266bfbccf 100644 /* memcg and kmem_cache debug support */ slab_post_alloc_hook(s, flags, size, p); return i; -@@ -3162,9 +3260,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk); +@@ -3210,9 +3308,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk); * and increases the number of allocations possible without having to * take the list_lock. */ @@ -2312,15 +2334,15 @@ index 51258eff4178..075266bfbccf 100644 /* * Calculate the order of allocation given an slab object size. 
-@@ -3336,6 +3434,7 @@ static void early_kmem_cache_node_alloc(int node) +@@ -3380,6 +3478,7 @@ static void early_kmem_cache_node_alloc(int node) init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); init_tracking(kmem_cache_node, n); #endif + set_canary(kmem_cache_node, n, kmem_cache_node->random_active); - kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node), + n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node), GFP_KERNEL); - init_kmem_cache_node(n); -@@ -3492,6 +3591,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) + page->freelist = get_freepointer(kmem_cache_node, n); +@@ -3540,6 +3639,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) size += sizeof(void *); } @@ -2330,7 +2352,7 @@ index 51258eff4178..075266bfbccf 100644 #ifdef CONFIG_SLUB_DEBUG if (flags & SLAB_STORE_USER) /* -@@ -3561,6 +3663,10 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) +@@ -3612,6 +3714,10 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) #ifdef CONFIG_SLAB_FREELIST_HARDENED s->random = get_random_long(); #endif @@ -2341,7 +2363,7 @@ index 51258eff4178..075266bfbccf 100644 if (!calculate_sizes(s, -1)) goto error; -@@ -3837,6 +3943,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, +@@ -3887,6 +3993,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, offset -= s->red_left_pad; } @@ -2350,7 +2372,7 @@ index 51258eff4178..075266bfbccf 100644 /* Allow address range falling entirely within usercopy region. */ if (offset >= s->useroffset && offset - s->useroffset <= s->usersize && -@@ -3870,7 +3978,11 @@ static size_t __ksize(const void *object) +@@ -3920,7 +4028,11 @@ static size_t __ksize(const void *object) page = virt_to_head_page(object); if (unlikely(!PageSlab(page))) { @@ -2362,7 +2384,7 @@ index 51258eff4178..075266bfbccf 100644 return PAGE_SIZE << compound_order(page); } -@@ -4730,7 +4842,7 @@ enum slab_stat_type { +@@ -4777,7 +4889,7 @@ enum slab_stat_type { #define SO_TOTAL (1 << SL_TOTAL) #ifdef CONFIG_MEMCG @@ -2372,10 +2394,10 @@ index 51258eff4178..075266bfbccf 100644 static int __init setup_slub_memcg_sysfs(char *str) { diff --git a/mm/swap.c b/mm/swap.c -index 26fc9b5f1b6c..7c9312ca8982 100644 +index 301ed4e04320..cff1e4d6d04f 100644 --- a/mm/swap.c +++ b/mm/swap.c -@@ -93,6 +93,13 @@ static void __put_compound_page(struct page *page) +@@ -92,6 +92,13 @@ static void __put_compound_page(struct page *page) if (!PageHuge(page)) __page_cache_release(page); dtor = get_compound_page_dtor(page); @@ -2390,10 +2412,10 @@ index 26fc9b5f1b6c..7c9312ca8982 100644 } diff --git a/net/core/dev.c b/net/core/dev.c -index 559a91271f82..1e3b8deea1a7 100644 +index c6b2f6db0a9b..89b55db262a0 100644 --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -4259,7 +4259,7 @@ int netif_rx_ni(struct sk_buff *skb) +@@ -4588,7 +4588,7 @@ int netif_rx_ni(struct sk_buff *skb) } EXPORT_SYMBOL(netif_rx_ni); @@ -2402,7 +2424,7 @@ index 559a91271f82..1e3b8deea1a7 100644 { struct softnet_data *sd = this_cpu_ptr(&softnet_data); -@@ -5811,7 +5811,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) +@@ -6402,7 +6402,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) return work; } @@ -2412,7 +2434,7 @@ index 559a91271f82..1e3b8deea1a7 100644 struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig -index 80dad301361d..0c85c2a29ac1 100644 
+index 32cae39cdff6..9141d7ae99b2 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -266,6 +266,7 @@ config IP_PIMSM_V2 @@ -2423,19 +2445,48 @@ index 80dad301361d..0c85c2a29ac1 100644 ---help--- Normal TCP/IP networking is open to an attack known as "SYN flooding". This denial-of-service attack prevents legitimate remote +diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost +index 6b7f354f189a..360b015678a7 100644 +--- a/scripts/Makefile.modpost ++++ b/scripts/Makefile.modpost +@@ -78,6 +78,7 @@ modpost = scripts/mod/modpost \ + $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \ + $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \ + $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \ ++ $(if $(CONFIG_DEBUG_WRITABLE_FUNCTION_POINTERS_VERBOSE),-f) \ + $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) + + MODPOST_OPT=$(subst -i,-n,$(filter -i,$(MAKEFLAGS))) +diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig +index 74271dba4f94..21c842d26556 100644 +--- a/scripts/gcc-plugins/Kconfig ++++ b/scripts/gcc-plugins/Kconfig +@@ -59,6 +59,11 @@ config GCC_PLUGIN_LATENT_ENTROPY + is some slowdown of the boot process (about 0.5%) and fork and + irq processing. + ++ When extra_latent_entropy is passed on the kernel command line, ++ entropy will be extracted from up to the first 4GB of RAM while the ++ runtime memory allocator is being initialized. This costs even more ++ slowdown of the boot process. ++ + Note that entropy extracted this way is not cryptographically + secure! + diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c -index 1663fb19343a..4b44744fc1be 100644 +index f277e116e0eb..f93c582acc69 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c -@@ -35,6 +35,7 @@ static int vmlinux_section_warnings = 1; - static int warn_unresolved = 0; +@@ -36,6 +36,8 @@ static int warn_unresolved = 0; /* How a symbol is exported */ static int sec_mismatch_count = 0; -+static int writable_fptr_count = 0; - static int sec_mismatch_verbose = 1; static int sec_mismatch_fatal = 0; ++static int writable_fptr_count = 0; ++static int writable_fptr_verbose = 0; /* ignore missing files */ -@@ -954,6 +955,7 @@ enum mismatch { + static int ignore_missing_files; + +@@ -953,6 +955,7 @@ enum mismatch { ANY_EXIT_TO_ANY_INIT, EXPORT_TO_INIT_EXIT, EXTABLE_TO_NON_TEXT, @@ -2443,7 +2494,7 @@ index 1663fb19343a..4b44744fc1be 100644 }; /** -@@ -1080,6 +1082,12 @@ static const struct sectioncheck sectioncheck[] = { +@@ -1079,6 +1082,12 @@ static const struct sectioncheck sectioncheck[] = { .good_tosec = {ALL_TEXT_SECTIONS , NULL}, .mismatch = EXTABLE_TO_NON_TEXT, .handler = extable_mismatch_handler, @@ -2456,9 +2507,9 @@ index 1663fb19343a..4b44744fc1be 100644 } }; -@@ -1229,10 +1237,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr, +@@ -1266,10 +1275,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr, continue; - if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) + if (!is_valid_name(elf, sym)) continue; - if (sym->st_value == addr) - return sym; @@ -2469,54 +2520,70 @@ index 1663fb19343a..4b44744fc1be 100644 if (d < 0) d = addr - sym->st_value; if (d < distance) { -@@ -1391,7 +1399,11 @@ static void report_sec_mismatch(const char *modname, +@@ -1404,7 +1413,13 @@ static void report_sec_mismatch(const char *modname, char *prl_from; char *prl_to; - sec_mismatch_count++; -+ if (mismatch->mismatch == DATA_TO_TEXT) ++ if (mismatch->mismatch == DATA_TO_TEXT) { + writable_fptr_count++; -+ else ++ if 
(!writable_fptr_verbose) ++ return; ++ } else { + sec_mismatch_count++; -+ - if (!sec_mismatch_verbose) - return; ++ } -@@ -1515,6 +1527,14 @@ static void report_sec_mismatch(const char *modname, + get_pretty_name(from_is_func, &from, &from_p); + get_pretty_name(to_is_func, &to, &to_p); +@@ -1526,6 +1541,12 @@ static void report_sec_mismatch(const char *modname, fatal("There's a special handler for this mismatch type, " "we should never get here."); break; + case DATA_TO_TEXT: -+#if 0 + fprintf(stderr, + "The %s %s:%s references\n" + "the %s %s:%s%s\n", + from, fromsec, fromsym, to, tosec, tosym, to_p); -+#endif + break; } fprintf(stderr, "\n"); } -@@ -2523,6 +2543,14 @@ int main(int argc, char **argv) - } - } +@@ -2428,7 +2449,7 @@ int main(int argc, char **argv) + struct ext_sym_list *extsym_iter; + struct ext_sym_list *extsym_start = NULL; + +- while ((opt = getopt(argc, argv, "i:I:e:mnsT:o:awE")) != -1) { ++ while ((opt = getopt(argc, argv, "i:I:e:fmnsT:o:awE")) != -1) { + switch (opt) { + case 'i': + kernel_read = optarg; +@@ -2445,6 +2466,9 @@ int main(int argc, char **argv) + extsym_iter->file = optarg; + extsym_start = extsym_iter; + break; ++ case 'f': ++ writable_fptr_verbose = 1; ++ break; + case 'm': + modversions = 1; + break; +@@ -2521,6 +2545,11 @@ int main(int argc, char **argv) + fatal("modpost: Section mismatches detected.\n" + "Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.\n"); free(buf.p); -+ if (writable_fptr_count) { -+ if (!sec_mismatch_verbose) { -+ warn("modpost: Found %d writable function pointer(s).\n" -+ "To see full details build your kernel with:\n" -+ "'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n", -+ writable_fptr_count); -+ } -+ } ++ if (writable_fptr_count && !writable_fptr_verbose) ++ warn("modpost: Found %d writable function pointer%s.\n" ++ "To see full details build your kernel with:\n" ++ "'make CONFIG_DEBUG_WRITABLE_FUNCTION_POINTERS_VERBOSE=y'\n", ++ writable_fptr_count, (writable_fptr_count == 1 ? "" : "s")); return err; } diff --git a/security/Kconfig b/security/Kconfig -index c4302067a3ad..f5f5ff98e6b8 100644 +index 353cfef71d4e..d6b7f1d89985 100644 --- a/security/Kconfig +++ b/security/Kconfig -@@ -8,7 +8,7 @@ source security/keys/Kconfig +@@ -8,7 +8,7 @@ source "security/keys/Kconfig" config SECURITY_DMESG_RESTRICT bool "Restrict unprivileged access to the kernel syslog" @@ -2560,7 +2627,7 @@ index c4302067a3ad..f5f5ff98e6b8 100644 help This allows you to choose different security modules to be configured into your kernel. -@@ -48,6 +72,7 @@ config SECURITYFS +@@ -47,6 +71,7 @@ config SECURITYFS config SECURITY_NETWORK bool "Socket and Networking Security Hooks" depends on SECURITY @@ -2568,15 +2635,15 @@ index c4302067a3ad..f5f5ff98e6b8 100644 help This enables the socket and networking security hooks. 
If enabled, a security module can use these hooks to -@@ -155,6 +180,7 @@ config HARDENED_USERCOPY +@@ -153,6 +178,7 @@ config HARDENED_USERCOPY + bool "Harden memory copies between kernel and userspace" depends on HAVE_HARDENED_USERCOPY_ALLOCATOR - select BUG imply STRICT_DEVMEM + default y help This option checks for obviously wrong memory regions when copying memory to/from the kernel (via copy_to_user() and -@@ -167,7 +193,6 @@ config HARDENED_USERCOPY +@@ -165,7 +191,6 @@ config HARDENED_USERCOPY config HARDENED_USERCOPY_FALLBACK bool "Allow usercopy whitelist violations to fallback to object size" depends on HARDENED_USERCOPY @@ -2584,7 +2651,7 @@ index c4302067a3ad..f5f5ff98e6b8 100644 help This is a temporary option that allows missing usercopy whitelists to be discovered via a WARN() to the kernel log, instead of -@@ -192,10 +217,36 @@ config HARDENED_USERCOPY_PAGESPAN +@@ -190,10 +215,36 @@ config HARDENED_USERCOPY_PAGESPAN config FORTIFY_SOURCE bool "Harden common str/mem functions against buffer overflows" depends on ARCH_HAS_FORTIFY_SOURCE @@ -2622,7 +2689,7 @@ index c4302067a3ad..f5f5ff98e6b8 100644 bool "Force all usermode helper calls through a single binary" help diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig -index 8af7a690eb40..6539694b0fd3 100644 +index 55f032f1fc2d..7275fde1218d 100644 --- a/security/selinux/Kconfig +++ b/security/selinux/Kconfig @@ -2,7 +2,7 @@ config SECURITY_SELINUX @@ -2634,7 +2701,7 @@ index 8af7a690eb40..6539694b0fd3 100644 help This selects NSA Security-Enhanced Linux (SELinux). You will also need a policy configuration and a labeled filesystem. -@@ -79,23 +79,3 @@ config SECURITY_SELINUX_AVC_STATS +@@ -64,23 +64,3 @@ config SECURITY_SELINUX_AVC_STATS This option collects access vector cache statistics to /selinux/avc/cache_stats, which may be monitored via tools such as avcstat. @@ -2659,11 +2726,11 @@ index 8af7a690eb40..6539694b0fd3 100644 - - If you are unsure how to answer this question, answer 0. 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c -index 2b5ee5fbd652..99c7ed953d4e 100644 +index 614bc753822c..8c68df47483e 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c -@@ -135,18 +135,7 @@ __setup("selinux=", selinux_enabled_setup); - int selinux_enabled = 1; +@@ -134,18 +134,7 @@ static int __init selinux_enabled_setup(char *str) + __setup("selinux=", selinux_enabled_setup); #endif -static unsigned int selinux_checkreqprot_boot = @@ -2680,13 +2747,21 @@ index 2b5ee5fbd652..99c7ed953d4e 100644 -__setup("checkreqprot=", checkreqprot_setup); +static const unsigned int selinux_checkreqprot_boot; - static struct kmem_cache *sel_inode_cache; - static struct kmem_cache *file_security_cache; + /** + * selinux_secmark_enabled - Check to see if SECMARK is currently enabled diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c -index 79d3709b0671..4db06a12d48c 100644 +index 145ee62f205a..8a42d6a531a8 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c -@@ -664,10 +664,9 @@ static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf, +@@ -640,7 +640,6 @@ static ssize_t sel_read_checkreqprot(struct file *filp, char __user *buf, + static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) + { +- struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info; + char *page; + ssize_t length; + unsigned int new_value; +@@ -664,10 +663,9 @@ static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf, return PTR_ERR(page); length = -EINVAL; diff --git a/sys-kernel/linux-image-redcore/files/mute-pps_state_mismatch.patch b/sys-kernel/linux-image-redcore/files/5.1-mute-pps_state_mismatch.patch index 5bc1eff7..5bc1eff7 100644 --- a/sys-kernel/linux-image-redcore/files/mute-pps_state_mismatch.patch +++ b/sys-kernel/linux-image-redcore/files/5.1-mute-pps_state_mismatch.patch diff --git a/sys-kernel/linux-image-redcore/files/radeon_dp_aux_transfer_native-no-ratelimited_debug.patch b/sys-kernel/linux-image-redcore/files/5.1-radeon_dp_aux_transfer_native-no-ratelimited_debug.patch index 6ffcb42c..6ffcb42c 100644 --- a/sys-kernel/linux-image-redcore/files/radeon_dp_aux_transfer_native-no-ratelimited_debug.patch +++ b/sys-kernel/linux-image-redcore/files/5.1-radeon_dp_aux_transfer_native-no-ratelimited_debug.patch diff --git a/sys-kernel/linux-image-redcore/files/uksm-for-linux-hardened.patch b/sys-kernel/linux-image-redcore/files/5.1-uksm-linux-hardened.patch index d973274a..a79cb3d5 100644 --- a/sys-kernel/linux-image-redcore/files/uksm-for-linux-hardened.patch +++ b/sys-kernel/linux-image-redcore/files/5.1-uksm-linux-hardened.patch @@ -1,20 +1,6 @@ -diff -Nur a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX ---- a/Documentation/vm/00-INDEX 2018-08-24 12:04:51.000000000 +0100 -+++ b/Documentation/vm/00-INDEX 2018-08-27 10:44:36.340467187 +0100 -@@ -18,7 +18,9 @@ - - explains what hwpoison is - ksm.rst - - how to use the Kernel Samepage Merging feature. --mmu_notifier.rst -+uksm.txt -+ - Introduction to Ultra KSM -+mmu_notifier.txt - - a note about clearing pte/pmd and mmu notifications - numa.rst - - information about NUMA specific code in the Linux vm. 
diff -Nur a/Documentation/vm/uksm.txt b/Documentation/vm/uksm.txt --- a/Documentation/vm/uksm.txt 1970-01-01 01:00:00.000000000 +0100 -+++ b/Documentation/vm/uksm.txt 2018-08-27 10:44:36.340467187 +0100 ++++ b/Documentation/vm/uksm.txt 2019-07-07 09:32:50.451840235 +0100 @@ -0,0 +1,61 @@ +The Ultra Kernel Samepage Merging feature +---------------------------------------------- @@ -78,8 +64,8 @@ diff -Nur a/Documentation/vm/uksm.txt b/Documentation/vm/uksm.txt +2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation. +2017-02-26 UKSM 0.1.2.6 Fix a bug in hugetlbpage handling and a race bug with page migration. diff -Nur a/fs/exec.c b/fs/exec.c ---- a/fs/exec.c 2018-08-27 10:42:48.184976507 +0100 -+++ b/fs/exec.c 2018-08-27 10:47:39.413380371 +0100 +--- a/fs/exec.c 2019-07-07 09:08:19.132347946 +0100 ++++ b/fs/exec.c 2019-07-07 09:33:47.653770486 +0100 @@ -63,6 +63,7 @@ #include <linux/compat.h> #include <linux/vmalloc.h> @@ -88,7 +74,7 @@ diff -Nur a/fs/exec.c b/fs/exec.c #include <linux/uaccess.h> #include <asm/mmu_context.h> -@@ -1381,6 +1382,7 @@ +@@ -1385,6 +1386,7 @@ /* An exec changes our domain. We are no longer part of the thread group */ current->self_exec_id++; @@ -97,9 +83,9 @@ diff -Nur a/fs/exec.c b/fs/exec.c } EXPORT_SYMBOL(setup_new_exec); diff -Nur a/fs/proc/meminfo.c b/fs/proc/meminfo.c ---- a/fs/proc/meminfo.c 2018-08-24 12:04:51.000000000 +0100 -+++ b/fs/proc/meminfo.c 2018-08-27 10:44:36.341467220 +0100 -@@ -105,6 +105,10 @@ +--- a/fs/proc/meminfo.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/fs/proc/meminfo.c 2019-07-07 09:32:50.451840235 +0100 +@@ -106,6 +106,10 @@ global_zone_page_state(NR_KERNEL_STACK_KB)); show_val_kb(m, "PageTables: ", global_zone_page_state(NR_PAGETABLE)); @@ -111,9 +97,9 @@ diff -Nur a/fs/proc/meminfo.c b/fs/proc/meminfo.c show_val_kb(m, "Quicklists: ", quicklist_total_size()); #endif diff -Nur a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h ---- a/include/asm-generic/pgtable.h 2018-08-24 12:04:51.000000000 +0100 -+++ b/include/asm-generic/pgtable.h 2018-08-27 10:44:36.341467220 +0100 -@@ -817,12 +817,25 @@ +--- a/include/asm-generic/pgtable.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/asm-generic/pgtable.h 2019-07-07 09:32:50.451840235 +0100 +@@ -855,12 +855,25 @@ extern void untrack_pfn_moved(struct vm_area_struct *vma); #endif @@ -140,7 +126,7 @@ diff -Nur a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h } #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) -@@ -831,7 +844,7 @@ +@@ -869,7 +882,7 @@ static inline int is_zero_pfn(unsigned long pfn) { extern unsigned long zero_pfn; @@ -150,33 +136,43 @@ diff -Nur a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h static inline unsigned long my_zero_pfn(unsigned long addr) diff -Nur a/include/linux/ksm.h b/include/linux/ksm.h ---- a/include/linux/ksm.h 2018-08-24 12:04:51.000000000 +0100 -+++ b/include/linux/ksm.h 2018-08-27 10:44:36.341467220 +0100 -@@ -21,21 +21,6 @@ +--- a/include/linux/ksm.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/ksm.h 2019-07-07 09:32:50.451840235 +0100 +@@ -1,4 +1,4 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ ++/* SPDX-License-Identifier: GPL-3.0 */ + #ifndef __LINUX_KSM_H + #define __LINUX_KSM_H + /* +@@ -21,20 +21,16 @@ #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); -int __ksm_enter(struct mm_struct *mm); -void __ksm_exit(struct mm_struct *mm); -- + -static inline int ksm_fork(struct mm_struct *mm, struct 
mm_struct *oldmm) --{ ++static inline struct stable_node *page_stable_node(struct page *page) + { - if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) - return __ksm_enter(mm); - return 0; --} -- ++ return PageKsm(page) ? page_rmapping(page) : NULL; + } + -static inline void ksm_exit(struct mm_struct *mm) --{ ++static inline void set_page_stable_node(struct page *page, ++ struct stable_node *stable_node) + { - if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) - __ksm_exit(mm); --} ++ page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); + } /* - * When do_swap_page() first faults in from swap what used to be a KSM page, -@@ -54,6 +39,46 @@ - void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); - void ksm_migrate_page(struct page *newpage, struct page *oldpage); +@@ -56,6 +52,33 @@ + bool reuse_ksm_page(struct page *page, + struct vm_area_struct *vma, unsigned long address); +#ifdef CONFIG_KSM_LEGACY +int __ksm_enter(struct mm_struct *mm); @@ -203,25 +199,12 @@ diff -Nur a/include/linux/ksm.h b/include/linux/ksm.h +static inline void ksm_exit(struct mm_struct *mm) +{ +} -+ -+static inline void set_page_stable_node(struct page *page, -+ struct stable_node *stable_node) -+{ -+ page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); -+} -+ -+static inline struct stable_node *page_stable_node(struct page *page) -+{ -+ return PageKsm(page) ? page_rmapping(page) : NULL; -+} -+ -+ +#endif /* !CONFIG_UKSM */ + #else /* !CONFIG_KSM */ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) -@@ -89,4 +114,6 @@ +@@ -96,4 +119,6 @@ #endif /* CONFIG_MMU */ #endif /* !CONFIG_KSM */ @@ -229,9 +212,9 @@ diff -Nur a/include/linux/ksm.h b/include/linux/ksm.h + #endif /* __LINUX_KSM_H */ diff -Nur a/include/linux/mm_types.h b/include/linux/mm_types.h ---- a/include/linux/mm_types.h 2018-08-24 12:04:51.000000000 +0100 -+++ b/include/linux/mm_types.h 2018-08-27 10:44:36.342467252 +0100 -@@ -320,6 +320,9 @@ +--- a/include/linux/mm_types.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/mm_types.h 2019-07-07 09:32:50.451840235 +0100 +@@ -334,6 +334,9 @@ struct mempolicy *vm_policy; /* NUMA policy for the VMA */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; @@ -242,8 +225,8 @@ diff -Nur a/include/linux/mm_types.h b/include/linux/mm_types.h struct core_thread { diff -Nur a/include/linux/mmzone.h b/include/linux/mmzone.h ---- a/include/linux/mmzone.h 2018-08-24 12:04:51.000000000 +0100 -+++ b/include/linux/mmzone.h 2018-08-27 10:44:36.342467252 +0100 +--- a/include/linux/mmzone.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/mmzone.h 2019-07-07 09:32:50.451840235 +0100 @@ -148,6 +148,9 @@ NR_ZSPAGES, /* allocated in zsmalloc */ #endif @@ -254,18 +237,9 @@ diff -Nur a/include/linux/mmzone.h b/include/linux/mmzone.h NR_VM_ZONE_STAT_ITEMS }; enum node_stat_item { -@@ -865,7 +868,7 @@ - } - - /** -- * is_highmem - helper function to quickly check if a struct zone is a -+ * is_highmem - helper function to quickly check if a struct zone is a - * highmem zone or not. This is an attempt to keep references - * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. 
- * @zone - pointer to struct zone variable diff -Nur a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h --- a/include/linux/sradix-tree.h 1970-01-01 01:00:00.000000000 +0100 -+++ b/include/linux/sradix-tree.h 2018-08-27 10:44:36.343467284 +0100 ++++ b/include/linux/sradix-tree.h 2019-07-07 09:32:50.451840235 +0100 @@ -0,0 +1,77 @@ +#ifndef _LINUX_SRADIX_TREE_H +#define _LINUX_SRADIX_TREE_H @@ -346,7 +320,7 @@ diff -Nur a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h +#endif /* _LINUX_SRADIX_TREE_H */ diff -Nur a/include/linux/uksm.h b/include/linux/uksm.h --- a/include/linux/uksm.h 1970-01-01 01:00:00.000000000 +0100 -+++ b/include/linux/uksm.h 2018-08-27 10:44:36.343467284 +0100 ++++ b/include/linux/uksm.h 2019-07-07 09:32:50.451840235 +0100 @@ -0,0 +1,149 @@ +#ifndef __LINUX_UKSM_H +#define __LINUX_UKSM_H @@ -498,9 +472,9 @@ diff -Nur a/include/linux/uksm.h b/include/linux/uksm.h +#endif /* !CONFIG_UKSM */ +#endif /* __LINUX_UKSM_H */ diff -Nur a/kernel/fork.c b/kernel/fork.c ---- a/kernel/fork.c 2018-08-27 10:42:48.208977282 +0100 -+++ b/kernel/fork.c 2018-08-27 10:44:36.344467317 +0100 -@@ -542,7 +542,7 @@ +--- a/kernel/fork.c 2019-07-07 09:08:19.152348621 +0100 ++++ b/kernel/fork.c 2019-07-07 09:32:50.451840235 +0100 +@@ -584,7 +584,7 @@ __vma_link_rb(mm, tmp, rb_link, rb_parent); rb_link = &tmp->vm_rb.rb_right; rb_parent = &tmp->vm_rb; @@ -510,20 +484,20 @@ diff -Nur a/kernel/fork.c b/kernel/fork.c if (!(tmp->vm_flags & VM_WIPEONFORK)) retval = copy_page_range(mm, oldmm, mpnt); diff -Nur a/lib/Makefile b/lib/Makefile ---- a/lib/Makefile 2018-08-24 12:04:51.000000000 +0100 -+++ b/lib/Makefile 2018-08-27 10:44:36.344467317 +0100 -@@ -18,7 +18,7 @@ - KCOV_INSTRUMENT_dynamic_debug.o := n +--- a/lib/Makefile 2019-06-25 04:34:56.000000000 +0100 ++++ b/lib/Makefile 2019-07-07 09:32:50.451840235 +0100 +@@ -29,7 +29,7 @@ + endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ -- rbtree.o radix-tree.o timerqueue.o\ -+ rbtree.o radix-tree.o sradix-tree.o timerqueue.o\ +- rbtree.o radix-tree.o timerqueue.o xarray.o \ ++ rbtree.o radix-tree.o sradix-tree.o timerqueue.o xarray.o \ idr.o int_sqrt.o extable.o \ - sha1.o chacha20.o irq_regs.o argv_split.o \ + sha1.o chacha.o irq_regs.o argv_split.o \ flex_proportions.o ratelimit.o show_mem.o \ diff -Nur a/lib/sradix-tree.c b/lib/sradix-tree.c --- a/lib/sradix-tree.c 1970-01-01 01:00:00.000000000 +0100 -+++ b/lib/sradix-tree.c 2018-08-27 10:44:36.344467317 +0100 ++++ b/lib/sradix-tree.c 2019-07-07 09:32:50.451840235 +0100 @@ -0,0 +1,476 @@ +#include <linux/errno.h> +#include <linux/mm.h> @@ -1002,9 +976,9 @@ diff -Nur a/lib/sradix-tree.c b/lib/sradix-tree.c + return 0; +} diff -Nur a/mm/Kconfig b/mm/Kconfig ---- a/mm/Kconfig 2018-08-27 10:42:48.219977637 +0100 -+++ b/mm/Kconfig 2018-08-27 10:44:36.345467349 +0100 -@@ -308,6 +308,32 @@ +--- a/mm/Kconfig 2019-07-07 09:08:19.162348955 +0100 ++++ b/mm/Kconfig 2019-07-07 09:32:50.451840235 +0100 +@@ -302,6 +302,32 @@ See Documentation/vm/ksm.rst for more information: KSM is inactive until a program has madvised that an area is MADV_MERGEABLE, and root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). 
@@ -1037,10 +1011,31 @@ diff -Nur a/mm/Kconfig b/mm/Kconfig config DEFAULT_MMAP_MIN_ADDR int "Low address space to protect from user allocation" +diff -Nur a/mm/ksm.c b/mm/ksm.c +--- a/mm/ksm.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/mm/ksm.c 2019-07-07 09:32:50.461840572 +0100 +@@ -858,17 +858,6 @@ + return err; + } + +-static inline struct stable_node *page_stable_node(struct page *page) +-{ +- return PageKsm(page) ? page_rmapping(page) : NULL; +-} +- +-static inline void set_page_stable_node(struct page *page, +- struct stable_node *stable_node) +-{ +- page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); +-} +- + #ifdef CONFIG_SYSFS + /* + * Only called through the sysfs control interface: diff -Nur a/mm/Makefile b/mm/Makefile ---- a/mm/Makefile 2018-08-24 12:04:51.000000000 +0100 -+++ b/mm/Makefile 2018-08-27 10:44:36.345467349 +0100 -@@ -65,7 +65,8 @@ +--- a/mm/Makefile 2019-06-25 04:34:56.000000000 +0100 ++++ b/mm/Makefile 2019-07-07 09:32:50.451840235 +0100 +@@ -58,7 +58,8 @@ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o obj-$(CONFIG_SLOB) += slob.o obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o @@ -1051,9 +1046,9 @@ diff -Nur a/mm/Makefile b/mm/Makefile obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_SLUB) += slub.o diff -Nur a/mm/memory.c b/mm/memory.c ---- a/mm/memory.c 2018-08-24 12:04:51.000000000 +0100 -+++ b/mm/memory.c 2018-08-27 10:44:36.346467381 +0100 -@@ -128,6 +128,25 @@ +--- a/mm/memory.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/mm/memory.c 2019-07-07 09:32:50.461840572 +0100 +@@ -129,6 +129,25 @@ unsigned long highest_memmap_pfn __read_mostly; @@ -1079,7 +1074,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c /* * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() */ -@@ -139,6 +158,7 @@ +@@ -140,6 +159,7 @@ core_initcall(init_zero_pfn); @@ -1087,7 +1082,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c #if defined(SPLIT_RSS_COUNTING) void sync_mm_rss(struct mm_struct *mm) -@@ -1035,6 +1055,9 @@ +@@ -794,6 +814,9 @@ get_page(page); page_dup_rmap(page, false); rss[mm_counter(page)]++; @@ -1097,7 +1092,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c } else if (pte_devmap(pte)) { page = pte_page(pte); -@@ -1048,6 +1071,8 @@ +@@ -807,6 +830,8 @@ page_dup_rmap(page, false); rss[mm_counter(page)]++; } @@ -1106,7 +1101,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c } out_set_pte: -@@ -1317,8 +1342,10 @@ +@@ -1075,8 +1100,10 @@ ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); tlb_remove_tlb_entry(tlb, pte, addr); @@ -1118,7 +1113,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c if (!PageAnon(page)) { if (pte_dirty(ptent)) { -@@ -2360,8 +2387,10 @@ +@@ -2117,8 +2144,10 @@ clear_page(kaddr); kunmap_atomic(kaddr); flush_dcache_page(dst); @@ -1130,7 +1125,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c } static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) -@@ -2510,6 +2539,7 @@ +@@ -2266,6 +2295,7 @@ vmf->address); if (!new_page) goto oom; @@ -1138,7 +1133,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c } else { new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); -@@ -2536,7 +2566,9 @@ +@@ -2294,7 +2324,9 @@ mm_counter_file(old_page)); inc_mm_counter_fast(mm, MM_ANONPAGES); } @@ -1149,25 +1144,25 @@ diff -Nur a/mm/memory.c b/mm/memory.c } flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); diff -Nur a/mm/mmap.c b/mm/mmap.c ---- a/mm/mmap.c 2018-08-27 10:42:48.220977669 +0100 -+++ b/mm/mmap.c 2018-08-27 10:44:36.348467446 +0100 -@@ -45,6 +45,7 @@ - #include <linux/moduleparam.h> +--- a/mm/mmap.c 2019-07-07 09:08:19.162348955 +0100 ++++ 
b/mm/mmap.c 2019-07-07 09:34:22.544947675 +0100 +@@ -46,6 +46,7 @@ #include <linux/pkeys.h> #include <linux/oom.h> + #include <linux/sched/mm.h> +#include <linux/ksm.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> -@@ -182,6 +183,7 @@ +@@ -183,6 +184,7 @@ if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); -+ uksm_remove_vma(vma); ++ uksm_remove_vma(vma); vm_area_free(vma); return next; } -@@ -708,9 +710,16 @@ +@@ -733,9 +735,16 @@ long adjust_next = 0; int remove_next = 0; @@ -1184,7 +1179,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c if (end >= next->vm_end) { /* * vma expands, overlapping all the next, and -@@ -843,6 +852,7 @@ +@@ -868,6 +877,7 @@ end_changed = true; } vma->vm_pgoff = pgoff; @@ -1192,7 +1187,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c if (adjust_next) { next->vm_start += adjust_next << PAGE_SHIFT; next->vm_pgoff += adjust_next; -@@ -948,6 +958,7 @@ +@@ -973,6 +983,7 @@ if (remove_next == 2) { remove_next = 1; end = next->vm_end; @@ -1200,7 +1195,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c goto again; } else if (next) -@@ -974,10 +985,14 @@ +@@ -999,10 +1010,14 @@ */ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } @@ -1215,7 +1210,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c validate_mm(mm); return 0; -@@ -1434,6 +1449,9 @@ +@@ -1459,6 +1474,9 @@ vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; @@ -1225,7 +1220,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; -@@ -1798,6 +1816,7 @@ +@@ -1823,6 +1841,7 @@ allow_write_access(file); } file = vma->vm_file; @@ -1233,7 +1228,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c out: perf_event_mmap(vma); -@@ -1839,6 +1858,7 @@ +@@ -1865,6 +1884,7 @@ if (vm_flags & VM_DENYWRITE) allow_write_access(file); free_vma: @@ -1241,7 +1236,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c vm_area_free(vma); unacct_error: if (charged) -@@ -2658,6 +2678,8 @@ +@@ -2697,6 +2717,8 @@ else err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); @@ -1250,7 +1245,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c /* Success. 
*/ if (!err) return 0; -@@ -2943,6 +2965,7 @@ +@@ -3001,6 +3023,7 @@ if ((flags & (~VM_EXEC)) != 0) return -EINVAL; flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; @@ -1258,7 +1253,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); if (offset_in_page(error)) -@@ -2999,6 +3022,7 @@ +@@ -3051,6 +3074,7 @@ vma->vm_flags = flags; vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); @@ -1266,7 +1261,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; -@@ -3078,6 +3102,12 @@ +@@ -3128,6 +3152,12 @@ up_write(&mm->mmap_sem); } @@ -1279,7 +1274,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c if (mm->locked_vm) { vma = mm->mmap; while (vma) { -@@ -3112,6 +3142,11 @@ +@@ -3162,6 +3192,11 @@ vma = remove_vma(vma); } vm_unacct_memory(nr_accounted); @@ -1291,7 +1286,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c } /* Insert vm structure into process list sorted by address -@@ -3219,6 +3254,7 @@ +@@ -3269,6 +3304,7 @@ new_vma->vm_ops->open(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); *need_rmap_locks = false; @@ -1299,7 +1294,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c } return new_vma; -@@ -3369,6 +3405,7 @@ +@@ -3419,6 +3455,7 @@ vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); perf_event_mmap(vma); @@ -1307,25 +1302,10 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c return vma; -diff -Nur a/mm/rmap.c b/mm/rmap.c ---- a/mm/rmap.c 2018-08-24 12:04:51.000000000 +0100 -+++ b/mm/rmap.c 2018-08-27 10:44:36.348467446 +0100 -@@ -1017,9 +1017,9 @@ - - /** - * __page_set_anon_rmap - set up new anonymous rmap -- * @page: Page to add to rmap -+ * @page: Page to add to rmap - * @vma: VM area to add page to. -- * @address: User virtual address of the mapping -+ * @address: User virtual address of the mapping - * @exclusive: the page is exclusively owned by the current process - */ - static void __page_set_anon_rmap(struct page *page, diff -Nur a/mm/uksm.c b/mm/uksm.c --- a/mm/uksm.c 1970-01-01 01:00:00.000000000 +0100 -+++ b/mm/uksm.c 2018-08-27 10:44:36.351467543 +0100 -@@ -0,0 +1,5584 @@ ++++ b/mm/uksm.c 2019-07-07 09:32:50.461840572 +0100 +@@ -0,0 +1,5580 @@ +/* + * Ultra KSM. 
Copyright (C) 2011-2012 Nai Xia + * @@ -2821,10 +2801,9 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + .page = page, + .vma = vma, + }; ++ struct mmu_notifier_range range; + int swapped; + int err = -EFAULT; -+ unsigned long mmun_start; /* For mmu_notifiers */ -+ unsigned long mmun_end; /* For mmu_notifiers */ + + pvmw.address = page_address_in_vma(page, vma); + if (pvmw.address == -EFAULT) @@ -2832,9 +2811,8 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + + BUG_ON(PageTransCompound(page)); + -+ mmun_start = pvmw.address; -+ mmun_end = pvmw.address + PAGE_SIZE; -+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); ++ mmu_notifier_range_init(&range, mm, pvmw.address, pvmw.address + PAGE_SIZE); ++ mmu_notifier_invalidate_range_start(&range); + + if (!page_vma_mapped_walk(&pvmw)) + goto out_mn; @@ -2884,7 +2862,7 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c +out_unlock: + page_vma_mapped_walk_done(&pvmw); +out_mn: -+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); ++ mmu_notifier_invalidate_range_end(&range); +out: + return err; +} @@ -2908,6 +2886,7 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + struct page *kpage, pte_t orig_pte) +{ + struct mm_struct *mm = vma->vm_mm; ++ struct mmu_notifier_range range; + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; @@ -2918,8 +2897,6 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + + unsigned long addr; + int err = MERGE_ERR_PGERR; -+ unsigned long mmun_start; /* For mmu_notifiers */ -+ unsigned long mmun_end; /* For mmu_notifiers */ + + addr = page_address_in_vma(page, vma); + if (addr == -EFAULT) @@ -2939,9 +2916,8 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + if (!pmd_present(*pmd)) + goto out; + -+ mmun_start = addr; -+ mmun_end = addr + PAGE_SIZE; -+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); ++ mmu_notifier_range_init(&range, mm, addr, addr + PAGE_SIZE); ++ mmu_notifier_invalidate_range_start(&range); + + ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!pte_same(*ptep, orig_pte)) { @@ -2974,7 +2950,7 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + pte_unmap_unlock(ptep, ptl); + err = 0; +out_mn: -+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); ++ mmu_notifier_invalidate_range_end(&range); +out: + return err; +} @@ -6911,11 +6887,11 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c +#endif + diff -Nur a/mm/vmstat.c b/mm/vmstat.c ---- a/mm/vmstat.c 2018-08-24 12:04:51.000000000 +0100 -+++ b/mm/vmstat.c 2018-08-27 10:44:36.352467575 +0100 -@@ -1163,6 +1163,9 @@ +--- a/mm/vmstat.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/mm/vmstat.c 2019-07-07 09:32:50.461840572 +0100 +@@ -1165,6 +1165,9 @@ "nr_written", - "", /* nr_indirectly_reclaimable */ + "nr_kernel_misc_reclaimable", +#ifdef CONFIG_UKSM + "nr_uksm_zero_pages", diff --git a/sys-kernel/linux-image-redcore/files/nouveau-pascal-backlight.patch b/sys-kernel/linux-image-redcore/files/nouveau-pascal-backlight.patch deleted file mode 100644 index 754d982a..00000000 --- a/sys-kernel/linux-image-redcore/files/nouveau-pascal-backlight.patch +++ /dev/null @@ -1,11 +0,0 @@ -diff -up linux-4.16/drivers/gpu/drm/nouveau/nouveau_backlight.c.omv~ linux-4.16/drivers/gpu/drm/nouveau/nouveau_backlight.c ---- linux-4.16/drivers/gpu/drm/nouveau/nouveau_backlight.c.omv~ 2018-04-06 01:04:34.573357055 +0200 -+++ linux-4.16/drivers/gpu/drm/nouveau/nouveau_backlight.c 2018-04-06 01:05:46.985579248 +0200 -@@ -287,6 +287,7 @@ nouveau_backlight_init(struct drm_device - case NV_DEVICE_INFO_V0_FERMI: - case NV_DEVICE_INFO_V0_KEPLER: - case NV_DEVICE_INFO_V0_MAXWELL: -+ case NV_DEVICE_INFO_V0_PASCAL: - return 
nv50_backlight_init(connector); - default: - break; diff --git a/sys-kernel/linux-image-redcore/files/revert-patches-causing-instant-reboot.patch b/sys-kernel/linux-image-redcore/files/revert-patches-causing-instant-reboot.patch deleted file mode 100644 index a2127cff..00000000 --- a/sys-kernel/linux-image-redcore/files/revert-patches-causing-instant-reboot.patch +++ /dev/null @@ -1,314 +0,0 @@ -diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S -index 8169e8b7a4dc..12915511be61 100644 ---- a/arch/x86/boot/compressed/head_64.S -+++ b/arch/x86/boot/compressed/head_64.S -@@ -305,48 +305,13 @@ ENTRY(startup_64) - /* Set up the stack */ - leaq boot_stack_end(%rbx), %rsp - -- /* -- * paging_prepare() and cleanup_trampoline() below can have GOT -- * references. Adjust the table with address we are running at. -- * -- * Zero RAX for adjust_got: the GOT was not adjusted before; -- * there's no adjustment to undo. -- */ -- xorq %rax, %rax -- -- /* -- * Calculate the address the binary is loaded at and use it as -- * a GOT adjustment. -- */ -- call 1f --1: popq %rdi -- subq $1b, %rdi -- -- call adjust_got -- - /* - * At this point we are in long mode with 4-level paging enabled, -- * but we might want to enable 5-level paging or vice versa. -- * -- * The problem is that we cannot do it directly. Setting or clearing -- * CR4.LA57 in long mode would trigger #GP. So we need to switch off -- * long mode and paging first. -- * -- * We also need a trampoline in lower memory to switch over from -- * 4- to 5-level paging for cases when the bootloader puts the kernel -- * above 4G, but didn't enable 5-level paging for us. -- * -- * The same trampoline can be used to switch from 5- to 4-level paging -- * mode, like when starting 4-level paging kernel via kexec() when -- * original kernel worked in 5-level paging mode. -- * -- * For the trampoline, we need the top page table to reside in lower -- * memory as we don't have a way to load 64-bit values into CR3 in -- * 32-bit mode. -+ * but we want to enable 5-level paging. - * -- * We go though the trampoline even if we don't have to: if we're -- * already in a desired paging mode. This way the trampoline code gets -- * tested on every boot. -+ * The problem is that we cannot do it directly. Setting LA57 in -+ * long mode would trigger #GP. So we need to switch off long mode -+ * first. - */ - - /* Make sure we have GDT with 32-bit code segment */ -@@ -371,32 +336,40 @@ ENTRY(startup_64) - /* Save the trampoline address in RCX */ - movq %rax, %rcx - -+ /* Check if we need to enable 5-level paging */ -+ cmpq $0, %rdx -+ jz lvl5 -+ -+ /* Clear additional page table */ -+ leaq lvl5_pgtable(%rbx), %rdi -+ xorq %rax, %rax -+ movq $(PAGE_SIZE/8), %rcx -+ rep stosq -+ - /* -- * Load the address of trampoline_return() into RDI. -- * It will be used by the trampoline to return to the main code. -+ * Setup current CR3 as the first and only entry in a new top level -+ * page table. - */ -- leaq trampoline_return(%rip), %rdi -+ movq %cr3, %rdi -+ leaq 0x7 (%rdi), %rax -+ movq %rax, lvl5_pgtable(%rbx) - - /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */ - pushq $__KERNEL32_CS -- leaq TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax -+ leaq compatible_mode(%rip), %rax - pushq %rax - lretq --trampoline_return: -+lvl5: - /* Restore the stack, the 32-bit trampoline uses its own stack */ - leaq boot_stack_end(%rbx), %rsp - - /* - * cleanup_trampoline() would restore trampoline memory. 
- * -- * RDI is address of the page table to use instead of page table -- * in trampoline memory (if required). -- * - * RSI holds real mode data and needs to be preserved across - * this function call. - */ - pushq %rsi -- leaq top_pgtable(%rbx), %rdi - call cleanup_trampoline - popq %rsi - -@@ -404,21 +377,6 @@ trampoline_return: - pushq $0 - popfq - -- /* -- * Previously we've adjusted the GOT with address the binary was -- * loaded at. Now we need to re-adjust for relocation address. -- * -- * Calculate the address the binary is loaded at, so that we can -- * undo the previous GOT adjustment. -- */ -- call 1f --1: popq %rax -- subq $1b, %rax -- -- /* The new adjustment is the relocation address */ -- movq %rbx, %rdi -- call adjust_got -- - /* - * Copy the compressed kernel to the end of our buffer - * where decompression in place becomes safe. -@@ -519,6 +477,19 @@ relocated: - shrq $3, %rcx - rep stosq - -+/* -+ * Adjust our own GOT -+ */ -+ leaq _got(%rip), %rdx -+ leaq _egot(%rip), %rcx -+1: -+ cmpq %rcx, %rdx -+ jae 2f -+ addq %rbx, (%rdx) -+ addq $8, %rdx -+ jmp 1b -+2: -+ - /* - * Do the extraction, and jump to the new kernel.. - */ -@@ -537,36 +508,9 @@ relocated: - */ - jmp *%rax - --/* -- * Adjust the global offset table -- * -- * RAX is the previous adjustment of the table to undo (use 0 if it's the -- * first time we touch GOT). -- * RDI is the new adjustment to apply. -- */ --adjust_got: -- /* Walk through the GOT adding the address to the entries */ -- leaq _got(%rip), %rdx -- leaq _egot(%rip), %rcx --1: -- cmpq %rcx, %rdx -- jae 2f -- subq %rax, (%rdx) /* Undo previous adjustment */ -- addq %rdi, (%rdx) /* Apply the new adjustment */ -- addq $8, %rdx -- jmp 1b --2: -- ret -- - .code32 --/* -- * This is the 32-bit trampoline that will be copied over to low memory. -- * -- * RDI contains the return address (might be above 4G). -- * ECX contains the base address of the trampoline memory. -- * Non zero RDX on return means we need to enable 5-level paging. 
-- */ - ENTRY(trampoline_32bit_src) -+compatible_mode: - /* Set up data and stack segments */ - movl $__KERNEL_DS, %eax - movl %eax, %ds -@@ -580,61 +524,33 @@ ENTRY(trampoline_32bit_src) - btrl $X86_CR0_PG_BIT, %eax - movl %eax, %cr0 - -- /* Check what paging mode we want to be in after the trampoline */ -- cmpl $0, %edx -- jz 1f -+ /* Point CR3 to 5-level paging */ -+ leal lvl5_pgtable(%ebx), %eax -+ movl %eax, %cr3 - -- /* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */ -+ /* Enable PAE and LA57 mode */ - movl %cr4, %eax -- testl $X86_CR4_LA57, %eax -- jnz 3f -- jmp 2f --1: -- /* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */ -- movl %cr4, %eax -- testl $X86_CR4_LA57, %eax -- jz 3f --2: -- /* Point CR3 to the trampoline's new top level page table */ -- leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax -- movl %eax, %cr3 --3: -- /* Enable PAE and LA57 (if required) paging modes */ -- movl $X86_CR4_PAE, %eax -- cmpl $0, %edx -- jz 1f -- orl $X86_CR4_LA57, %eax --1: -+ orl $(X86_CR4_PAE | X86_CR4_LA57), %eax - movl %eax, %cr4 - -- /* Calculate address of paging_enabled() once we are executing in the trampoline */ -- leal paging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax -+ /* Calculate address we are running at */ -+ call 1f -+1: popl %edi -+ subl $1b, %edi - -- /* Prepare the stack for far return to Long Mode */ -+ /* Prepare stack for far return to Long Mode */ - pushl $__KERNEL_CS -- pushl %eax -+ leal lvl5(%edi), %eax -+ push %eax - -- /* Enable paging again */ -+ /* Enable paging back */ - movl $(X86_CR0_PG | X86_CR0_PE), %eax - movl %eax, %cr0 - - lret - -- .code64 --paging_enabled: -- /* Return from the trampoline */ -- jmp *%rdi -- -- /* -- * The trampoline code has a size limit. -- * Make sure we fail to compile if the trampoline code grows -- * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes. -- */ -- .org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE -- -- .code32 - no_longmode: -- /* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */ -+ /* This isn't an x86-64 CPU so hang */ - 1: - hlt - jmp 1b -@@ -695,10 +611,5 @@ boot_stack_end: - .balign 4096 - pgtable: - .fill BOOT_PGT_SIZE, 1, 0 -- --/* -- * The page table is going to be used instead of page table in the trampoline -- * memory. -- */ --top_pgtable: -+lvl5_pgtable: - .fill PAGE_SIZE, 1, 0 -diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c -index a362fa0b849c..32af1cbcd903 100644 ---- a/arch/x86/boot/compressed/pgtable_64.c -+++ b/arch/x86/boot/compressed/pgtable_64.c -@@ -22,6 +22,14 @@ struct paging_config { - /* Buffer to preserve trampoline memory */ - static char trampoline_save[TRAMPOLINE_32BIT_SIZE]; - -+/* -+ * The page table is going to be used instead of page table in the trampoline -+ * memory. -+ * -+ * It must not be in BSS as BSS is cleared after cleanup_trampoline(). -+ */ -+static char top_pgtable[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data); -+ - /* - * Trampoline address will be printed by extract_kernel() for debugging - * purposes. -@@ -126,7 +134,7 @@ struct paging_config paging_prepare(void) - return paging_config; - } - --void cleanup_trampoline(void *pgtable) -+void cleanup_trampoline(void) - { - void *trampoline_pgtable; - -@@ -137,8 +145,8 @@ void cleanup_trampoline(void *pgtable) - * if it's there. 
- */ - if ((void *)__native_read_cr3() == trampoline_pgtable) { -- memcpy(pgtable, trampoline_pgtable, PAGE_SIZE); -- native_write_cr3((unsigned long)pgtable); -+ memcpy(top_pgtable, trampoline_pgtable, PAGE_SIZE); -+ native_write_cr3((unsigned long)top_pgtable); - } - - /* Restore trampoline memory */ diff --git a/sys-kernel/linux-image-redcore/files/workaround-BIOS-bugs-on-CCP-SVE-ryzen-TR.patch b/sys-kernel/linux-image-redcore/files/workaround-BIOS-bugs-on-CCP-SVE-ryzen-TR.patch deleted file mode 100644 index 1d1ae0e3..00000000 --- a/sys-kernel/linux-image-redcore/files/workaround-BIOS-bugs-on-CCP-SVE-ryzen-TR.patch +++ /dev/null @@ -1,85 +0,0 @@ -diff -up linux-4.18/drivers/crypto/ccp/psp-dev.c.0333~ linux-4.18/drivers/crypto/ccp/psp-dev.c ---- linux-4.18/drivers/crypto/ccp/psp-dev.c.0333~ 2018-08-21 19:32:36.457890957 +0200 -+++ linux-4.18/drivers/crypto/ccp/psp-dev.c 2018-08-21 19:35:43.283028769 +0200 -@@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex); - static struct sev_misc_dev *misc_dev; - static struct psp_device *psp_master; - -+static int psp_cmd_timeout = 100; -+module_param(psp_cmd_timeout, int, 0644); -+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands"); -+ -+static int psp_probe_timeout = 5; -+module_param(psp_probe_timeout, int, 0644); -+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe"); -+ -+static bool psp_dead; -+static int psp_timeout; -+ - static struct psp_device *psp_alloc_struct(struct sp_device *sp) - { - struct device *dev = sp->dev; -@@ -82,10 +93,15 @@ done: - return IRQ_HANDLED; - } - --static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg) -+static int sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg, unsigned int timeout) - { -- wait_event(psp->sev_int_queue, psp->sev_int_rcvd); -+ int ret; -+ ret = wait_event_timeout(psp->sev_int_queue, psp->sev_int_rcvd, -+ timeout * HZ); -+ if (!ret) -+ return -ETIMEDOUT; - *reg = ioread32(psp->io_regs + PSP_CMDRESP); -+ return 0; - } - - static int sev_cmd_buffer_len(int cmd) -@@ -133,12 +149,15 @@ static int __sev_do_cmd_locked(int cmd, - if (!psp) - return -ENODEV; - -+ if (psp_dead) -+ return -EBUSY; -+ - /* Get the physical address of the command buffer */ - phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; - phys_msb = data ? 
upper_32_bits(__psp_pa(data)) : 0; - -- dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n", -- cmd, phys_msb, phys_lsb); -+ dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", -+ cmd, phys_msb, phys_lsb, psp_timeout); - - print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, - sev_cmd_buffer_len(cmd), false); -@@ -154,7 +173,17 @@ static int __sev_do_cmd_locked(int cmd, - iowrite32(reg, psp->io_regs + PSP_CMDRESP); - - /* wait for command completion */ -- sev_wait_cmd_ioc(psp, ®); -+ ret = sev_wait_cmd_ioc(psp, ®, psp_timeout); -+ if (ret) { -+ if (psp_ret) -+ *psp_ret = 0; -+ -+ dev_err(psp->dev, "sev command %#x timed out, disabling PSP\n", cmd); -+ psp_dead = true; -+ return ret; -+ } -+ -+ psp_timeout = psp_cmd_timeout; - - if (psp_ret) - *psp_ret = reg & PSP_CMDRESP_ERR_MASK; -@@ -886,6 +915,8 @@ void psp_pci_init(void) - - psp_master = sp->psp_data; - -+ psp_timeout = psp_probe_timeout; -+ - if (sev_get_api_version()) - goto err; - diff --git a/sys-kernel/linux-image-redcore/linux-image-redcore-4.18.5.ebuild b/sys-kernel/linux-image-redcore/linux-image-redcore-5.1.15.ebuild index 2100ebc1..d5062a97 100644 --- a/sys-kernel/linux-image-redcore/linux-image-redcore-4.18.5.ebuild +++ b/sys-kernel/linux-image-redcore/linux-image-redcore-5.1.15.ebuild @@ -7,18 +7,20 @@ inherit eutils EXTRAVERSION="redcore" KV_FULL="${PV}-${EXTRAVERSION}" +KV_MAJOR="5.1" DESCRIPTION="Official Redcore Linux Kernel Image" HOMEPAGE="https://redcorelinux.org" -SRC_URI="https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-${PV}.tar.xz" +SRC_URI="https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-${PV}.tar.xz" KEYWORDS="amd64" LICENSE="GPL-2" -SLOT="${PV}" +SLOT="${PVR}" IUSE="+cryptsetup +dmraid +dracut +dkms +mdadm" RESTRICT="binchecks strip mirror" DEPEND=" + app-arch/lz4 app-arch/xz-utils sys-devel/autoconf sys-devel/bc @@ -26,22 +28,37 @@ DEPEND=" cryptsetup? ( sys-fs/cryptsetup ) dmraid? ( sys-fs/dmraid ) dracut? ( >=sys-kernel/dracut-0.44-r8 ) - dkms? ( sys-kernel/dkms ~sys-kernel/linux-sources-redcore-${PV} ) + dkms? ( sys-kernel/dkms sys-kernel/linux-sources-redcore-lts:${SLOT} ) mdadm? 
( sys-fs/mdadm ) >=sys-kernel/linux-firmware-20180314" RDEPEND="${DEPEND}" -PATCHES=( "${FILESDIR}"/0001-Revert-x86-ACPI-cstate-Allow-ACPI-C1-FFH-MWAIT-use-o.patch - "${FILESDIR}"/ata-fix-NCQ-LOG-strings-and-move-to-debug.patch - "${FILESDIR}"/drop_ancient-and-wrong-msg.patch - "${FILESDIR}"/enable_alx_wol.patch - "${FILESDIR}"/mute-pps_state_mismatch.patch - "${FILESDIR}"/nouveau-pascal-backlight.patch - "${FILESDIR}"/radeon_dp_aux_transfer_native-no-ratelimited_debug.patch - "${FILESDIR}"/revert-patches-causing-instant-reboot.patch - "${FILESDIR}"/workaround-BIOS-bugs-on-CCP-SVE-ryzen-TR.patch - "${FILESDIR}"/linux-hardened.patch - "${FILESDIR}"/uksm-for-linux-hardened.patch ) +PATCHES=( + "${FILESDIR}"/"${KV_MAJOR}"-acpi-use-kern_warning_even_when_error.patch + "${FILESDIR}"/"${KV_MAJOR}"-ata-fix-NCQ-LOG-strings-and-move-to-debug.patch + "${FILESDIR}"/"${KV_MAJOR}"-drop_ancient-and-wrong-msg.patch + "${FILESDIR}"/"${KV_MAJOR}"-enable_alx_wol.patch + "${FILESDIR}"/"${KV_MAJOR}"-mute-pps_state_mismatch.patch + "${FILESDIR}"/"${KV_MAJOR}"-fix-acpi_dbg_level.patch + "${FILESDIR}"/"${KV_MAJOR}"-radeon_dp_aux_transfer_native-no-ratelimited_debug.patch + "${FILESDIR}"/"${KV_MAJOR}"-Unknow-SSD-HFM128GDHTNG-8310B-QUIRK_NO_APST.patch + "${FILESDIR}"/"${KV_MAJOR}"-linux-hardened.patch + "${FILESDIR}"/"${KV_MAJOR}"-uksm-linux-hardened.patch + "${FILESDIR}"/"${KV_MAJOR}"-0001-MultiQueue-Skiplist-Scheduler-version-0.192-linux-hardened.patch + "${FILESDIR}"/"${KV_MAJOR}"-0002-Fix-Werror-build-failure-in-tools.patch + "${FILESDIR}"/"${KV_MAJOR}"-0003-Make-preemptible-kernel-default.patch + "${FILESDIR}"/"${KV_MAJOR}"-0004-Expose-vmsplit-for-our-poor-32-bit-users.patch + "${FILESDIR}"/"${KV_MAJOR}"-0005-Create-highres-timeout-variants-of-schedule_timeout-.patch + "${FILESDIR}"/"${KV_MAJOR}"-0006-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch + "${FILESDIR}"/"${KV_MAJOR}"-0007-Convert-msleep-to-use-hrtimers-when-active.patch + "${FILESDIR}"/"${KV_MAJOR}"-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch + "${FILESDIR}"/"${KV_MAJOR}"-0009-Replace-all-calls-to-schedule_timeout_interruptible-.patch + "${FILESDIR}"/"${KV_MAJOR}"-0010-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch + "${FILESDIR}"/"${KV_MAJOR}"-0011-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch + "${FILESDIR}"/"${KV_MAJOR}"-0012-Make-threaded-IRQs-optionally-the-default-which-can-.patch + "${FILESDIR}"/"${KV_MAJOR}"-0013-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch + "${FILESDIR}"/"${KV_MAJOR}"-0014-Swap-sucks.patch +) S="${WORKDIR}"/linux-"${PV}" @@ -57,12 +74,12 @@ src_prepare() { default emake mrproper sed -ri "s|^(EXTRAVERSION =).*|\1 -${EXTRAVERSION}|" Makefile - cp "${FILESDIR}"/"${EXTRAVERSION}"-amd64.config .config + cp "${FILESDIR}"/"${KV_MAJOR}"-amd64.config .config + rm -rf $(find . -type f|grep -F \.orig) } src_compile() { - emake prepare modules_prepare - emake bzImage modules + emake prepare modules_prepare bzImage modules } src_install() { @@ -126,19 +143,23 @@ _kernel_modules_delete() { } pkg_postinst() { - if use dracut; then - _dracut_initrd_create + if [ $(stat -c %d:%i /) == $(stat -c %d:%i /proc/1/root/.) ]; then + if use dracut; then + _dracut_initrd_create + fi + _grub2_update_grubcfg fi - _grub2_update_grubcfg } pkg_postrm() { - if use dracut; then - _dracut_initrd_delete + if [ $(stat -c %d:%i /) == $(stat -c %d:%i /proc/1/root/.) 
]; then + if use dracut; then + _dracut_initrd_delete + fi + _grub2_update_grubcfg fi if use dkms; then _dkms_modules_delete fi _kernel_modules_delete - _grub2_update_grubcfg } diff --git a/sys-kernel/linux-sources-redcore/Manifest b/sys-kernel/linux-sources-redcore/Manifest index 8028344a..7a19047c 100644 --- a/sys-kernel/linux-sources-redcore/Manifest +++ b/sys-kernel/linux-sources-redcore/Manifest @@ -1 +1 @@ -DIST linux-4.18.5.tar.xz 101796536 BLAKE2B db7c4a46aba53d38ccc5f28a7da4a21f3d53667751f61595123f89e47e956bf13bcd6df85df47e78d2794fe78a7ec6c9082e64cb3025d6c5c1743d9935dde375 SHA512 604b334ccd74b230faf21db8887e382c49d4877ec8ce8298c079001a12222a6c7be2542c8f37c025cb3d625905d30e4c8c37267f0285aea25bbbe5aa3457040c +DIST linux-5.1.15.tar.xz 106273964 BLAKE2B cd029f7f691b69847a0c58f9e4c3ed11eb31c57ccc72874fdd0e5abdff14b3c938543394a4376305d0720ca6df84c1c1446ece77ad6a3a5e4ff8c91af1643ec4 SHA512 c22988286f8eed176d54446222d5c9d15a7a1b3024dffdc4e4884a45c0d2d7ec24c9d52219a3f0b8fe69c8a92332cc37314301e3bd4f671f116376fd5ca45d61 diff --git a/sys-kernel/linux-sources-redcore/files/0001-Revert-x86-ACPI-cstate-Allow-ACPI-C1-FFH-MWAIT-use-o.patch b/sys-kernel/linux-sources-redcore/files/0001-Revert-x86-ACPI-cstate-Allow-ACPI-C1-FFH-MWAIT-use-o.patch deleted file mode 100644 index e8f3bfda..00000000 --- a/sys-kernel/linux-sources-redcore/files/0001-Revert-x86-ACPI-cstate-Allow-ACPI-C1-FFH-MWAIT-use-o.patch +++ /dev/null @@ -1,29 +0,0 @@ -From f912ead404ffa24db7f4aee527aff411db39262a Mon Sep 17 00:00:00 2001 -From: Gabriel Craciunescu <crazy@frugalware.org> -Date: Wed, 11 Apr 2018 17:17:06 +0200 -Subject: [PATCH] Revert "x86/ACPI/cstate: Allow ACPI C1 FFH MWAIT use on AMD systems" - -This reverts commit 5209654a46ee71137ad9b06da99d4ef2794475af. -Please see: https://community.amd.com/thread/224000 - ---- - arch/x86/kernel/acpi/cstate.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c -index dde437f5d14f..8233a630280f 100644 ---- a/arch/x86/kernel/acpi/cstate.c -+++ b/arch/x86/kernel/acpi/cstate.c -@@ -167,8 +167,7 @@ static int __init ffh_cstate_init(void) - { - struct cpuinfo_x86 *c = &boot_cpu_data; - -- if (c->x86_vendor != X86_VENDOR_INTEL && -- c->x86_vendor != X86_VENDOR_AMD) -+ if (c->x86_vendor != X86_VENDOR_INTEL) - return -1; - - cpu_cstate_entry = alloc_percpu(struct cstate_entry); --- -2.17.0 - diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0001-MultiQueue-Skiplist-Scheduler-version-0.192-linux-hardened.patch b/sys-kernel/linux-sources-redcore/files/5.1-0001-MultiQueue-Skiplist-Scheduler-version-0.192-linux-hardened.patch new file mode 100644 index 00000000..392477d4 --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-0001-MultiQueue-Skiplist-Scheduler-version-0.192-linux-hardened.patch @@ -0,0 +1,10577 @@ +diff -Nur a/arch/alpha/Kconfig b/arch/alpha/Kconfig +--- a/arch/alpha/Kconfig 2019-06-25 04:34:56.000000000 +0100 ++++ b/arch/alpha/Kconfig 2019-07-07 09:17:41.251241479 +0100 +@@ -670,6 +670,8 @@ + default 1200 if HZ_1200 + default 1024 + ++source "kernel/Kconfig.MuQSS" ++ + config SRM_ENV + tristate "SRM environment through procfs" + depends on PROC_FS +diff -Nur a/arch/arm/Kconfig b/arch/arm/Kconfig +--- a/arch/arm/Kconfig 2019-06-25 04:34:56.000000000 +0100 ++++ b/arch/arm/Kconfig 2019-07-07 09:17:41.251241479 +0100 +@@ -1308,6 +1308,8 @@ + MultiThreading at a cost of slightly increased overhead in some + places. If unsure say N here. 
+ ++source "kernel/Kconfig.MuQSS" ++ + config HAVE_ARM_SCU + bool + help +diff -Nur a/arch/arm64/Kconfig b/arch/arm64/Kconfig +--- a/arch/arm64/Kconfig 2019-07-07 09:08:19.122347611 +0100 ++++ b/arch/arm64/Kconfig 2019-07-07 09:17:41.251241479 +0100 +@@ -825,6 +825,8 @@ + MultiThreading at a cost of slightly increased overhead in some + places. If unsure say N here. + ++source "kernel/Kconfig.MuQSS" ++ + config NR_CPUS + int "Maximum number of CPUs (2-4096)" + range 2 4096 +diff -Nur a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig +--- a/arch/powerpc/Kconfig 2019-06-25 04:34:56.000000000 +0100 ++++ b/arch/powerpc/Kconfig 2019-07-07 09:17:41.251241479 +0100 +@@ -820,6 +820,8 @@ + when dealing with POWER5 cpus at a cost of slightly increased + overhead in some places. If unsure say N here. + ++source "kernel/Kconfig.MuQSS" ++ + config PPC_DENORMALISATION + bool "PowerPC denormalisation exception handling" + depends on PPC_BOOK3S_64 +diff -Nur a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c +--- a/arch/powerpc/platforms/cell/spufs/sched.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/arch/powerpc/platforms/cell/spufs/sched.c 2019-07-07 09:17:41.251241479 +0100 +@@ -65,11 +65,6 @@ + static struct timer_list spuloadavg_timer; + + /* +- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0). +- */ +-#define NORMAL_PRIO 120 +- +-/* + * Frequency of the spu scheduler tick. By default we do one SPU scheduler + * tick for every 10 CPU scheduler ticks. + */ +diff -Nur a/arch/x86/Kconfig b/arch/x86/Kconfig +--- a/arch/x86/Kconfig 2019-07-07 09:08:19.122347611 +0100 ++++ b/arch/x86/Kconfig 2019-07-07 09:17:41.251241479 +0100 +@@ -1017,6 +1017,22 @@ + config SCHED_SMT + def_bool y if SMP + ++config SMT_NICE ++ bool "SMT (Hyperthreading) aware nice priority and policy support" ++ depends on SCHED_MUQSS && SCHED_SMT ++ default y ++ ---help--- ++ Enabling Hyperthreading on Intel CPUs decreases the effectiveness ++ of the use of 'nice' levels and different scheduling policies ++ (e.g. realtime) due to sharing of CPU power between hyperthreads. ++ SMT nice support makes each logical CPU aware of what is running on ++ its hyperthread siblings, maintaining appropriate distribution of ++ CPU according to nice levels and scheduling policies at the expense ++ of slightly increased overhead. ++ ++ If unsure say Y here. ++ ++ + config SCHED_MC + def_bool y + prompt "Multi-core scheduler support" +@@ -1047,6 +1063,8 @@ + + If unsure say Y here. + ++source "kernel/Kconfig.MuQSS" ++ + config UP_LATE_INIT + def_bool y + depends on !SMP && X86_LOCAL_APIC +diff -Nur a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +--- a/Documentation/admin-guide/kernel-parameters.txt 2019-07-07 09:08:19.122347611 +0100 ++++ b/Documentation/admin-guide/kernel-parameters.txt 2019-07-07 09:17:41.241241144 +0100 +@@ -4149,6 +4149,14 @@ + Memory area to be used by remote processor image, + managed by CMA. + ++ rqshare= [X86] Select the MuQSS scheduler runqueue sharing type. 
++ Format: <string> ++ smt -- Share SMT (hyperthread) sibling runqueues ++ mc -- Share MC (multicore) sibling runqueues ++ smp -- Share SMP runqueues ++ none -- So not share any runqueues ++ Default value is mc ++ + rw [KNL] Mount root device read-write on boot + + S [KNL] Run init in single mode +diff -Nur a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt +--- a/Documentation/scheduler/sched-BFS.txt 1970-01-01 01:00:00.000000000 +0100 ++++ b/Documentation/scheduler/sched-BFS.txt 2019-07-07 09:17:41.241241144 +0100 +@@ -0,0 +1,351 @@ ++BFS - The Brain Fuck Scheduler by Con Kolivas. ++ ++Goals. ++ ++The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to ++completely do away with the complex designs of the past for the cpu process ++scheduler and instead implement one that is very simple in basic design. ++The main focus of BFS is to achieve excellent desktop interactivity and ++responsiveness without heuristics and tuning knobs that are difficult to ++understand, impossible to model and predict the effect of, and when tuned to ++one workload cause massive detriment to another. ++ ++ ++Design summary. ++ ++BFS is best described as a single runqueue, O(n) lookup, earliest effective ++virtual deadline first design, loosely based on EEVDF (earliest eligible virtual ++deadline first) and my previous Staircase Deadline scheduler. Each component ++shall be described in order to understand the significance of, and reasoning for ++it. The codebase when the first stable version was released was approximately ++9000 lines less code than the existing mainline linux kernel scheduler (in ++2.6.31). This does not even take into account the removal of documentation and ++the cgroups code that is not used. ++ ++Design reasoning. ++ ++The single runqueue refers to the queued but not running processes for the ++entire system, regardless of the number of CPUs. The reason for going back to ++a single runqueue design is that once multiple runqueues are introduced, ++per-CPU or otherwise, there will be complex interactions as each runqueue will ++be responsible for the scheduling latency and fairness of the tasks only on its ++own runqueue, and to achieve fairness and low latency across multiple CPUs, any ++advantage in throughput of having CPU local tasks causes other disadvantages. ++This is due to requiring a very complex balancing system to at best achieve some ++semblance of fairness across CPUs and can only maintain relatively low latency ++for tasks bound to the same CPUs, not across them. To increase said fairness ++and latency across CPUs, the advantage of local runqueue locking, which makes ++for better scalability, is lost due to having to grab multiple locks. ++ ++A significant feature of BFS is that all accounting is done purely based on CPU ++used and nowhere is sleep time used in any way to determine entitlement or ++interactivity. Interactivity "estimators" that use some kind of sleep/run ++algorithm are doomed to fail to detect all interactive tasks, and to falsely tag ++tasks that aren't interactive as being so. The reason for this is that it is ++close to impossible to determine that when a task is sleeping, whether it is ++doing it voluntarily, as in a userspace application waiting for input in the ++form of a mouse click or otherwise, or involuntarily, because it is waiting for ++another thread, process, I/O, kernel activity or whatever. 
Thus, such an ++estimator will introduce corner cases, and more heuristics will be required to ++cope with those corner cases, introducing more corner cases and failed ++interactivity detection and so on. Interactivity in BFS is built into the design ++by virtue of the fact that tasks that are waking up have not used up their quota ++of CPU time, and have earlier effective deadlines, thereby making it very likely ++they will preempt any CPU bound task of equivalent nice level. See below for ++more information on the virtual deadline mechanism. Even if they do not preempt ++a running task, because the rr interval is guaranteed to have a bound upper ++limit on how long a task will wait for, it will be scheduled within a timeframe ++that will not cause visible interface jitter. ++ ++ ++Design details. ++ ++Task insertion. ++ ++BFS inserts tasks into each relevant queue as an O(1) insertion into a double ++linked list. On insertion, *every* running queue is checked to see if the newly ++queued task can run on any idle queue, or preempt the lowest running task on the ++system. This is how the cross-CPU scheduling of BFS achieves significantly lower ++latency per extra CPU the system has. In this case the lookup is, in the worst ++case scenario, O(n) where n is the number of CPUs on the system. ++ ++Data protection. ++ ++BFS has one single lock protecting the process local data of every task in the ++global queue. Thus every insertion, removal and modification of task data in the ++global runqueue needs to grab the global lock. However, once a task is taken by ++a CPU, the CPU has its own local data copy of the running process' accounting ++information which only that CPU accesses and modifies (such as during a ++timer tick) thus allowing the accounting data to be updated lockless. Once a ++CPU has taken a task to run, it removes it from the global queue. Thus the ++global queue only ever has, at most, ++ ++ (number of tasks requesting cpu time) - (number of logical CPUs) + 1 ++ ++tasks in the global queue. This value is relevant for the time taken to look up ++tasks during scheduling. This will increase if many tasks with CPU affinity set ++in their policy to limit which CPUs they're allowed to run on if they outnumber ++the number of CPUs. The +1 is because when rescheduling a task, the CPU's ++currently running task is put back on the queue. Lookup will be described after ++the virtual deadline mechanism is explained. ++ ++Virtual deadline. ++ ++The key to achieving low latency, scheduling fairness, and "nice level" ++distribution in BFS is entirely in the virtual deadline mechanism. The one ++tunable in BFS is the rr_interval, or "round robin interval". This is the ++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy) ++tasks of the same nice level will be running for, or looking at it the other ++way around, the longest duration two tasks of the same nice level will be ++delayed for. When a task requests cpu time, it is given a quota (time_slice) ++equal to the rr_interval and a virtual deadline. The virtual deadline is ++offset from the current time in jiffies by this equation: ++ ++ jiffies + (prio_ratio * rr_interval) ++ ++The prio_ratio is determined as a ratio compared to the baseline of nice -20 ++and increases by 10% per nice level. The deadline is a virtual one only in that ++no guarantee is placed that a task will actually be scheduled by this time, but ++it is used to compare which task should go next. 
There are three components to ++how a task is next chosen. First is time_slice expiration. If a task runs out ++of its time_slice, it is descheduled, the time_slice is refilled, and the ++deadline reset to that formula above. Second is sleep, where a task no longer ++is requesting CPU for whatever reason. The time_slice and deadline are _not_ ++adjusted in this case and are just carried over for when the task is next ++scheduled. Third is preemption, and that is when a newly waking task is deemed ++higher priority than a currently running task on any cpu by virtue of the fact ++that it has an earlier virtual deadline than the currently running task. The ++earlier deadline is the key to which task is next chosen for the first and ++second cases. Once a task is descheduled, it is put back on the queue, and an ++O(n) lookup of all queued-but-not-running tasks is done to determine which has ++the earliest deadline and that task is chosen to receive CPU next. ++ ++The CPU proportion of different nice tasks works out to be approximately the ++ ++ (prio_ratio difference)^2 ++ ++The reason it is squared is that a task's deadline does not change while it is ++running unless it runs out of time_slice. Thus, even if the time actually ++passes the deadline of another task that is queued, it will not get CPU time ++unless the current running task deschedules, and the time "base" (jiffies) is ++constantly moving. ++ ++Task lookup. ++ ++BFS has 103 priority queues. 100 of these are dedicated to the static priority ++of realtime tasks, and the remaining 3 are, in order of best to worst priority, ++SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority ++scheduling). When a task of these priorities is queued, a bitmap of running ++priorities is set showing which of these priorities has tasks waiting for CPU ++time. When a CPU is made to reschedule, the lookup for the next task to get ++CPU time is performed in the following way: ++ ++First the bitmap is checked to see what static priority tasks are queued. If ++any realtime priorities are found, the corresponding queue is checked and the ++first task listed there is taken (provided CPU affinity is suitable) and lookup ++is complete. If the priority corresponds to a SCHED_ISO task, they are also ++taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds ++to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this ++stage, every task in the runlist that corresponds to that priority is checked ++to see which has the earliest set deadline, and (provided it has suitable CPU ++affinity) it is taken off the runqueue and given the CPU. If a task has an ++expired deadline, it is taken and the rest of the lookup aborted (as they are ++chosen in FIFO order). ++ ++Thus, the lookup is O(n) in the worst case only, where n is as described ++earlier, as tasks may be chosen before the whole task list is looked over. ++ ++ ++Scalability. ++ ++The major limitations of BFS will be that of scalability, as the separate ++runqueue designs will have less lock contention as the number of CPUs rises. ++However they do not scale linearly even with separate runqueues as multiple ++runqueues will need to be locked concurrently on such designs to be able to ++achieve fair CPU balancing, to try and achieve some sort of nice-level fairness ++across CPUs, and to achieve low enough latency for tasks on a busy CPU when ++other CPUs would be more suited. 
BFS has the advantage that it requires no
++balancing algorithm whatsoever, as balancing occurs by proxy simply because
++all CPUs draw off the global runqueue, in priority and deadline order. Despite
++the fact that scalability is _not_ the prime concern of BFS, it both shows very
++good scalability to smaller numbers of CPUs and is likely a more scalable design
++at these numbers of CPUs.
++
++It also has some very low overhead scalability features built into the design
++when it has been deemed their overhead is so marginal that they're worth adding.
++The first is the local copy of the running process' data to the CPU it's running
++on to allow that data to be updated lockless where possible. Then there is
++deference paid to the last CPU a task was running on, by trying that CPU first
++when looking for an idle CPU to use the next time it's scheduled. Finally there
++is the notion of cache locality beyond the last running CPU. The sched_domains
++information is used to determine the relative virtual "cache distance" that
++other CPUs have from the last CPU a task was running on. CPUs with shared
++caches, such as SMT siblings, or multicore CPUs with shared caches, are treated
++as cache local. CPUs without shared caches are treated as not cache local, and
++CPUs on different NUMA nodes are treated as very distant. This "relative cache
++distance" is used by modifying the virtual deadline value when doing lookups;
++a minimal code sketch of this scaling follows the preference list below.
++Effectively, the deadline is unaltered between "cache local" CPUs, doubled for
++"cache distant" CPUs, and quadrupled for "very distant" CPUs. The reasoning
++behind the doubling of deadlines is as follows. The real cost of migrating a
++task from one CPU to another is entirely dependant on the cache footprint of
++the task, how cache intensive the task is, how long it's been running on that
++CPU to take up the bulk of its cache, how big the CPU cache is, how fast and
++how layered the CPU cache is, how fast a context switch is... and so on. In
++other words, it's close to random in the real world where we do more than just
++one sole workload. The only thing we can be sure of is that it's not free. So
++BFS uses the principle that an idle CPU is a wasted CPU and utilising idle CPUs
++is more important than cache locality, and cache locality only plays a part
++after that. Doubling the effective deadline is based on the premise that the
++"cache local" CPUs will tend to work on the same tasks up to double the number
++of cache local CPUs, and once the workload is beyond that amount, it is likely
++that none of the tasks are cache warm anywhere anyway. The quadrupling for NUMA
++is a value I pulled out of my arse.
++
++When choosing an idle CPU for a waking task, the cache locality is determined
++according to where the task last ran and then idle CPUs are ranked from best
++to worst to choose the most suitable idle CPU based on cache locality, NUMA
++node locality and hyperthread sibling busyness. They are chosen in the
++following preference (if idle):
++
++* Same core, idle or busy cache, idle threads
++* Other core, same cache, idle or busy cache, idle threads.
++* Same node, other CPU, idle cache, idle threads.
++* Same node, other CPU, busy cache, idle threads.
++* Same core, busy threads.
++* Other core, same cache, busy threads.
++* Same node, other CPU, busy threads.
++* Other node, other CPU, idle cache, idle threads.
++* Other node, other CPU, busy cache, idle threads.
++* Other node, other CPU, busy threads.
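++
++A minimal sketch of the cache distance scaling mentioned above (illustrative
++only, not the actual BFS code; the helper name and the 0/1/2 distance encoding
++are assumptions made for this example):
++
++	/*
++	 * locality: 0 = cache local, 1 = cache distant, 2 = very distant
++	 * (NUMA). The deadline offset is left unaltered, doubled or
++	 * quadrupled before tasks are compared across CPUs.
++	 */
++	static u64 scale_deadline_offset(u64 offset, int locality)
++	{
++		return offset << locality;
++	}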
++
++The preference list above shows the SMT or "hyperthread" awareness in the
++design as well, which will choose a real idle core first before a logical SMT
++sibling which already has tasks on the physical CPU.
++
++Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
++However this benchmarking was performed on an earlier design that was far less
++scalable than the current one so it's hard to know how scalable it is in terms
++of both CPUs (due to the global runqueue) and heavily loaded machines (due to
++O(n) lookup) at this stage. Note that in terms of scalability, the number of
++_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x)
++quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark
++results are very promising indeed, without needing to tweak any knobs, features
++or options. Benchmark contributions are most welcome.
++
++
++Features
++
++As the initial prime target audience for BFS was the average desktop user, it
++was designed to not need tweaking, tuning or have features set to obtain benefit
++from it. Thus the number of knobs and features has been kept to an absolute
++minimum and should not require extra user input for the vast majority of cases.
++There are precisely 2 tunables, and 2 extra scheduling policies. The rr_interval
++and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition
++to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is
++support for CGROUPS. The average user should neither need to know what these
++are, nor should they need to be using them to have good desktop behaviour.
++
++rr_interval
++
++There is only one "scheduler" tunable, the round robin interval. This can be
++accessed in
++
++ /proc/sys/kernel/rr_interval
++
++The value is in milliseconds, and the default value is set to 6 on a
++uniprocessor machine, and automatically set to a progressively higher value on
++multiprocessor machines. The reasoning behind increasing the value on more CPUs
++is that the effective latency is decreased by virtue of there being more CPUs on
++BFS (for reasons explained above), and increasing the value allows for less
++cache contention and more throughput. Valid values are from 1 to 1000.
++Decreasing the value will decrease latencies at the cost of decreasing
++throughput, while increasing it will improve throughput, but at the cost of
++worsening latencies. The accuracy of the rr interval is limited by HZ resolution
++of the kernel configuration. Thus, the worst case latencies are usually slightly
++higher than this actual value. The default value of 6 is not an arbitrary one.
++It is based on the fact that humans can detect jitter at approximately 7ms, so
++aiming for much lower latencies is pointless under most circumstances. It is
++worth noting this fact when comparing the latency performance of BFS to other
++schedulers. Worst case latencies being higher than 7ms are far worse than
++average latencies not being in the microsecond range.
++
++Isochronous scheduling.
++
++Isochronous scheduling is a unique scheduling policy designed to provide
++near-real-time performance to unprivileged (ie non-root) users without the
++ability to starve the machine indefinitely. Isochronous tasks (which means
++"same time") are set using, for example, the schedtool application like so:
++
++ schedtool -I -e amarok
++
++This will start the audio application "amarok" as SCHED_ISO.
How SCHED_ISO works
++is that it has a priority level between true realtime tasks and SCHED_NORMAL
++which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
++if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
++rate). However if ISO tasks run for more than a tunable finite amount of time,
++they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
++time is the percentage of _total CPU_ available across the machine, configurable
++as a percentage in the following "resource handling" tunable (as opposed to a
++scheduler tunable):
++
++ /proc/sys/kernel/iso_cpu
++
++and is set to 70% by default. It is calculated over a rolling 5 second average.
++Because it is the total CPU available, it means that on a multi CPU machine, it
++is possible to have an ISO task running as realtime scheduling indefinitely on
++just one CPU, as the other CPUs will be available. Setting this to 100 is the
++equivalent of giving all users SCHED_RR access and setting it to 0 removes the
++ability to run any pseudo-realtime tasks.
++
++A feature of BFS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
++Because some applications constantly set their policy as well as their nice
++level, there is potential for them to undo the override specified by the user
++on the command line of setting the policy to SCHED_ISO. To counter this, once
++a task has been set to SCHED_ISO policy, it needs superuser privileges to set
++it back to SCHED_NORMAL. This will ensure the task remains ISO and all child
++processes and threads will also inherit the ISO policy.
++
++Idleprio scheduling.
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a task
++_only_ when the CPU would be otherwise idle. The idea behind this is to allow
++ultra low priority tasks to be run in the background that have virtually no
++effect on the foreground tasks. This is ideally suited to distributed computing
++clients (like setiathome, folding, mprime etc) but can also be used to start
++a video encode or so on without any slowdown of other tasks. To avoid this
++policy from grabbing shared resources and holding them indefinitely, if it
++detects a state where the task is waiting on I/O, the machine is about to
++suspend to ram and so on, it will transiently schedule them as SCHED_NORMAL. As
++per the Isochronous task management, once a task has been scheduled as IDLEPRIO,
++it cannot be put back to SCHED_NORMAL without superuser privileges. Tasks can
++be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++ schedtool -D -e ./mprime
++
++Subtick accounting.
++
++It is surprisingly difficult to get accurate CPU accounting, and in many cases,
++the accounting is done by simply determining what is happening at the precise
++moment a timer tick fires off. This becomes increasingly inaccurate as the
++timer tick frequency (HZ) is lowered. It is possible to create an application
++which uses almost 100% CPU, yet by being descheduled at the right time, records
++zero CPU usage. While the main problem with this is that there are possible
++security implications, it is also difficult to determine how much CPU a task
++really does use.
BFS tries to use the sub-tick accounting from the TSC clock,
++where possible, to determine real CPU usage. This is not entirely reliable, but
++is far more likely to produce accurate CPU usage data than the existing designs
++and will not show tasks as consuming no CPU usage when they actually are. Thus,
++the amount of CPU reported as being used by BFS will more accurately represent
++how much CPU the task itself is using (as is shown for example by the 'time'
++application), so the reported values may be quite different to other schedulers.
++Values reported as the 'load' are more prone to problems with this design, but
++per process values are closer to real usage. When comparing throughput of BFS
++to other designs, it is important to compare the actual completed work in terms
++of total wall clock time taken and total work done, rather than the reported
++"cpu usage".
++
++
++Con Kolivas <kernel@kolivas.org> Fri Aug 27 2010
+diff -Nur a/Documentation/scheduler/sched-MuQSS.txt b/Documentation/scheduler/sched-MuQSS.txt
+--- a/Documentation/scheduler/sched-MuQSS.txt 1970-01-01 01:00:00.000000000 +0100
++++ b/Documentation/scheduler/sched-MuQSS.txt 2019-07-07 09:17:41.241241144 +0100
+@@ -0,0 +1,373 @@
++MuQSS - The Multiple Queue Skiplist Scheduler by Con Kolivas.
++
++MuQSS is a per-cpu runqueue variant of the original BFS scheduler with
++one 8 level skiplist per runqueue, and fine grained locking for much more
++scalability.
++
++
++Goals.
++
++The goal of the Multiple Queue Skiplist Scheduler, referred to as MuQSS from
++here on (pronounced mux) is to completely do away with the complex designs of
++the past for the cpu process scheduler and instead implement one that is very
++simple in basic design. The main focus of MuQSS is to achieve excellent desktop
++interactivity and responsiveness without heuristics and tuning knobs that are
++difficult to understand, impossible to model and predict the effect of, and when
++tuned to one workload cause massive detriment to another, while still being
++scalable to many CPUs and processes.
++
++
++Design summary.
++
++MuQSS is best described as per-cpu multiple runqueue, O(log n) insertion, O(1)
++lookup, earliest effective virtual deadline first tickless design, loosely based
++on EEVDF (earliest eligible virtual deadline first) and my previous Staircase
++Deadline scheduler, and evolved from the single runqueue O(n) BFS scheduler.
++Each component shall be described in order to understand the significance of,
++and reasoning for it.
++
++
++Design reasoning.
++
++In BFS, the use of a single runqueue across all CPUs meant that each CPU would
++need to scan the entire runqueue looking for the process with the earliest
++deadline and schedule that next, regardless of which CPU it originally came
++from. This made BFS deterministic with respect to latency and provided
++guaranteed latencies dependent on number of processes and CPUs. The single
++runqueue, however, meant that all CPUs would compete for the single lock
++protecting it, which would lead to increasing lock contention as the number of
++CPUs rose and appeared to limit scalability of common workloads beyond 16
++logical CPUs. Additionally, the O(n) lookup of the runqueue list obviously
++increased overhead proportionate to the number of queued processes and led to
++cache thrashing while iterating over the linked list.
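++
++For illustration only (this is not the actual BFS code; the helpers
++cpu_suits() and deadline_before() are invented for this sketch), the O(n)
++lookup described above is essentially:
++
++	struct task_struct *p, *best = NULL;
++
++	/* Walk every queued-but-not-running task, keeping the earliest
++	 * virtual deadline among those this CPU is allowed to run. */
++	list_for_each_entry(p, &global_runqueue, run_list)
++		if (cpu_suits(p, cpu) && (!best || deadline_before(p, best)))
++			best = p;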
++ ++MuQSS is an evolution of BFS, designed to maintain the same scheduling ++decision mechanism and be virtually deterministic without relying on the ++constrained design of the single runqueue by splitting out the single runqueue ++to be per-CPU and use skiplists instead of linked lists. ++ ++The original reason for going back to a single runqueue design for BFS was that ++once multiple runqueues are introduced, per-CPU or otherwise, there will be ++complex interactions as each runqueue will be responsible for the scheduling ++latency and fairness of the tasks only on its own runqueue, and to achieve ++fairness and low latency across multiple CPUs, any advantage in throughput of ++having CPU local tasks causes other disadvantages. This is due to requiring a ++very complex balancing system to at best achieve some semblance of fairness ++across CPUs and can only maintain relatively low latency for tasks bound to the ++same CPUs, not across them. To increase said fairness and latency across CPUs, ++the advantage of local runqueue locking, which makes for better scalability, is ++lost due to having to grab multiple locks. ++ ++MuQSS works around the problems inherent in multiple runqueue designs by ++making its skip lists priority ordered and through novel use of lockless ++examination of each other runqueue it can decide if it should take the earliest ++deadline task from another runqueue for latency reasons, or for CPU balancing ++reasons. It still does not have a balancing system, choosing to allow the ++next task scheduling decision and task wakeup CPU choice to allow balancing to ++happen by virtue of its choices. ++ ++As a further evolution of the design, MuQSS normally configures sharing of ++runqueues in a logical fashion for when CPU resources are shared for improved ++latency and throughput. By default it shares runqueues and locks between ++multicore siblings. Optionally it can be configured to run with sharing of ++SMT siblings only, all SMP packages or no sharing at all. Additionally it can ++be selected at boot time. ++ ++ ++Design details. ++ ++Custom skip list implementation: ++ ++To avoid the overhead of building up and tearing down skip list structures, ++the variant used by MuQSS has a number of optimisations making it specific for ++its use case in the scheduler. It uses static arrays of 8 'levels' instead of ++building up and tearing down structures dynamically. This makes each runqueue ++only scale O(log N) up to 64k tasks. However as there is one runqueue per CPU ++it means that it scales O(log N) up to 64k x number of logical CPUs which is ++far beyond the realistic task limits each CPU could handle. By being 8 levels ++it also makes the array exactly one cacheline in size. Additionally, each ++skip list node is bidirectional making insertion and removal amortised O(1), ++being O(k) where k is 1-8. Uniquely, we are only ever interested in the very ++first entry in each list at all times with MuQSS, so there is never a need to ++do a search and thus look up is always O(1). In interactive mode, the queues ++will be searched beyond their first entry if the first task is not suitable ++for affinity or SMT nice reasons. ++ ++Task insertion: ++ ++MuQSS inserts tasks into a per CPU runqueue as an O(log N) insertion into ++a custom skip list as described above (based on the original design by William ++Pugh). 
Insertion is ordered in such a way that there is never a need to do a
++search by ordering tasks according to static priority primarily, and then
++virtual deadline at the time of insertion.
++
++Niffies:
++
++Niffies are a monotonic forward moving timer not unlike the "jiffies" but are
++of nanosecond resolution. Niffies are calculated per-runqueue from the high
++resolution TSC timers, and in order to maintain fairness are synchronised
++between CPUs whenever both runqueues are locked concurrently.
++
++Virtual deadline:
++
++The key to achieving low latency, scheduling fairness, and "nice level"
++distribution in MuQSS is entirely in the virtual deadline mechanism. The one
++tunable in MuQSS is the rr_interval, or "round robin interval". This is the
++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
++tasks of the same nice level will be running for, or looking at it the other
++way around, the longest duration two tasks of the same nice level will be
++delayed for. When a task requests cpu time, it is given a quota (time_slice)
++equal to the rr_interval and a virtual deadline. The virtual deadline is
++offset from the current time in niffies by this equation:
++
++ niffies + (prio_ratio * rr_interval)
++
++The prio_ratio is determined as a ratio compared to the baseline of nice -20
++and increases by 10% per nice level. The deadline is a virtual one only in that
++no guarantee is placed that a task will actually be scheduled by this time, but
++it is used to compare which task should go next. There are three components to
++how a task is next chosen. First is time_slice expiration. If a task runs out
++of its time_slice, it is descheduled, the time_slice is refilled, and the
++deadline reset to that formula above. Second is sleep, where a task no longer
++is requesting CPU for whatever reason. The time_slice and deadline are _not_
++adjusted in this case and are just carried over for when the task is next
++scheduled. Third is preemption, and that is when a newly waking task is deemed
++higher priority than a currently running task on any cpu by virtue of the fact
++that it has an earlier virtual deadline than the currently running task. The
++earlier deadline is the key to which task is next chosen for the first and
++second cases.
++
++The CPU proportion of different nice tasks works out to be approximately the
++
++ (prio_ratio difference)^2
++
++The reason it is squared is that a task's deadline does not change while it is
++running unless it runs out of time_slice. Thus, even if the time actually
++passes the deadline of another task that is queued, it will not get CPU time
++unless the current running task deschedules, and the time "base" (niffies) is
++constantly moving.
++
++Task lookup:
++
++As tasks are already pre-ordered according to anticipated scheduling order in
++the skip lists, lookup for the next suitable task per-runqueue is always a
++matter of simply selecting the first task in the 0th level skip list entry.
++In order to maintain optimal latency and fairness across CPUs, MuQSS does a
++novel examination of every other runqueue in cache locality order, choosing the
++best task across all runqueues. This provides near-determinism of how long any
++task across the entire system may wait before receiving CPU time. The other
++runqueues are first examined lockless and then trylocked to minimise the
++potential lock contention if they are likely to have a suitable better task.
++
++Each other runqueue lock is only held for as long as it takes to examine the
++entry for suitability. In "interactive" mode, the default setting, MuQSS will
++look for the best deadline task across all CPUs, while in !interactive mode,
++it will only select a better deadline task from another CPU if it is more
++heavily laden than the current one.
++
++Lookup is therefore O(k) where k is number of CPUs.
++
++
++Latency.
++
++Through the use of virtual deadlines to govern the scheduling order of normal
++tasks, queue-to-activation latency per runqueue is guaranteed to be bound by
++the rr_interval tunable which is set to 6ms by default. This means that the
++longest a CPU bound task will wait for more CPU is proportional to the number
++of running tasks and in the common case of 0-2 running tasks per CPU, will be
++under the 7ms threshold for human perception of jitter. Additionally, as newly
++woken tasks will have an early deadline from their previous runtime, the very
++tasks that are usually latency sensitive will have the shortest interval for
++activation, usually preempting any existing CPU bound tasks.
++
++Tickless expiry:
++
++A feature of MuQSS is that it is not tied to the resolution of the chosen tick
++rate in Hz, instead depending entirely on the high resolution timers where
++possible for sub-millisecond accuracy on timeouts regardless of the underlying
++tick rate. This allows MuQSS to be run with the low overhead of low Hz rates
++such as 100 by default, benefiting from the improved throughput and lower
++power usage it provides. Another advantage of this approach is that in
++combination with the Full No HZ option, which disables ticks on running task
++CPUs instead of just idle CPUs, the tick can be disabled at all times
++regardless of how many tasks are running instead of being limited to just one
++running task. Note that this option is NOT recommended for regular desktop
++users.
++
++
++Scalability and balancing.
++
++Unlike traditional approaches where balancing is a combination of CPU selection
++at task wakeup and intermittent balancing based on a vast array of rules set
++according to architecture, busyness calculations and special case management,
++MuQSS indirectly balances on the fly at task wakeup and next task selection.
++During initialisation, MuQSS creates a cache coherency ordered list of CPUs for
++each logical CPU and uses this to aid task/CPU selection when CPUs are busy.
++Additionally it selects any idle CPUs, if they are available, at any time over
++busy CPUs according to the following preference:
++
++ * Same thread, idle or busy cache, idle or busy threads
++ * Other core, same cache, idle or busy cache, idle threads.
++ * Same node, other CPU, idle cache, idle threads.
++ * Same node, other CPU, busy cache, idle threads.
++ * Other core, same cache, busy threads.
++ * Same node, other CPU, busy threads.
++ * Other node, other CPU, idle cache, idle threads.
++ * Other node, other CPU, busy cache, idle threads.
++ * Other node, other CPU, busy threads.
++
++Mux is therefore SMT, MC and NUMA aware without the need for extra
++intermittent balancing to maintain CPUs busy and make the most of cache
++coherency.
++
++
++Features
++
++As the initial prime target audience for MuQSS was the average desktop user, it
++was designed to not need tweaking, tuning or have features set to obtain benefit
++from it.
Thus the number of knobs and features has been kept to an absolute
++minimum and should not require extra user input for the vast majority of cases.
++There are 3 optional tunables, and 2 extra scheduling policies. The rr_interval,
++interactive, and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO
++policies. In addition to this, MuQSS also uses sub-tick accounting. What MuQSS
++does _not_ now feature is support for CGROUPS. The average user should neither
++need to know what these are, nor should they need to be using them to have good
++desktop behaviour. However since some applications refuse to work without
++cgroups, one can enable them with MuQSS as a stub and the filesystem will be
++created which will allow the applications to work.
++
++rr_interval:
++
++ /proc/sys/kernel/rr_interval
++
++The value is in milliseconds, and the default value is set to 6. Valid values
++are from 1 to 1000. Decreasing the value will decrease latencies at the cost of
++decreasing throughput, while increasing it will improve throughput, but at the
++cost of worsening latencies. It is based on the fact that humans can detect
++jitter at approximately 7ms, so aiming for much lower latencies is pointless
++under most circumstances. It is worth noting this fact when comparing the
++latency performance of MuQSS to other schedulers. Worst case latencies being
++higher than 7ms are far worse than average latencies not being in the
++microsecond range.
++
++interactive:
++
++ /proc/sys/kernel/interactive
++
++The value is a simple boolean of 1 for on and 0 for off and is set to on by
++default. Disabling this will disable the near-determinism of MuQSS when
++selecting the next task by not examining all CPUs for the earliest deadline
++task, or which CPU to wake to, instead prioritising CPU balancing for improved
++throughput. Latency will still be bound by rr_interval, but on a per-CPU basis
++instead of across the whole system.
++
++Runqueue sharing.
++
++By default MuQSS chooses to share runqueue resources (specifically the skip
++list and locking) between multicore siblings. It is configurable at build time
++to select between None, SMT, MC, SMP and All, corresponding to no sharing,
++sharing only between simultaneous multithreading siblings, multicore siblings,
++symmetric multiprocessing physical packages, or all CPUs regardless of NUMA
++node. Additionally it can be set at
++boot time with the use of the rqshare parameter. The reason for configurability
++is that some architectures have CPUs with many multicore siblings (>= 16)
++where it may be detrimental to throughput to share runqueues and another
++sharing option may be desirable. Additionally, more sharing than usual can
++improve latency on a system-wide level at the expense of throughput if desired.
++
++The options are:
++none, smt, mc, smp, all
++
++eg:
++ rqshare=mc
++
++Isochronous scheduling:
++
++Isochronous scheduling is a unique scheduling policy designed to provide
++near-real-time performance to unprivileged (ie non-root) users without the
++ability to starve the machine indefinitely. Isochronous tasks (which means
++"same time") are set using, for example, the schedtool application like so:
++
++ schedtool -I -e amarok
++
++This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
++is that it has a priority level between true realtime tasks and SCHED_NORMAL
++which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
++if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
++rate).
However if ISO tasks run for more than a tunable finite amount of time,
++they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
++time is the percentage of CPU available per CPU, configurable as a percentage in
++the following "resource handling" tunable (as opposed to a scheduler tunable):
++
++iso_cpu:
++
++ /proc/sys/kernel/iso_cpu
++
++and is set to 70% by default. It is calculated over a rolling 5 second average.
++Because it is the total CPU available, it means that on a multi CPU machine, it
++is possible to have an ISO task running as realtime scheduling indefinitely on
++just one CPU, as the other CPUs will be available. Setting this to 100 is the
++equivalent of giving all users SCHED_RR access and setting it to 0 removes the
++ability to run any pseudo-realtime tasks.
++
++A feature of MuQSS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
++
++
++Idleprio scheduling:
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a task
++_only_ when the CPU would be otherwise idle. The idea behind this is to allow
++ultra low priority tasks to be run in the background that have virtually no
++effect on the foreground tasks. This is ideally suited to distributed computing
++clients (like setiathome, folding, mprime etc) but can also be used to start a
++video encode or so on without any slowdown of other tasks. To avoid this policy
++from grabbing shared resources and holding them indefinitely, if it detects a
++state where the task is waiting on I/O, the machine is about to suspend to ram
++and so on, it will transiently schedule them as SCHED_NORMAL. Once a task has
++been scheduled as IDLEPRIO, it cannot be put back to SCHED_NORMAL without
++superuser privileges since it is effectively a lower scheduling policy. Tasks
++can be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++schedtool -D -e ./mprime
++
++Subtick accounting:
++
++It is surprisingly difficult to get accurate CPU accounting, and in many cases,
++the accounting is done by simply determining what is happening at the precise
++moment a timer tick fires off. This becomes increasingly inaccurate as the timer
++tick frequency (HZ) is lowered. It is possible to create an application which
++uses almost 100% CPU, yet by being descheduled at the right time, records zero
++CPU usage. While the main problem with this is that there are possible security
++implications, it is also difficult to determine how much CPU a task really does
++use. Mux uses sub-tick accounting from the TSC clock to determine real CPU
++usage. Thus, the amount of CPU reported as being used by MuQSS will more
++accurately represent how much CPU the task itself is using (as is shown for
++example by the 'time' application), so the reported values may be quite
++different to other schedulers. When comparing throughput of MuQSS to other
++designs, it is important to compare the actual completed work in terms of total
++wall clock time taken and total work done, rather than the reported "cpu usage".
++
++Symmetric MultiThreading (SMT) aware nice:
++
++SMT, a.k.a. hyperthreading, is a very common feature on modern CPUs.
While the ++logical CPU count rises by adding thread units to each CPU core, allowing more ++than one task to be run simultaneously on the same core, the disadvantage of it ++is that the CPU power is shared between the tasks, not summating to the power ++of two CPUs. The practical upshot of this is that two tasks running on ++separate threads of the same core run significantly slower than if they had one ++core each to run on. While smart CPU selection allows each task to have a core ++to itself whenever available (as is done on MuQSS), it cannot offset the ++slowdown that occurs when the cores are all loaded and only a thread is left. ++Most of the time this is harmless as the CPU is effectively overloaded at this ++point and the extra thread is of benefit. However when running a niced task in ++the presence of an un-niced task (say nice 19 v nice 0), the nice task gets ++precisely the same amount of CPU power as the unniced one. MuQSS has an ++optional configuration feature known as SMT-NICE which selectively idles the ++secondary niced thread for a period proportional to the nice difference, ++allowing CPU distribution according to nice level to be maintained, at the ++expense of a small amount of extra overhead. If this is configured in on a ++machine without SMT threads, the overhead is minimal. ++ ++ ++Con Kolivas <kernel@kolivas.org> Sat, 29th October 2016 +diff -Nur a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt +--- a/Documentation/sysctl/kernel.txt 2019-07-07 09:08:19.122347611 +0100 ++++ b/Documentation/sysctl/kernel.txt 2019-07-07 09:17:41.251241479 +0100 +@@ -41,6 +41,7 @@ + - hung_task_check_interval_secs + - hung_task_warnings + - hyperv_record_panic_msg ++- iso_cpu + - kexec_load_disabled + - kptr_restrict + - l2cr [ PPC only ] +@@ -77,6 +78,7 @@ + - randomize_va_space + - real-root-dev ==> Documentation/admin-guide/initrd.rst + - reboot-cmd [ SPARC only ] ++- rr_interval + - rtsig-max + - rtsig-nr + - sched_energy_aware +@@ -101,6 +103,7 @@ + - unknown_nmi_panic + - watchdog + - watchdog_thresh ++- yield_type + - version + + ============================================================== +@@ -439,6 +442,16 @@ + + ============================================================== + ++iso_cpu: (MuQSS CPU scheduler only). ++ ++This sets the percentage cpu that the unprivileged SCHED_ISO tasks can ++run effectively at realtime priority, averaged over a rolling five ++seconds over the -whole- system, meaning all cpus. ++ ++Set to 70 (percent) by default. ++ ++============================================================== ++ + l2cr: (PPC only) + + This flag controls the L2 cache of G3 processor boards. If +@@ -882,6 +895,20 @@ + + ============================================================== + ++rr_interval: (MuQSS CPU scheduler only) ++ ++This is the smallest duration that any cpu process scheduling unit ++will run for. Increasing this value can increase throughput of cpu ++bound tasks substantially but at the expense of increased latencies ++overall. Conversely decreasing it will decrease average and maximum ++latencies but at the expense of throughput. This value is in ++milliseconds and the default value chosen depends on the number of ++cpus available at scheduler initialisation with a minimum of 6. ++ ++Valid values are from 1-1000. 
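++
++For example, to inspect the current value and (as root) raise it to favour
++throughput -- 10 here is an arbitrary illustrative value:
++
++ cat /proc/sys/kernel/rr_interval
++ echo 10 > /proc/sys/kernel/rr_interval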
++ ++============================================================== ++ + rtsig-max & rtsig-nr: + + The file rtsig-max can be used to tune the maximum number +@@ -1164,3 +1191,13 @@ + tunable to zero will disable lockup detection altogether. + + ============================================================== ++ ++yield_type: (MuQSS CPU scheduler only) ++ ++This determines what type of yield calls to sched_yield will perform. ++ ++ 0: No yield. ++ 1: Yield only to better priority/deadline tasks. (default) ++ 2: Expire timeslice and recalculate deadline. ++ ++============================================================== +diff -Nur a/fs/proc/base.c b/fs/proc/base.c +--- a/fs/proc/base.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/fs/proc/base.c 2019-07-07 09:17:41.251241479 +0100 +@@ -463,7 +463,7 @@ + seq_puts(m, "0 0 0\n"); + else + seq_printf(m, "%llu %llu %lu\n", +- (unsigned long long)task->se.sum_exec_runtime, ++ (unsigned long long)tsk_seruntime(task), + (unsigned long long)task->sched_info.run_delay, + task->sched_info.pcount); + +diff -Nur a/include/linux/init_task.h b/include/linux/init_task.h +--- a/include/linux/init_task.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/init_task.h 2019-07-07 09:17:41.251241479 +0100 +@@ -47,7 +47,11 @@ + #define INIT_CPU_TIMERS(s) + #endif + ++#ifdef CONFIG_SCHED_MUQSS ++#define INIT_TASK_COMM "MuQSS" ++#else + #define INIT_TASK_COMM "swapper" ++#endif + + /* Attach to the init_task data structure for proper alignment */ + #ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK +diff -Nur a/include/linux/ioprio.h b/include/linux/ioprio.h +--- a/include/linux/ioprio.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/ioprio.h 2019-07-07 09:17:41.251241479 +0100 +@@ -53,6 +53,8 @@ + */ + static inline int task_nice_ioprio(struct task_struct *task) + { ++ if (iso_task(task)) ++ return 0; + return (task_nice(task) + 20) / 5; + } + +diff -Nur a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h +--- a/include/linux/sched/nohz.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/sched/nohz.h 2019-07-07 09:17:41.251241479 +0100 +@@ -6,7 +6,7 @@ + * This is the interface between the scheduler and nohz/dynticks: + */ + +-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) ++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS) + extern void cpu_load_update_nohz_start(void); + extern void cpu_load_update_nohz_stop(void); + #else +@@ -21,7 +21,7 @@ + static inline void nohz_balance_enter_idle(int cpu) { } + #endif + +-#ifdef CONFIG_NO_HZ_COMMON ++#if defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS) + void calc_load_nohz_start(void); + void calc_load_nohz_stop(void); + #else +diff -Nur a/include/linux/sched/prio.h b/include/linux/sched/prio.h +--- a/include/linux/sched/prio.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/sched/prio.h 2019-07-07 09:17:41.251241479 +0100 +@@ -20,8 +20,20 @@ + */ + + #define MAX_USER_RT_PRIO 100 ++ ++#ifdef CONFIG_SCHED_MUQSS ++/* Note different MAX_RT_PRIO */ ++#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1) ++ ++#define ISO_PRIO (MAX_RT_PRIO) ++#define NORMAL_PRIO (MAX_RT_PRIO + 1) ++#define IDLE_PRIO (MAX_RT_PRIO + 2) ++#define PRIO_LIMIT ((IDLE_PRIO) + 1) ++#else /* CONFIG_SCHED_MUQSS */ + #define MAX_RT_PRIO MAX_USER_RT_PRIO + ++#endif /* CONFIG_SCHED_MUQSS */ ++ + #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) + #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) + +diff -Nur a/include/linux/sched/rt.h b/include/linux/sched/rt.h +--- a/include/linux/sched/rt.h 
2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/sched/rt.h 2019-07-07 09:17:41.251241479 +0100 +@@ -24,8 +24,10 @@ + + if (policy == SCHED_FIFO || policy == SCHED_RR) + return true; ++#ifndef CONFIG_SCHED_MUQSS + if (policy == SCHED_DEADLINE) + return true; ++#endif + return false; + } + +diff -Nur a/include/linux/sched/task.h b/include/linux/sched/task.h +--- a/include/linux/sched/task.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/sched/task.h 2019-07-07 09:17:41.251241479 +0100 +@@ -82,7 +82,7 @@ + extern void free_task(struct task_struct *tsk); + + /* sched_exec is called by processes performing an exec */ +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_MUQSS) + extern void sched_exec(void); + #else + #define sched_exec() {} +diff -Nur a/include/linux/sched.h b/include/linux/sched.h +--- a/include/linux/sched.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/sched.h 2019-07-07 09:17:41.251241479 +0100 +@@ -30,6 +30,9 @@ + #include <linux/mm_types_task.h> + #include <linux/task_io_accounting.h> + #include <linux/rseq.h> ++#ifdef CONFIG_SCHED_MUQSS ++#include <linux/skip_list.h> ++#endif + + /* task_struct member predeclarations (sorted alphabetically): */ + struct audit_context; +@@ -605,9 +608,11 @@ + unsigned int flags; + unsigned int ptrace; + ++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_MUQSS) ++ int on_cpu; ++#endif + #ifdef CONFIG_SMP + struct llist_node wake_entry; +- int on_cpu; + #ifdef CONFIG_THREAD_INFO_IN_TASK + /* Current CPU: */ + unsigned int cpu; +@@ -632,10 +637,25 @@ + int static_prio; + int normal_prio; + unsigned int rt_priority; ++#ifdef CONFIG_SCHED_MUQSS ++ int time_slice; ++ u64 deadline; ++ skiplist_node node; /* Skip list node */ ++ u64 last_ran; ++ u64 sched_time; /* sched_clock time spent running */ ++#ifdef CONFIG_SMT_NICE ++ int smt_bias; /* Policy/nice level bias across smt siblings */ ++#endif ++#ifdef CONFIG_HOTPLUG_CPU ++ bool zerobound; /* Bound to CPU0 for hotplug */ ++#endif ++ unsigned long rt_timeout; ++#else /* CONFIG_SCHED_MUQSS */ + + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; ++#endif + #ifdef CONFIG_CGROUP_SCHED + struct task_group *sched_task_group; + #endif +@@ -791,6 +811,10 @@ + u64 utimescaled; + u64 stimescaled; + #endif ++#ifdef CONFIG_SCHED_MUQSS ++ /* Unbanked cpu time */ ++ unsigned long utime_ns, stime_ns; ++#endif + u64 gtime; + struct prev_cputime prev_cputime; + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +@@ -1217,6 +1241,40 @@ + */ + }; + ++#ifdef CONFIG_SCHED_MUQSS ++#define tsk_seruntime(t) ((t)->sched_time) ++#define tsk_rttimeout(t) ((t)->rt_timeout) ++ ++static inline void tsk_cpus_current(struct task_struct *p) ++{ ++} ++ ++void print_scheduler_version(void); ++ ++static inline bool iso_task(struct task_struct *p) ++{ ++ return (p->policy == SCHED_ISO); ++} ++#else /* CFS */ ++#define tsk_seruntime(t) ((t)->se.sum_exec_runtime) ++#define tsk_rttimeout(t) ((t)->rt.timeout) ++ ++static inline void tsk_cpus_current(struct task_struct *p) ++{ ++ p->nr_cpus_allowed = current->nr_cpus_allowed; ++} ++ ++static inline void print_scheduler_version(void) ++{ ++ printk(KERN_INFO "CFS CPU scheduler.\n"); ++} ++ ++static inline bool iso_task(struct task_struct *p) ++{ ++ return false; ++} ++#endif /* CONFIG_SCHED_MUQSS */ ++ + static inline struct pid *task_pid(struct task_struct *task) + { + return task->thread_pid; +diff -Nur a/include/linux/skip_list.h b/include/linux/skip_list.h +--- a/include/linux/skip_list.h 1970-01-01 
01:00:00.000000000 +0100 ++++ b/include/linux/skip_list.h 2019-07-07 09:17:41.251241479 +0100 +@@ -0,0 +1,33 @@ ++#ifndef _LINUX_SKIP_LISTS_H ++#define _LINUX_SKIP_LISTS_H ++typedef u64 keyType; ++typedef void *valueType; ++ ++typedef struct nodeStructure skiplist_node; ++ ++struct nodeStructure { ++ int level; /* Levels in this structure */ ++ keyType key; ++ valueType value; ++ skiplist_node *next[8]; ++ skiplist_node *prev[8]; ++}; ++ ++typedef struct listStructure { ++ int entries; ++ int level; /* Maximum level of the list ++ (1 more than the number of levels in the list) */ ++ skiplist_node *header; /* pointer to header */ ++} skiplist; ++ ++void skiplist_init(skiplist_node *slnode); ++skiplist *new_skiplist(skiplist_node *slnode); ++void free_skiplist(skiplist *l); ++void skiplist_node_init(skiplist_node *node); ++void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed); ++void skiplist_delete(skiplist *l, skiplist_node *node); ++ ++static inline bool skiplist_node_empty(skiplist_node *node) { ++ return (!node->next[0]); ++} ++#endif /* _LINUX_SKIP_LISTS_H */ +diff -Nur a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h +--- a/include/uapi/linux/sched.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/uapi/linux/sched.h 2019-07-07 09:17:41.251241479 +0100 +@@ -37,9 +37,16 @@ + #define SCHED_FIFO 1 + #define SCHED_RR 2 + #define SCHED_BATCH 3 +-/* SCHED_ISO: reserved but not implemented yet */ ++/* SCHED_ISO: Implemented on MuQSS only */ + #define SCHED_IDLE 5 ++#ifdef CONFIG_SCHED_MUQSS ++#define SCHED_ISO 4 ++#define SCHED_IDLEPRIO SCHED_IDLE ++#define SCHED_MAX (SCHED_IDLEPRIO) ++#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX) ++#else /* CONFIG_SCHED_MUQSS */ + #define SCHED_DEADLINE 6 ++#endif /* CONFIG_SCHED_MUQSS */ + + /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ + #define SCHED_RESET_ON_FORK 0x40000000 +diff -Nur a/init/init_task.c b/init/init_task.c +--- a/init/init_task.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/init/init_task.c 2019-07-07 09:17:41.251241479 +0100 +@@ -68,9 +68,17 @@ + .stack = init_stack, + .usage = REFCOUNT_INIT(2), + .flags = PF_KTHREAD, ++#ifdef CONFIG_SCHED_MUQSS ++ .prio = NORMAL_PRIO, ++ .static_prio = MAX_PRIO-20, ++ .normal_prio = NORMAL_PRIO, ++ .deadline = 0, ++ .time_slice = 1000000, ++#else + .prio = MAX_PRIO - 20, + .static_prio = MAX_PRIO - 20, + .normal_prio = MAX_PRIO - 20, ++#endif + .policy = SCHED_NORMAL, + .cpus_allowed = CPU_MASK_ALL, + .nr_cpus_allowed= NR_CPUS, +@@ -79,6 +87,7 @@ + .restart_block = { + .fn = do_no_restart_syscall, + }, ++#ifndef CONFIG_SCHED_MUQSS + .se = { + .group_node = LIST_HEAD_INIT(init_task.se.group_node), + }, +@@ -86,6 +95,7 @@ + .run_list = LIST_HEAD_INIT(init_task.rt.run_list), + .time_slice = RR_TIMESLICE, + }, ++#endif + .tasks = LIST_HEAD_INIT(init_task.tasks), + #ifdef CONFIG_SMP + .pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO), +diff -Nur a/init/Kconfig b/init/Kconfig +--- a/init/Kconfig 2019-07-07 09:08:19.142348283 +0100 ++++ b/init/Kconfig 2019-07-07 09:17:41.251241479 +0100 +@@ -64,6 +64,18 @@ + + menu "General setup" + ++config SCHED_MUQSS ++ bool "MuQSS cpu scheduler" ++ select HIGH_RES_TIMERS ++ ---help--- ++ The Multiple Queue Skiplist Scheduler for excellent interactivity and ++ responsiveness on the desktop and highly scalable deterministic ++ low latency on any hardware. ++ ++ Say Y here. 
++ default y ++ ++ + config BROKEN + bool + +@@ -703,6 +715,7 @@ + depends on ARCH_SUPPORTS_NUMA_BALANCING + depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY + depends on SMP && NUMA && MIGRATION ++ depends on !SCHED_MUQSS + help + This option adds support for automatic NUMA aware memory/task placement. + The mechanism is quite primitive and is based on migrating memory when +@@ -810,9 +823,13 @@ + help + This feature lets CPU scheduler recognize task groups and control CPU + bandwidth allocation to such task groups. It uses cgroups to group +- tasks. ++ tasks. In combination with MuQSS this is purely a STUB to create the ++ files associated with the CPU controller cgroup but most of the ++ controls do nothing. This is useful for working in environments and ++ with applications that will only work if this control group is ++ present. + +-if CGROUP_SCHED ++if CGROUP_SCHED && !SCHED_MUQSS + config FAIR_GROUP_SCHED + bool "Group scheduling for SCHED_OTHER" + depends on CGROUP_SCHED +@@ -919,6 +936,7 @@ + + config CGROUP_CPUACCT + bool "Simple CPU accounting controller" ++ depends on !SCHED_MUQSS + help + Provides a simple controller for monitoring the + total CPU consumed by the tasks in a cgroup. +@@ -1037,6 +1055,7 @@ + + config SCHED_AUTOGROUP + bool "Automatic process group scheduling" ++ depends on !SCHED_MUQSS + select CGROUPS + select CGROUP_SCHED + select FAIR_GROUP_SCHED +diff -Nur a/init/main.c b/init/main.c +--- a/init/main.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/init/main.c 2019-07-07 09:17:41.251241479 +0100 +@@ -1083,6 +1083,8 @@ + + rcu_end_inkernel_boot(); + ++ print_scheduler_version(); ++ + if (ramdisk_execute_command) { + ret = run_init_process(ramdisk_execute_command); + if (!ret) +diff -Nur a/kernel/delayacct.c b/kernel/delayacct.c +--- a/kernel/delayacct.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/delayacct.c 2019-07-07 09:17:41.251241479 +0100 +@@ -115,7 +115,7 @@ + */ + t1 = tsk->sched_info.pcount; + t2 = tsk->sched_info.run_delay; +- t3 = tsk->se.sum_exec_runtime; ++ t3 = tsk_seruntime(tsk); + + d->cpu_count += t1; + +diff -Nur a/kernel/exit.c b/kernel/exit.c +--- a/kernel/exit.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/exit.c 2019-07-07 09:17:41.251241479 +0100 +@@ -130,7 +130,7 @@ + sig->curr_target = next_thread(tsk); + } + +- add_device_randomness((const void*) &tsk->se.sum_exec_runtime, ++ add_device_randomness((const void*) &tsk_seruntime(tsk), + sizeof(unsigned long long)); + + /* +@@ -151,7 +151,7 @@ + sig->inblock += task_io_get_inblock(tsk); + sig->oublock += task_io_get_oublock(tsk); + task_io_accounting_add(&sig->ioac, &tsk->ioac); +- sig->sum_sched_runtime += tsk->se.sum_exec_runtime; ++ sig->sum_sched_runtime += tsk_seruntime(tsk); + sig->nr_threads--; + __unhash_process(tsk, group_dead); + write_sequnlock(&sig->stats_lock); +diff -Nur a/kernel/Kconfig.MuQSS b/kernel/Kconfig.MuQSS +--- a/kernel/Kconfig.MuQSS 1970-01-01 01:00:00.000000000 +0100 ++++ b/kernel/Kconfig.MuQSS 2019-07-07 09:17:41.251241479 +0100 +@@ -0,0 +1,89 @@ ++choice ++ prompt "CPU scheduler runqueue sharing" ++ default RQ_MC if SCHED_MUQSS ++ default RQ_NONE ++ ++config RQ_NONE ++ bool "No sharing" ++ help ++ This is the default behaviour where the CPU scheduler has one runqueue ++ per CPU, whether it is a physical or logical CPU (hyperthread). ++ ++ This can still be enabled runtime with the boot parameter ++ rqshare=none ++ ++ If unsure, say N. 
++ ++config RQ_SMT ++ bool "SMT (hyperthread) siblings" ++ depends on SCHED_SMT && SCHED_MUQSS ++ ++ help ++ With this option enabled, the CPU scheduler will have one runqueue ++ shared by SMT (hyperthread) siblings. As these logical cores share ++ one physical core, sharing the runqueue resource can lead to decreased ++ overhead, lower latency and higher throughput. ++ ++ This can still be enabled runtime with the boot parameter ++ rqshare=smt ++ ++ If unsure, say N. ++ ++config RQ_MC ++ bool "Multicore siblings" ++ depends on SCHED_MC && SCHED_MUQSS ++ help ++ With this option enabled, the CPU scheduler will have one runqueue ++ shared by multicore siblings in addition to any SMT siblings. ++ As these physical cores share caches, sharing the runqueue resource ++ will lead to lower latency, but its effects on overhead and throughput ++ are less predictable. As a general rule, 6 or fewer cores will likely ++ benefit from this, while larger CPUs will only derive a latency ++ benefit. If your workloads are primarily single threaded, this will ++ possibly worsen throughput. If you are only concerned about latency ++ then enable this regardless of how many cores you have. ++ ++ This can still be enabled runtime with the boot parameter ++ rqshare=mc ++ ++ If unsure, say Y. ++ ++config RQ_SMP ++ bool "Symmetric Multi-Processing" ++ depends on SMP && SCHED_MUQSS ++ help ++ With this option enabled, the CPU scheduler will have one runqueue ++ shared by all physical CPUs unless they are on separate NUMA nodes. ++ As physical CPUs usually do not share resources, sharing the runqueue ++ will normally worsen throughput but improve latency. If you only ++ care about latency enable this. ++ ++ This can still be enabled runtime with the boot parameter ++ rqshare=smp ++ ++ If unsure, say N. ++ ++config RQ_ALL ++ bool "NUMA" ++ depends on SMP && SCHED_MUQSS ++ help ++ With this option enabled, the CPU scheduler will have one runqueue ++ regardless of the architecture configuration, including across NUMA ++ nodes. This can substantially decrease throughput in NUMA ++ configurations, but light NUMA designs will not be dramatically ++ affected. This option should only be chosen if latency is the prime ++ concern. ++ ++ This can still be enabled runtime with the boot parameter ++ rqshare=all ++ ++ If unsure, say N. ++endchoice ++ ++config SHARERQ ++ int ++ default 0 if RQ_NONE ++ default 1 if RQ_SMT ++ default 2 if RQ_MC ++ default 3 if RQ_SMP ++ default 4 if RQ_ALL +diff -Nur a/kernel/kthread.c b/kernel/kthread.c +--- a/kernel/kthread.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/kthread.c 2019-07-07 09:17:41.261241813 +0100 +@@ -431,6 +431,34 @@ + } + EXPORT_SYMBOL(kthread_bind); + ++#if defined(CONFIG_SCHED_MUQSS) && defined(CONFIG_SMP) ++extern void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); ++ ++/* ++ * new_kthread_bind is a special variant of __kthread_bind_mask. ++ * For new threads to work on muqss we want to call do_set_cpus_allowed ++ * without the task_cpu being set and the task rescheduled until they're ++ * rescheduled on their own so we call __do_set_cpus_allowed directly which ++ * only changes the cpumask. This is particularly important for smpboot threads ++ * to work. ++ */ ++static void new_kthread_bind(struct task_struct *p, unsigned int cpu) ++{ ++ unsigned long flags; ++ ++ if (WARN_ON(!wait_task_inactive(p, TASK_UNINTERRUPTIBLE))) ++ return; ++ ++ /* It's safe because the task is inactive. 
*/ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ __do_set_cpus_allowed(p, cpumask_of(cpu)); ++ p->flags |= PF_NO_SETAFFINITY; ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++} ++#else ++#define new_kthread_bind(p, cpu) kthread_bind(p, cpu) ++#endif ++ + /** + * kthread_create_on_cpu - Create a cpu bound kthread + * @threadfn: the function to run until signal_pending(current). +@@ -452,7 +480,7 @@ + cpu); + if (IS_ERR(p)) + return p; +- kthread_bind(p, cpu); ++ new_kthread_bind(p, cpu); + /* CPU hotplug need to bind once again when unparking the thread. */ + set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags); + to_kthread(p)->cpu = cpu; +diff -Nur a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c +--- a/kernel/livepatch/transition.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/livepatch/transition.c 2019-07-07 09:17:41.261241813 +0100 +@@ -298,7 +298,7 @@ + static bool klp_try_switch_task(struct task_struct *task) + { + struct rq *rq; +- struct rq_flags flags; ++ struct rq_flags rf; + int ret; + bool success = false; + char err_buf[STACK_ERR_BUF_SIZE]; +@@ -314,7 +314,7 @@ + * functions. If all goes well, switch the task to the target patch + * state. + */ +- rq = task_rq_lock(task, &flags); ++ rq = task_rq_lock(task, &rf); + + if (task_running(rq, task) && task != current) { + snprintf(err_buf, STACK_ERR_BUF_SIZE, +@@ -333,7 +333,7 @@ + task->patch_state = klp_target_state; + + done: +- task_rq_unlock(rq, task, &flags); ++ task_rq_unlock(rq, task, &rf); + + /* + * Due to console deadlock issues, pr_debug() can't be used while +diff -Nur a/kernel/Makefile b/kernel/Makefile +--- a/kernel/Makefile 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/Makefile 2019-07-07 09:17:41.251241479 +0100 +@@ -10,7 +10,7 @@ + extable.o params.o \ + kthread.o sys_ni.o nsproxy.o \ + notifier.o ksysfs.o cred.o reboot.o \ +- async.o range.o smpboot.o ucount.o ++ async.o range.o smpboot.o ucount.o skip_list.o + + obj-$(CONFIG_MODULES) += kmod.o + obj-$(CONFIG_MULTIUSER) += groups.o +diff -Nur a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c +--- a/kernel/sched/cpufreq_schedutil.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/sched/cpufreq_schedutil.c 2019-07-07 09:17:41.261241813 +0100 +@@ -175,6 +175,12 @@ + return cpufreq_driver_resolve_freq(policy, freq); + } + ++#ifdef CONFIG_SCHED_MUQSS ++#define rt_rq_runnable(rq_rt) rt_rq_is_runnable(rq) ++#else ++#define rt_rq_runnable(rq_rt) rt_rq_is_runnable(&rq->rt) ++#endif ++ + /* + * This function computes an effective utilization for the given CPU, to be + * used for frequency selection given the linear relation: f = u * f_max. 
+@@ -201,7 +207,7 @@ + unsigned long dl_util, util, irq; + struct rq *rq = cpu_rq(cpu); + +- if (type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) ++ if (type == FREQUENCY_UTIL && rt_rq_runnable(rq)) + return max; + + /* +@@ -635,7 +641,11 @@ + struct task_struct *thread; + struct sched_attr attr = { + .size = sizeof(struct sched_attr), ++#ifdef CONFIG_SCHED_MUQSS ++ .sched_policy = SCHED_RR, ++#else + .sched_policy = SCHED_DEADLINE, ++#endif + .sched_flags = SCHED_FLAG_SUGOV, + .sched_nice = 0, + .sched_priority = 0, +diff -Nur a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h +--- a/kernel/sched/cpupri.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/sched/cpupri.h 2019-07-07 09:17:41.261241813 +0100 +@@ -17,9 +17,11 @@ + int *cpu_to_pri; + }; + ++#ifndef CONFIG_SCHED_MUQSS + #ifdef CONFIG_SMP + int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask); + void cpupri_set(struct cpupri *cp, int cpu, int pri); + int cpupri_init(struct cpupri *cp); + void cpupri_cleanup(struct cpupri *cp); + #endif ++#endif +diff -Nur a/kernel/sched/cputime.c b/kernel/sched/cputime.c +--- a/kernel/sched/cputime.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/sched/cputime.c 2019-07-07 09:17:41.261241813 +0100 +@@ -265,26 +265,6 @@ + return accounted; + } + +-#ifdef CONFIG_64BIT +-static inline u64 read_sum_exec_runtime(struct task_struct *t) +-{ +- return t->se.sum_exec_runtime; +-} +-#else +-static u64 read_sum_exec_runtime(struct task_struct *t) +-{ +- u64 ns; +- struct rq_flags rf; +- struct rq *rq; +- +- rq = task_rq_lock(t, &rf); +- ns = t->se.sum_exec_runtime; +- task_rq_unlock(rq, t, &rf); +- +- return ns; +-} +-#endif +- + /* + * Accumulate raw cputime values of dead tasks (sig->[us]time) and live + * tasks (sum on group iteration) belonging to @tsk's group. +@@ -662,7 +642,7 @@ + void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) + { + struct task_cputime cputime = { +- .sum_exec_runtime = p->se.sum_exec_runtime, ++ .sum_exec_runtime = tsk_seruntime(p), + }; + + task_cputime(p, &cputime.utime, &cputime.stime); +diff -Nur a/kernel/sched/idle.c b/kernel/sched/idle.c +--- a/kernel/sched/idle.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/sched/idle.c 2019-07-07 09:17:41.261241813 +0100 +@@ -224,6 +224,8 @@ + static void do_idle(void) + { + int cpu = smp_processor_id(); ++ bool pending = false; ++ + /* + * If the arch has a polling bit, we maintain an invariant: + * +@@ -234,7 +236,10 @@ + */ + + __current_set_polling(); +- tick_nohz_idle_enter(); ++ if (unlikely(softirq_pending(cpu))) ++ pending = true; ++ else ++ tick_nohz_idle_enter(); + + while (!need_resched()) { + check_pgt_cache(); +@@ -272,7 +277,8 @@ + * an IPI to fold the state for us. + */ + preempt_set_need_resched(); +- tick_nohz_idle_exit(); ++ if (!pending) ++ tick_nohz_idle_exit(); + __current_clr_polling(); + + /* +@@ -353,6 +359,7 @@ + do_idle(); + } + ++#ifndef CONFIG_SCHED_MUQSS + /* + * idle-task scheduling class. 
+ */ +@@ -465,3 +472,4 @@ + .switched_to = switched_to_idle, + .update_curr = update_curr_idle, + }; ++#endif /* CONFIG_SCHED_MUQSS */ +diff -Nur a/kernel/sched/Makefile b/kernel/sched/Makefile +--- a/kernel/sched/Makefile 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/sched/Makefile 2019-07-07 09:17:41.261241813 +0100 +@@ -16,15 +16,23 @@ + CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer + endif + ++ifdef CONFIG_SCHED_MUQSS ++obj-y += MuQSS.o clock.o cputime.o ++obj-y += idle.o ++obj-y += wait.o wait_bit.o swait.o completion.o ++ ++obj-$(CONFIG_SMP) += topology.o ++else + obj-y += core.o loadavg.o clock.o cputime.o + obj-y += idle.o fair.o rt.o deadline.o + obj-y += wait.o wait_bit.o swait.o completion.o + + obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o + obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o +-obj-$(CONFIG_SCHEDSTATS) += stats.o + obj-$(CONFIG_SCHED_DEBUG) += debug.o + obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o ++endif ++obj-$(CONFIG_SCHEDSTATS) += stats.o + obj-$(CONFIG_CPU_FREQ) += cpufreq.o + obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o + obj-$(CONFIG_MEMBARRIER) += membarrier.o +diff -Nur a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c +--- a/kernel/sched/MuQSS.c 1970-01-01 01:00:00.000000000 +0100 ++++ b/kernel/sched/MuQSS.c 2019-07-07 09:17:41.261241813 +0100 +@@ -0,0 +1,7496 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * kernel/sched/MuQSS.c, was kernel/sched.c ++ * ++ * Kernel scheduler and related syscalls ++ * ++ * Copyright (C) 1991-2002 Linus Torvalds ++ * ++ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and ++ * make semaphores SMP safe ++ * 1998-11-19 Implemented schedule_timeout() and related stuff ++ * by Andrea Arcangeli ++ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: ++ * hybrid priority-list and round-robin design with ++ * an array-switch method of distributing timeslices ++ * and per-CPU runqueues. Cleanups and useful suggestions ++ * by Davide Libenzi, preemptible kernel bits by Robert Love. ++ * 2003-09-03 Interactivity tuning by Con Kolivas. ++ * 2004-04-02 Scheduler domains code by Nick Piggin ++ * 2007-04-15 Work begun on replacing all interactivity tuning with a ++ * fair scheduling design by Con Kolivas. ++ * 2007-05-05 Load balancing (smp-nice) and other improvements ++ * by Peter Williams ++ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith ++ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri ++ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, ++ * Thomas Gleixner, Mike Kravetz ++ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes ++ * a whole lot of those previous things. ++ * 2016-10-01 Multiple Queue Skiplist Scheduler scalable evolution of BFS ++ * scheduler by Con Kolivas. 
++ */ ++ ++#include <linux/sched/isolation.h> ++#include <linux/sched/loadavg.h> ++ ++#include <linux/binfmts.h> ++#include <linux/blkdev.h> ++#include <linux/compat.h> ++#include <linux/context_tracking.h> ++#include <linux/cpuset.h> ++#include <linux/delayacct.h> ++#include <linux/init_task.h> ++#include <linux/kcov.h> ++#include <linux/kprobes.h> ++#include <linux/mmu_context.h> ++#include <linux/module.h> ++#include <linux/nmi.h> ++#include <linux/prefetch.h> ++#include <linux/profile.h> ++#include <linux/rcupdate_wait.h> ++#include <linux/sched.h> ++#include <linux/security.h> ++#include <linux/skip_list.h> ++#include <linux/syscalls.h> ++#include <linux/tick.h> ++#include <linux/wait_bit.h> ++ ++#include <asm/irq_regs.h> ++#include <asm/switch_to.h> ++#include <asm/tlb.h> ++ ++#include "../workqueue_internal.h" ++#include "../smpboot.h" ++ ++#define CREATE_TRACE_POINTS ++#include <trace/events/sched.h> ++ ++#include "MuQSS.h" ++ ++#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO) ++#define rt_task(p) rt_prio((p)->prio) ++#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH)) ++#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \ ++ (policy) == SCHED_RR) ++#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy)) ++ ++#define is_idle_policy(policy) ((policy) == SCHED_IDLEPRIO) ++#define idleprio_task(p) unlikely(is_idle_policy((p)->policy)) ++#define task_running_idle(p) unlikely((p)->prio == IDLE_PRIO) ++ ++#define is_iso_policy(policy) ((policy) == SCHED_ISO) ++#define iso_task(p) unlikely(is_iso_policy((p)->policy)) ++#define task_running_iso(p) unlikely((p)->prio == ISO_PRIO) ++ ++#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT) ++ ++#define ISO_PERIOD (5 * HZ) ++ ++#define STOP_PRIO (MAX_RT_PRIO - 1) ++ ++/* ++ * Some helpers for converting to/from various scales. Use shifts to get ++ * approximate multiples of ten for less overhead. 
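++ * e.g. MS_TO_NS(6) below is 6 << 20 = 6291456ns (~6.29ms) rather than
++ * an exact 6ms; the ~5% skew is accepted in exchange for shift-only
++ * arithmetic in hot paths.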
++ */ ++#define APPROX_NS_PS (1073741824) /* Approximate ns per second */ ++#define JIFFIES_TO_NS(TIME) ((TIME) * (APPROX_NS_PS / HZ)) ++#define JIFFY_NS (APPROX_NS_PS / HZ) ++#define JIFFY_US (1048576 / HZ) ++#define NS_TO_JIFFIES(TIME) ((TIME) / JIFFY_NS) ++#define HALF_JIFFY_NS (APPROX_NS_PS / HZ / 2) ++#define HALF_JIFFY_US (1048576 / HZ / 2) ++#define MS_TO_NS(TIME) ((TIME) << 20) ++#define MS_TO_US(TIME) ((TIME) << 10) ++#define NS_TO_MS(TIME) ((TIME) >> 20) ++#define NS_TO_US(TIME) ((TIME) >> 10) ++#define US_TO_NS(TIME) ((TIME) << 10) ++#define TICK_APPROX_NS ((APPROX_NS_PS+HZ/2)/HZ) ++ ++#define RESCHED_US (100) /* Reschedule if less than this many μs left */ ++ ++void print_scheduler_version(void) ++{ ++ printk(KERN_INFO "MuQSS CPU scheduler v0.192 by Con Kolivas.\n"); ++} ++ ++#define RQSHARE_NONE 0 ++#define RQSHARE_SMT 1 ++#define RQSHARE_MC 2 ++#define RQSHARE_SMP 3 ++#define RQSHARE_ALL 4 ++ ++/* ++ * This determines what level of runqueue sharing will be done and is ++ * configurable at boot time with the bootparam rqshare = ++ */ ++static int rqshare __read_mostly = CONFIG_SHARERQ; /* Default RQSHARE_MC */ ++ ++static int __init set_rqshare(char *str) ++{ ++ if (!strncmp(str, "none", 4)) { ++ rqshare = RQSHARE_NONE; ++ return 0; ++ } ++ if (!strncmp(str, "smt", 3)) { ++ rqshare = RQSHARE_SMT; ++ return 0; ++ } ++ if (!strncmp(str, "mc", 2)) { ++ rqshare = RQSHARE_MC; ++ return 0; ++ } ++ if (!strncmp(str, "smp", 3)) { ++ rqshare = RQSHARE_SMP; ++ return 0; ++ } ++ if (!strncmp(str, "all", 3)) { ++ rqshare = RQSHARE_ALL; ++ return 0; ++ } ++ return 1; ++} ++__setup("rqshare=", set_rqshare); ++ ++/* ++ * This is the time all tasks within the same priority round robin. ++ * Value is in ms and set to a minimum of 6ms. ++ * Tunable via /proc interface. ++ */ ++int rr_interval __read_mostly = 6; ++ ++/* ++ * Tunable to choose whether to prioritise latency or throughput, simple ++ * binary yes or no ++ */ ++int sched_interactive __read_mostly = 1; ++ ++/* ++ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks ++ * are allowed to run five seconds as real time tasks. This is the total over ++ * all online cpus. ++ */ ++int sched_iso_cpu __read_mostly = 70; ++ ++/* ++ * sched_yield_type - Choose what sort of yield sched_yield will perform. ++ * 0: No yield. ++ * 1: Yield only to better priority/deadline tasks. (default) ++ * 2: Expire timeslice and recalculate deadline. ++ */ ++int sched_yield_type __read_mostly = 1; ++ ++/* ++ * The relative length of deadline for each priority(nice) level. ++ */ ++static int prio_ratios[NICE_WIDTH] __read_mostly; ++ ++ ++/* ++ * The quota handed out to tasks of all priority levels when refilling their ++ * time_slice. ++ */ ++static inline int timeslice(void) ++{ ++ return MS_TO_US(rr_interval); ++} ++ ++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); ++ ++#ifdef CONFIG_SMP ++/* ++ * Total number of runqueues. Equals number of CPUs when there is no runqueue ++ * sharing but is usually less with SMT/MC sharing of runqueues. ++ */ ++static int total_runqueues __read_mostly = 1; ++ ++static cpumask_t cpu_idle_map ____cacheline_aligned_in_smp; ++ ++struct rq *cpu_rq(int cpu) ++{ ++ return &per_cpu(runqueues, (cpu)); ++} ++#define cpu_curr(cpu) (cpu_rq(cpu)->curr) ++ ++/* ++ * For asym packing, by default the lower numbered cpu has higher priority. 
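++ * e.g. the default arch_asym_cpu_priority() below returns -cpu, so
++ * cpu0 scores 0, cpu2 scores -2, and cpu0 is preferred.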
++ */ ++int __weak arch_asym_cpu_priority(int cpu) ++{ ++ return -cpu; ++} ++ ++int __weak arch_sd_sibling_asym_packing(void) ++{ ++ return 0*SD_ASYM_PACKING; ++} ++ ++#ifdef CONFIG_SCHED_SMT ++DEFINE_STATIC_KEY_FALSE(sched_smt_present); ++EXPORT_SYMBOL_GPL(sched_smt_present); ++#endif ++ ++#else ++struct rq *uprq; ++#endif /* CONFIG_SMP */ ++ ++#include "stats.h" ++ ++/* ++ * All common locking functions performed on rq->lock. rq->clock is local to ++ * the CPU accessing it so it can be modified just with interrupts disabled ++ * when we're not updating niffies. ++ * Looking up task_rq must be done under rq->lock to be safe. ++ */ ++ ++/* ++ * RQ-clock updating methods: ++ */ ++ ++#ifdef HAVE_SCHED_AVG_IRQ ++static void update_irq_load_avg(struct rq *rq, long delta); ++#else ++static inline void update_irq_load_avg(struct rq *rq, long delta) {} ++#endif ++ ++static void update_rq_clock_task(struct rq *rq, s64 delta) ++{ ++/* ++ * In theory, the compile should just see 0 here, and optimize out the call ++ * to sched_rt_avg_update. But I don't trust it... ++ */ ++ s64 __maybe_unused steal = 0, irq_delta = 0; ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; ++ ++ /* ++ * Since irq_time is only updated on {soft,}irq_exit, we might run into ++ * this case when a previous update_rq_clock() happened inside a ++ * {soft,}irq region. ++ * ++ * When this happens, we stop ->clock_task and only update the ++ * prev_irq_time stamp to account for the part that fit, so that a next ++ * update will consume the rest. This ensures ->clock_task is ++ * monotonic. ++ * ++ * It does however cause some slight miss-attribution of {soft,}irq ++ * time, a more accurate solution would be to update the irq_time using ++ * the current rq->clock timestamp, except that would require using ++ * atomic ops. ++ */ ++ if (irq_delta > delta) ++ irq_delta = delta; ++ ++ rq->prev_irq_time += irq_delta; ++ delta -= irq_delta; ++#endif ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING ++ if (static_key_false((¶virt_steal_rq_enabled))) { ++ steal = paravirt_steal_clock(cpu_of(rq)); ++ steal -= rq->prev_steal_time_rq; ++ ++ if (unlikely(steal > delta)) ++ steal = delta; ++ ++ rq->prev_steal_time_rq += steal; ++ delta -= steal; ++ } ++#endif ++ rq->clock_task += delta; ++ ++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ ++ if (irq_delta + steal) ++ update_irq_load_avg(rq, irq_delta + steal); ++#endif ++} ++ ++static inline void update_rq_clock(struct rq *rq) ++{ ++ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; ++ ++ if (unlikely(delta < 0)) ++ return; ++ rq->clock += delta; ++ update_rq_clock_task(rq, delta); ++} ++ ++/* ++ * Niffies are a globally increasing nanosecond counter. They're only used by ++ * update_load_avg and time_slice_expired, however deadlines are based on them ++ * across CPUs. Update them whenever we will call one of those functions, and ++ * synchronise them across CPUs whenever we hold both runqueue locks. 
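++ * (That is what synchronise_niffies() below does: it copies the higher
++ * counter of the pair to the runqueue that is lagging behind.)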
++ */ ++static inline void update_clocks(struct rq *rq) ++{ ++ s64 ndiff, minndiff; ++ long jdiff; ++ ++ update_rq_clock(rq); ++ ndiff = rq->clock - rq->old_clock; ++ rq->old_clock = rq->clock; ++ jdiff = jiffies - rq->last_jiffy; ++ ++ /* Subtract any niffies added by balancing with other rqs */ ++ ndiff -= rq->niffies - rq->last_niffy; ++ minndiff = JIFFIES_TO_NS(jdiff) - rq->niffies + rq->last_jiffy_niffies; ++ if (minndiff < 0) ++ minndiff = 0; ++ ndiff = max(ndiff, minndiff); ++ rq->niffies += ndiff; ++ rq->last_niffy = rq->niffies; ++ if (jdiff) { ++ rq->last_jiffy += jdiff; ++ rq->last_jiffy_niffies = rq->niffies; ++ } ++} ++ ++/* ++ * Any time we have two runqueues locked we use that as an opportunity to ++ * synchronise niffies to the highest value as idle ticks may have artificially ++ * kept niffies low on one CPU and the truth can only be later. ++ */ ++static inline void synchronise_niffies(struct rq *rq1, struct rq *rq2) ++{ ++ if (rq1->niffies > rq2->niffies) ++ rq2->niffies = rq1->niffies; ++ else ++ rq1->niffies = rq2->niffies; ++} ++ ++/* ++ * double_rq_lock - safely lock two runqueues ++ * ++ * Note this does not disable interrupts like task_rq_lock, ++ * you need to do so manually before calling. ++ */ ++ ++/* For when we know rq1 != rq2 */ ++static inline void __double_rq_lock(struct rq *rq1, struct rq *rq2) ++ __acquires(rq1->lock) ++ __acquires(rq2->lock) ++{ ++ if (rq1 < rq2) { ++ raw_spin_lock(rq1->lock); ++ raw_spin_lock_nested(rq2->lock, SINGLE_DEPTH_NESTING); ++ } else { ++ raw_spin_lock(rq2->lock); ++ raw_spin_lock_nested(rq1->lock, SINGLE_DEPTH_NESTING); ++ } ++} ++ ++static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) ++ __acquires(rq1->lock) ++ __acquires(rq2->lock) ++{ ++ BUG_ON(!irqs_disabled()); ++ if (rq1->lock == rq2->lock) { ++ raw_spin_lock(rq1->lock); ++ __acquire(rq2->lock); /* Fake it out ;) */ ++ } else ++ __double_rq_lock(rq1, rq2); ++ synchronise_niffies(rq1, rq2); ++} ++ ++/* ++ * double_rq_unlock - safely unlock two runqueues ++ * ++ * Note this does not restore interrupts like task_rq_unlock, ++ * you need to do so manually after calling. 
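++ *
++ * A typical pairing therefore looks like this (sketch):
++ *
++ *	local_irq_save(flags);
++ *	double_rq_lock(rq1, rq2);
++ *	...
++ *	double_rq_unlock(rq1, rq2);
++ *	local_irq_restore(flags);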
++ */ ++static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) ++ __releases(rq1->lock) ++ __releases(rq2->lock) ++{ ++ raw_spin_unlock(rq1->lock); ++ if (rq1->lock != rq2->lock) ++ raw_spin_unlock(rq2->lock); ++ else ++ __release(rq2->lock); ++} ++ ++static inline void lock_all_rqs(void) ++{ ++ int cpu; ++ ++ preempt_disable(); ++ for_each_possible_cpu(cpu) { ++ struct rq *rq = cpu_rq(cpu); ++ ++ do_raw_spin_lock(rq->lock); ++ } ++} ++ ++static inline void unlock_all_rqs(void) ++{ ++ int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ struct rq *rq = cpu_rq(cpu); ++ ++ do_raw_spin_unlock(rq->lock); ++ } ++ preempt_enable(); ++} ++ ++/* Specially nest trylock an rq */ ++static inline bool trylock_rq(struct rq *this_rq, struct rq *rq) ++{ ++ if (unlikely(!do_raw_spin_trylock(rq->lock))) ++ return false; ++ spin_acquire(&rq->lock->dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_); ++ synchronise_niffies(this_rq, rq); ++ return true; ++} ++ ++/* Unlock a specially nested trylocked rq */ ++static inline void unlock_rq(struct rq *rq) ++{ ++ spin_release(&rq->lock->dep_map, 1, _RET_IP_); ++ do_raw_spin_unlock(rq->lock); ++} ++ ++/* ++ * cmpxchg based fetch_or, macro so it works for different integer types ++ */ ++#define fetch_or(ptr, mask) \ ++ ({ \ ++ typeof(ptr) _ptr = (ptr); \ ++ typeof(mask) _mask = (mask); \ ++ typeof(*_ptr) _old, _val = *_ptr; \ ++ \ ++ for (;;) { \ ++ _old = cmpxchg(_ptr, _val, _val | _mask); \ ++ if (_old == _val) \ ++ break; \ ++ _val = _old; \ ++ } \ ++ _old; \ ++}) ++ ++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) ++/* ++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, ++ * this avoids any races wrt polling state changes and thereby avoids ++ * spurious IPIs. ++ */ ++static bool set_nr_and_not_polling(struct task_struct *p) ++{ ++ struct thread_info *ti = task_thread_info(p); ++ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); ++} ++ ++/* ++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. ++ * ++ * If this returns true, then the idle task promises to call ++ * sched_ttwu_pending() and reschedule soon. ++ */ ++static bool set_nr_if_polling(struct task_struct *p) ++{ ++ struct thread_info *ti = task_thread_info(p); ++ typeof(ti->flags) old, val = READ_ONCE(ti->flags); ++ ++ for (;;) { ++ if (!(val & _TIF_POLLING_NRFLAG)) ++ return false; ++ if (val & _TIF_NEED_RESCHED) ++ return true; ++ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); ++ if (old == val) ++ break; ++ val = old; ++ } ++ return true; ++} ++ ++#else ++static bool set_nr_and_not_polling(struct task_struct *p) ++{ ++ set_tsk_need_resched(p); ++ return true; ++} ++ ++#ifdef CONFIG_SMP ++static bool set_nr_if_polling(struct task_struct *p) ++{ ++ return false; ++} ++#endif ++#endif ++ ++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) ++{ ++ struct wake_q_node *node = &task->wake_q; ++ ++ /* ++ * Atomically grab the task, if ->wake_q is !nil already it means ++ * its already queued (either by us or someone else) and will get the ++ * wakeup due to that. ++ * ++ * In order to ensure that a pending wakeup will observe our pending ++ * state, even in the failed case, an explicit smp_mb() must be used. ++ */ ++ smp_mb__before_atomic(); ++ if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) ++ return false; ++ ++ /* ++ * The head is context local, there can be no concurrency. 
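++ * Only the cmpxchg on node->next above is contended; once the node is
++ * claimed, the plain pointer updates below cannot race.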
++ */ ++ *head->lastp = node; ++ head->lastp = &node->next; ++ return true; ++} ++ ++/** ++ * wake_q_add() - queue a wakeup for 'later' waking. ++ * @head: the wake_q_head to add @task to ++ * @task: the task to queue for 'later' wakeup ++ * ++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the ++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come ++ * instantly. ++ * ++ * This function must be used as-if it were wake_up_process(); IOW the task ++ * must be ready to be woken at this location. ++ */ ++void wake_q_add(struct wake_q_head *head, struct task_struct *task) ++{ ++ if (__wake_q_add(head, task)) ++ get_task_struct(task); ++} ++ ++/** ++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking. ++ * @head: the wake_q_head to add @task to ++ * @task: the task to queue for 'later' wakeup ++ * ++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the ++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come ++ * instantly. ++ * ++ * This function must be used as-if it were wake_up_process(); IOW the task ++ * must be ready to be woken at this location. ++ * ++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers ++ * that already hold reference to @task can call the 'safe' version and trust ++ * wake_q to do the right thing depending whether or not the @task is already ++ * queued for wakeup. ++ */ ++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) ++{ ++ if (!__wake_q_add(head, task)) ++ put_task_struct(task); ++} ++ ++void wake_up_q(struct wake_q_head *head) ++{ ++ struct wake_q_node *node = head->first; ++ ++ while (node != WAKE_Q_TAIL) { ++ struct task_struct *task; ++ ++ task = container_of(node, struct task_struct, wake_q); ++ BUG_ON(!task); ++ /* Task can safely be re-inserted now */ ++ node = node->next; ++ task->wake_q.next = NULL; ++ ++ /* ++ * wake_up_process() executes a full barrier, which pairs with ++ * the queueing in wake_q_add() so as not to miss wakeups. ++ */ ++ wake_up_process(task); ++ put_task_struct(task); ++ } ++} ++ ++static inline void smp_sched_reschedule(int cpu) ++{ ++ if (likely(cpu_online(cpu))) ++ smp_send_reschedule(cpu); ++} ++ ++/* ++ * resched_task - mark a task 'to be rescheduled now'. ++ * ++ * On UP this means the setting of the need_resched flag, on SMP it ++ * might also involve a cross-CPU call to trigger the scheduler on ++ * the target CPU. ++ */ ++void resched_task(struct task_struct *p) ++{ ++ int cpu; ++#ifdef CONFIG_LOCKDEP ++ /* Kernel threads call this when creating workqueues while still ++ * inactive from __kthread_bind_mask, holding only the pi_lock */ ++ if (!(p->flags & PF_KTHREAD)) { ++ struct rq *rq = task_rq(p); ++ ++ lockdep_assert_held(rq->lock); ++ } ++#endif ++ if (test_tsk_need_resched(p)) ++ return; ++ ++ cpu = task_cpu(p); ++ if (cpu == smp_processor_id()) { ++ set_tsk_need_resched(p); ++ set_preempt_need_resched(); ++ return; ++ } ++ ++ if (set_nr_and_not_polling(p)) ++ smp_sched_reschedule(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++} ++ ++/* ++ * A task that is not running or queued will not have a node set. ++ * A task that is queued but not running will have a node set. ++ * A task that is currently running will have ->on_cpu set but no node set. 
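++ * task_queued() below therefore only has to test
++ * skiplist_node_empty(&p->node).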
++ */ ++static inline bool task_queued(struct task_struct *p) ++{ ++ return !skiplist_node_empty(&p->node); ++} ++ ++static void enqueue_task(struct rq *rq, struct task_struct *p, int flags); ++static inline void resched_if_idle(struct rq *rq); ++ ++/* Dodgy workaround till we figure out where the softirqs are going */ ++static inline void do_pending_softirq(struct rq *rq, struct task_struct *next) ++{ ++ if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt())) ++ do_softirq_own_stack(); ++} ++ ++static inline bool deadline_before(u64 deadline, u64 time) ++{ ++ return (deadline < time); ++} ++ ++/* ++ * Deadline is "now" in niffies + (offset by priority). Setting the deadline ++ * is the key to everything. It distributes cpu fairly amongst tasks of the ++ * same nice value, it proportions cpu according to nice level, it means the ++ * task that last woke up the longest ago has the earliest deadline, thus ++ * ensuring that interactive tasks get low latency on wake up. The CPU ++ * proportion works out to the square of the virtual deadline difference, so ++ * this equation will give nice 19 3% CPU compared to nice 0. ++ */ ++static inline u64 prio_deadline_diff(int user_prio) ++{ ++ return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128)); ++} ++ ++static inline u64 task_deadline_diff(struct task_struct *p) ++{ ++ return prio_deadline_diff(TASK_USER_PRIO(p)); ++} ++ ++static inline u64 static_deadline_diff(int static_prio) ++{ ++ return prio_deadline_diff(USER_PRIO(static_prio)); ++} ++ ++static inline int longest_deadline_diff(void) ++{ ++ return prio_deadline_diff(39); ++} ++ ++static inline int ms_longest_deadline_diff(void) ++{ ++ return NS_TO_MS(longest_deadline_diff()); ++} ++ ++static inline bool rq_local(struct rq *rq); ++ ++#ifndef SCHED_CAPACITY_SCALE ++#define SCHED_CAPACITY_SCALE 1024 ++#endif ++ ++static inline int rq_load(struct rq *rq) ++{ ++ return rq->nr_running; ++} ++ ++/* ++ * Update the load average for feeding into cpu frequency governors. Use a ++ * rough estimate of a rolling average with ~ time constant of 32ms. ++ * 80/128 ~ 0.63. * 80 / 32768 / 128 == * 5 / 262144 ++ * Make sure a call to update_clocks has been made before calling this to get ++ * an updated rq->niffies. ++ */ ++static void update_load_avg(struct rq *rq, unsigned int flags) ++{ ++ long us_interval, load; ++ unsigned long curload; ++ ++ us_interval = NS_TO_US(rq->niffies - rq->load_update); ++ if (unlikely(us_interval <= 0)) ++ return; ++ ++ curload = rq_load(rq); ++ load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144); ++ if (unlikely(load < 0)) ++ load = 0; ++ load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144; ++ rq->load_avg = load; ++ ++ rq->load_update = rq->niffies; ++ update_irq_load_avg(rq, 0); ++ if (likely(rq_local(rq))) ++ cpufreq_trigger(rq, flags); ++} ++ ++#ifdef HAVE_SCHED_AVG_IRQ ++/* ++ * IRQ variant of update_load_avg below. delta is actually time in nanoseconds ++ * here so we scale curload to how long it's been since the last update. 
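++ * Unlike update_load_avg() above, the instantaneous sample here is the
++ * share of the elapsed window spent servicing interrupts rather than
++ * the runqueue depth; it decays with the same ~32ms time constant.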
++ */ ++static void update_irq_load_avg(struct rq *rq, long delta) ++{ ++ long us_interval, load; ++ unsigned long curload; ++ ++ us_interval = NS_TO_US(rq->niffies - rq->irq_load_update); ++ if (unlikely(us_interval <= 0)) ++ return; ++ ++ curload = NS_TO_US(delta) / us_interval; ++ load = rq->irq_load_avg - (rq->irq_load_avg * us_interval * 5 / 262144); ++ if (unlikely(load < 0)) ++ load = 0; ++ load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144; ++ rq->irq_load_avg = load; ++ ++ rq->irq_load_update = rq->niffies; ++} ++#endif ++ ++/* ++ * Removing from the runqueue. Enter with rq locked. Deleting a task ++ * from the skip list is done via the stored node reference in the task struct ++ * and does not require a full look up. Thus it occurs in O(k) time where k ++ * is the "level" of the list the task was stored at - usually < 4, max 8. ++ */ ++static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) ++{ ++ skiplist_delete(rq->sl, &p->node); ++ rq->best_key = rq->node->next[0]->key; ++ update_clocks(rq); ++ ++ if (!(flags & DEQUEUE_SAVE)) { ++ sched_info_dequeued(rq, p); ++ psi_dequeue(p, flags & DEQUEUE_SLEEP); ++ } ++ rq->nr_running--; ++ if (rt_task(p)) ++ rq->rt_nr_running--; ++ update_load_avg(rq, flags); ++} ++ ++#ifdef CONFIG_PREEMPT_RCU ++static bool rcu_read_critical(struct task_struct *p) ++{ ++ return p->rcu_read_unlock_special.b.blocked; ++} ++#else /* CONFIG_PREEMPT_RCU */ ++#define rcu_read_critical(p) (false) ++#endif /* CONFIG_PREEMPT_RCU */ ++ ++/* ++ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as ++ * an idle task, we ensure none of the following conditions are met. ++ */ ++static bool idleprio_suitable(struct task_struct *p) ++{ ++ return (!(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)) && ++ !signal_pending(p) && !rcu_read_critical(p) && !freezing(p)); ++} ++ ++/* ++ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check ++ * that the iso_refractory flag is not set. ++ */ ++static inline bool isoprio_suitable(struct rq *rq) ++{ ++ return !rq->iso_refractory; ++} ++ ++/* ++ * Adding to the runqueue. Enter with rq locked. ++ */ ++static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) ++{ ++ unsigned int randseed, cflags = 0; ++ u64 sl_id; ++ ++ if (!rt_task(p)) { ++ /* Check it hasn't gotten rt from PI */ ++ if ((idleprio_task(p) && idleprio_suitable(p)) || ++ (iso_task(p) && isoprio_suitable(rq))) ++ p->prio = p->normal_prio; ++ else ++ p->prio = NORMAL_PRIO; ++ } else ++ rq->rt_nr_running++; ++ /* ++ * The sl_id key passed to the skiplist generates a sorted list. ++ * Realtime and sched iso tasks run FIFO so they only need be sorted ++ * according to priority. The skiplist will put tasks of the same ++ * key inserted later in FIFO order. Tasks of sched normal, batch ++ * and idleprio are sorted according to their deadlines. Idleprio ++ * tasks are offset by an impossibly large deadline value ensuring ++ * they get sorted into last positions, but still according to their ++ * own deadlines. This creates a "landscape" of skiplists running ++ * from priority 0 realtime in first place to the lowest priority ++ * idleprio tasks last. Skiplist insertion is an O(log n) process. 
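++ * Concretely: a realtime or ISO task keys on p->prio alone, a normal
++ * task on p->deadline, and an idleprio task on p->deadline +
++ * longest_deadline_diff() (or with the top bits forced high once it is
++ * actually running at IDLE_PRIO), giving the ordering described above.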
++ */ ++ if (p->prio <= ISO_PRIO) { ++ sl_id = p->prio; ++ } else { ++ sl_id = p->deadline; ++ if (idleprio_task(p)) { ++ if (p->prio == IDLE_PRIO) ++ sl_id |= 0xF000000000000000; ++ else ++ sl_id += longest_deadline_diff(); ++ } ++ } ++ /* ++ * Some architectures don't have better than microsecond resolution ++ * so mask out ~microseconds as the random seed for skiplist insertion. ++ */ ++ update_clocks(rq); ++ if (!(flags & ENQUEUE_RESTORE)) { ++ sched_info_queued(rq, p); ++ psi_enqueue(p, flags & ENQUEUE_WAKEUP); ++ } ++ ++ randseed = (rq->niffies >> 10) & 0xFFFFFFFF; ++ skiplist_insert(rq->sl, &p->node, sl_id, p, randseed); ++ rq->best_key = rq->node->next[0]->key; ++ if (p->in_iowait) ++ cflags |= SCHED_CPUFREQ_IOWAIT; ++ rq->nr_running++; ++ update_load_avg(rq, cflags); ++} ++ ++/* ++ * Returns the relative length of deadline all compared to the shortest ++ * deadline which is that of nice -20. ++ */ ++static inline int task_prio_ratio(struct task_struct *p) ++{ ++ return prio_ratios[TASK_USER_PRIO(p)]; ++} ++ ++/* ++ * task_timeslice - all tasks of all priorities get the exact same timeslice ++ * length. CPU distribution is handled by giving different deadlines to ++ * tasks of different priorities. Use 128 as the base value for fast shifts. ++ */ ++static inline int task_timeslice(struct task_struct *p) ++{ ++ return (rr_interval * task_prio_ratio(p) / 128); ++} ++ ++#ifdef CONFIG_SMP ++/* Entered with rq locked */ ++static inline void resched_if_idle(struct rq *rq) ++{ ++ if (rq_idle(rq)) ++ resched_task(rq->curr); ++} ++ ++static inline bool rq_local(struct rq *rq) ++{ ++ return (rq->cpu == smp_processor_id()); ++} ++#ifdef CONFIG_SMT_NICE ++static const cpumask_t *thread_cpumask(int cpu); ++ ++/* Find the best real time priority running on any SMT siblings of cpu and if ++ * none are running, the static priority of the best deadline task running. ++ * The lookups to the other runqueues is done lockless as the occasional wrong ++ * value would be harmless. */ ++static int best_smt_bias(struct rq *this_rq) ++{ ++ int other_cpu, best_bias = 0; ++ ++ for_each_cpu(other_cpu, &this_rq->thread_mask) { ++ struct rq *rq = cpu_rq(other_cpu); ++ ++ if (rq_idle(rq)) ++ continue; ++ if (unlikely(!rq->online)) ++ continue; ++ if (!rq->rq_mm) ++ continue; ++ if (likely(rq->rq_smt_bias > best_bias)) ++ best_bias = rq->rq_smt_bias; ++ } ++ return best_bias; ++} ++ ++static int task_prio_bias(struct task_struct *p) ++{ ++ if (rt_task(p)) ++ return 1 << 30; ++ else if (task_running_iso(p)) ++ return 1 << 29; ++ else if (task_running_idle(p)) ++ return 0; ++ return MAX_PRIO - p->static_prio; ++} ++ ++static bool smt_always_schedule(struct task_struct __maybe_unused *p, struct rq __maybe_unused *this_rq) ++{ ++ return true; ++} ++ ++static bool (*smt_schedule)(struct task_struct *p, struct rq *this_rq) = &smt_always_schedule; ++ ++/* We've already decided p can run on CPU, now test if it shouldn't for SMT ++ * nice reasons. 
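++ * e.g. task_prio_bias() below gives a nice-0 task 140 - 120 = 20 but a
++ * nice-19 task only 1, so the nice-19 task is normally refused while a
++ * nice-0 task owns the sibling thread.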
*/ ++static bool smt_should_schedule(struct task_struct *p, struct rq *this_rq) ++{ ++ int best_bias, task_bias; ++ ++ /* Kernel threads always run */ ++ if (unlikely(!p->mm)) ++ return true; ++ if (rt_task(p)) ++ return true; ++ if (!idleprio_suitable(p)) ++ return true; ++ best_bias = best_smt_bias(this_rq); ++ /* The smt siblings are all idle or running IDLEPRIO */ ++ if (best_bias < 1) ++ return true; ++ task_bias = task_prio_bias(p); ++ if (task_bias < 1) ++ return false; ++ if (task_bias >= best_bias) ++ return true; ++ /* Dither 25% cpu of normal tasks regardless of nice difference */ ++ if (best_bias % 4 == 1) ++ return true; ++ /* Sorry, you lose */ ++ return false; ++} ++#else /* CONFIG_SMT_NICE */ ++#define smt_schedule(p, this_rq) (true) ++#endif /* CONFIG_SMT_NICE */ ++ ++static inline void atomic_set_cpu(int cpu, cpumask_t *cpumask) ++{ ++ set_bit(cpu, (volatile unsigned long *)cpumask); ++} ++ ++/* ++ * The cpu_idle_map stores a bitmap of all the CPUs currently idle to ++ * allow easy lookup of whether any suitable idle CPUs are available. ++ * It's cheaper to maintain a binary yes/no if there are any idle CPUs on the ++ * idle_cpus variable than to do a full bitmask check when we are busy. The ++ * bits are set atomically but read locklessly as occasional false positive / ++ * negative is harmless. ++ */ ++static inline void set_cpuidle_map(int cpu) ++{ ++ if (likely(cpu_online(cpu))) ++ atomic_set_cpu(cpu, &cpu_idle_map); ++} ++ ++static inline void atomic_clear_cpu(int cpu, cpumask_t *cpumask) ++{ ++ clear_bit(cpu, (volatile unsigned long *)cpumask); ++} ++ ++static inline void clear_cpuidle_map(int cpu) ++{ ++ atomic_clear_cpu(cpu, &cpu_idle_map); ++} ++ ++static bool suitable_idle_cpus(struct task_struct *p) ++{ ++ return (cpumask_intersects(&p->cpus_allowed, &cpu_idle_map)); ++} ++ ++/* ++ * Resched current on rq. We don't know if rq is local to this CPU nor if it ++ * is locked so we do not use an intermediate variable for the task to avoid ++ * having it dereferenced. ++ */ ++static void resched_curr(struct rq *rq) ++{ ++ int cpu; ++ ++ if (test_tsk_need_resched(rq->curr)) ++ return; ++ ++ rq->preempt = rq->curr; ++ cpu = rq->cpu; ++ ++ /* We're doing this without holding the rq lock if it's not task_rq */ ++ ++ if (cpu == smp_processor_id()) { ++ set_tsk_need_resched(rq->curr); ++ set_preempt_need_resched(); ++ return; ++ } ++ ++ if (set_nr_and_not_polling(rq->curr)) ++ smp_sched_reschedule(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++} ++ ++#define CPUIDLE_DIFF_THREAD (1) ++#define CPUIDLE_DIFF_CORE (2) ++#define CPUIDLE_CACHE_BUSY (4) ++#define CPUIDLE_DIFF_CPU (8) ++#define CPUIDLE_THREAD_BUSY (16) ++#define CPUIDLE_DIFF_NODE (32) ++ ++/* ++ * The best idle CPU is chosen according to the CPUIDLE ranking above where the ++ * lowest value would give the most suitable CPU to schedule p onto next. The ++ * order works out to be the following: ++ * ++ * Same thread, idle or busy cache, idle or busy threads ++ * Other core, same cache, idle or busy cache, idle threads. ++ * Same node, other CPU, idle cache, idle threads. ++ * Same node, other CPU, busy cache, idle threads. ++ * Other core, same cache, busy threads. ++ * Same node, other CPU, busy threads. ++ * Other node, other CPU, idle cache, idle threads. ++ * Other node, other CPU, busy cache, idle threads. ++ * Other node, other CPU, busy threads. 
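++ *
++ * e.g. an idle SMT sibling can rank as low as CPUIDLE_DIFF_THREAD (1),
++ * while an idle CPU on another NUMA node ranks at least
++ * CPUIDLE_DIFF_NODE (32), so the sibling wins.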
++ */ ++static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask) ++{ ++ int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY | ++ CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE | ++ CPUIDLE_DIFF_THREAD; ++ int cpu_tmp; ++ ++ if (cpumask_test_cpu(best_cpu, tmpmask)) ++ goto out; ++ ++ for_each_cpu(cpu_tmp, tmpmask) { ++ int ranking, locality; ++ struct rq *tmp_rq; ++ ++ ranking = 0; ++ tmp_rq = cpu_rq(cpu_tmp); ++ ++ locality = rq->cpu_locality[cpu_tmp]; ++#ifdef CONFIG_NUMA ++ if (locality > 3) ++ ranking |= CPUIDLE_DIFF_NODE; ++ else ++#endif ++ if (locality > 2) ++ ranking |= CPUIDLE_DIFF_CPU; ++#ifdef CONFIG_SCHED_MC ++ else if (locality == 2) ++ ranking |= CPUIDLE_DIFF_CORE; ++ else if (!(tmp_rq->cache_idle(tmp_rq))) ++ ranking |= CPUIDLE_CACHE_BUSY; ++#endif ++#ifdef CONFIG_SCHED_SMT ++ if (locality == 1) ++ ranking |= CPUIDLE_DIFF_THREAD; ++ if (!(tmp_rq->siblings_idle(tmp_rq))) ++ ranking |= CPUIDLE_THREAD_BUSY; ++#endif ++ if (ranking < best_ranking) { ++ best_cpu = cpu_tmp; ++ best_ranking = ranking; ++ } ++ } ++out: ++ return best_cpu; ++} ++ ++bool cpus_share_cache(int this_cpu, int that_cpu) ++{ ++ struct rq *this_rq = cpu_rq(this_cpu); ++ ++ return (this_rq->cpu_locality[that_cpu] < 3); ++} ++ ++/* As per resched_curr but only will resched idle task */ ++static inline void resched_idle(struct rq *rq) ++{ ++ if (test_tsk_need_resched(rq->idle)) ++ return; ++ ++ rq->preempt = rq->idle; ++ ++ set_tsk_need_resched(rq->idle); ++ ++ if (rq_local(rq)) { ++ set_preempt_need_resched(); ++ return; ++ } ++ ++ smp_sched_reschedule(rq->cpu); ++} ++ ++static struct rq *resched_best_idle(struct task_struct *p, int cpu) ++{ ++ cpumask_t tmpmask; ++ struct rq *rq; ++ int best_cpu; ++ ++ cpumask_and(&tmpmask, &p->cpus_allowed, &cpu_idle_map); ++ best_cpu = best_mask_cpu(cpu, task_rq(p), &tmpmask); ++ rq = cpu_rq(best_cpu); ++ if (!smt_schedule(p, rq)) ++ return NULL; ++ rq->preempt = p; ++ resched_idle(rq); ++ return rq; ++} ++ ++static inline void resched_suitable_idle(struct task_struct *p) ++{ ++ if (suitable_idle_cpus(p)) ++ resched_best_idle(p, task_cpu(p)); ++} ++ ++static inline struct rq *rq_order(struct rq *rq, int cpu) ++{ ++ return rq->rq_order[cpu]; ++} ++#else /* CONFIG_SMP */ ++static inline void set_cpuidle_map(int cpu) ++{ ++} ++ ++static inline void clear_cpuidle_map(int cpu) ++{ ++} ++ ++static inline bool suitable_idle_cpus(struct task_struct *p) ++{ ++ return uprq->curr == uprq->idle; ++} ++ ++static inline void resched_suitable_idle(struct task_struct *p) ++{ ++} ++ ++static inline void resched_curr(struct rq *rq) ++{ ++ resched_task(rq->curr); ++} ++ ++static inline void resched_if_idle(struct rq *rq) ++{ ++} ++ ++static inline bool rq_local(struct rq *rq) ++{ ++ return true; ++} ++ ++static inline struct rq *rq_order(struct rq *rq, int cpu) ++{ ++ return rq; ++} ++ ++static inline bool smt_schedule(struct task_struct *p, struct rq *rq) ++{ ++ return true; ++} ++#endif /* CONFIG_SMP */ ++ ++static inline int normal_prio(struct task_struct *p) ++{ ++ if (has_rt_policy(p)) ++ return MAX_RT_PRIO - 1 - p->rt_priority; ++ if (idleprio_task(p)) ++ return IDLE_PRIO; ++ if (iso_task(p)) ++ return ISO_PRIO; ++ return NORMAL_PRIO; ++} ++ ++/* ++ * Calculate the current priority, i.e. the priority ++ * taken into account by the scheduler. This value might ++ * be boosted by RT tasks as it will be RT if the task got ++ * RT-boosted. If not then it returns p->normal_prio. 
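++ * e.g. a SCHED_NORMAL task PI-boosted by a blocked SCHED_FIFO waiter
++ * keeps its boosted rt prio here until rt_mutex_setprio() undoes it.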
++ */ ++static int effective_prio(struct task_struct *p) ++{ ++ p->normal_prio = normal_prio(p); ++ /* ++ * If we are RT tasks or we were boosted to RT priority, ++ * keep the priority unchanged. Otherwise, update priority ++ * to the normal priority: ++ */ ++ if (!rt_prio(p->prio)) ++ return p->normal_prio; ++ return p->prio; ++} ++ ++/* ++ * activate_task - move a task to the runqueue. Enter with rq locked. ++ */ ++static void activate_task(struct task_struct *p, struct rq *rq, int flags) ++{ ++ resched_if_idle(rq); ++ ++ /* ++ * Sleep time is in units of nanosecs, so shift by 20 to get a ++ * milliseconds-range estimation of the amount of time that the task ++ * spent sleeping: ++ */ ++ if (unlikely(prof_on == SLEEP_PROFILING)) { ++ if (p->state == TASK_UNINTERRUPTIBLE) ++ profile_hits(SLEEP_PROFILING, (void *)get_wchan(p), ++ (rq->niffies - p->last_ran) >> 20); ++ } ++ ++ p->prio = effective_prio(p); ++ if (task_contributes_to_load(p)) ++ rq->nr_uninterruptible--; ++ ++ enqueue_task(rq, p, flags); ++ p->on_rq = TASK_ON_RQ_QUEUED; ++} ++ ++/* ++ * deactivate_task - If it's running, it's not on the runqueue and we can just ++ * decrement the nr_running. Enter with rq locked. ++ */ ++static inline void deactivate_task(struct task_struct *p, struct rq *rq, int flags) ++{ ++ if (task_contributes_to_load(p)) ++ rq->nr_uninterruptible++; ++ ++ p->on_rq = 0; ++ if (!(flags & DEQUEUE_SAVE)) { ++ sched_info_dequeued(rq, p); ++ psi_dequeue(p, flags & DEQUEUE_SLEEP); ++ } ++} ++ ++#ifdef CONFIG_SMP ++void set_task_cpu(struct task_struct *p, unsigned int new_cpu) ++{ ++ struct rq *rq; ++ ++ if (task_cpu(p) == new_cpu) ++ return; ++ ++ /* Do NOT call set_task_cpu on a currently queued task as we will not ++ * be reliably holding the rq lock after changing CPU. */ ++ BUG_ON(task_queued(p)); ++ rq = task_rq(p); ++ ++#ifdef CONFIG_LOCKDEP ++ /* ++ * The caller should hold either p->pi_lock or rq->lock, when changing ++ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. ++ * ++ * Furthermore, all task_rq users should acquire both locks, see ++ * task_rq_lock(). ++ */ ++ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || ++ lockdep_is_held(rq->lock))); ++#endif ++ ++ trace_sched_migrate_task(p, new_cpu); ++ rseq_migrate(p); ++ perf_event_task_migrate(p); ++ ++ /* ++ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be ++ * successfully executed on another CPU. We must ensure that updates of ++ * per-task data have been completed by this moment. ++ */ ++ smp_wmb(); ++ ++ p->wake_cpu = new_cpu; ++ ++ if (task_running(rq, p)) { ++ /* ++ * We should only be calling this on a running task if we're ++ * holding rq lock. ++ */ ++ lockdep_assert_held(rq->lock); ++ ++ /* ++ * We can't change the task_thread_info CPU on a running task ++ * as p will still be protected by the rq lock of the CPU it ++ * is still running on so we only set the wake_cpu for it to be ++ * lazily updated once off the CPU. ++ */ ++ return; ++ } ++ ++#ifdef CONFIG_THREAD_INFO_IN_TASK ++ WRITE_ONCE(p->cpu, new_cpu); ++#else ++ WRITE_ONCE(task_thread_info(p)->cpu, new_cpu); ++#endif ++ /* We're no longer protecting p after this point since we're holding ++ * the wrong runqueue lock. */ ++} ++#endif /* CONFIG_SMP */ ++ ++/* ++ * Move a task off the runqueue and take it to a cpu for it will ++ * become the running task. 
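++ * DEQUEUE_SAVE makes dequeue_task() skip the sched_info/psi accounting;
++ * the sched_info part is redone by hand below only when the task is
++ * actually changing runqueues.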
++ */ ++static inline void take_task(struct rq *rq, int cpu, struct task_struct *p) ++{ ++ struct rq *p_rq = task_rq(p); ++ ++ dequeue_task(p_rq, p, DEQUEUE_SAVE); ++ if (p_rq != rq) { ++ sched_info_dequeued(p_rq, p); ++ sched_info_queued(rq, p); ++ } ++ set_task_cpu(p, cpu); ++} ++ ++/* ++ * Returns a descheduling task to the runqueue unless it is being ++ * deactivated. ++ */ ++static inline void return_task(struct task_struct *p, struct rq *rq, ++ int cpu, bool deactivate) ++{ ++ if (deactivate) ++ deactivate_task(p, rq, DEQUEUE_SLEEP); ++ else { ++#ifdef CONFIG_SMP ++ /* ++ * set_task_cpu was called on the running task that doesn't ++ * want to deactivate so it has to be enqueued to a different ++ * CPU and we need its lock. Tag it to be moved with as the ++ * lock is dropped in finish_lock_switch. ++ */ ++ if (unlikely(p->wake_cpu != cpu)) ++ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); ++ else ++#endif ++ enqueue_task(rq, p, ENQUEUE_RESTORE); ++ } ++} ++ ++/* Enter with rq lock held. We know p is on the local cpu */ ++static inline void __set_tsk_resched(struct task_struct *p) ++{ ++ set_tsk_need_resched(p); ++ set_preempt_need_resched(); ++} ++ ++/** ++ * task_curr - is this task currently executing on a CPU? ++ * @p: the task in question. ++ * ++ * Return: 1 if the task is currently executing. 0 otherwise. ++ */ ++inline int task_curr(const struct task_struct *p) ++{ ++ return cpu_curr(task_cpu(p)) == p; ++} ++ ++#ifdef CONFIG_SMP ++/* ++ * wait_task_inactive - wait for a thread to unschedule. ++ * ++ * If @match_state is nonzero, it's the @p->state value just checked and ++ * not expected to change. If it changes, i.e. @p might have woken up, ++ * then return zero. When we succeed in waiting for @p to be off its CPU, ++ * we return a positive number (its total switch count). If a second call ++ * a short while later returns the same number, the caller can be sure that ++ * @p has remained unscheduled the whole time. ++ * ++ * The caller must ensure that the task *will* unschedule sometime soon, ++ * else this function might spin for a *long* time. This function can't ++ * be called with interrupts off, or it may introduce deadlock with ++ * smp_call_function() if an IPI is sent by the same process we are ++ * waiting to become inactive. ++ */ ++unsigned long wait_task_inactive(struct task_struct *p, long match_state) ++{ ++ int running, queued; ++ struct rq_flags rf; ++ unsigned long ncsw; ++ struct rq *rq; ++ ++ for (;;) { ++ rq = task_rq(p); ++ ++ /* ++ * If the task is actively running on another CPU ++ * still, just relax and busy-wait without holding ++ * any locks. ++ * ++ * NOTE! Since we don't hold any locks, it's not ++ * even sure that "rq" stays as the right runqueue! ++ * But we don't care, since this will return false ++ * if the runqueue has changed and p is actually now ++ * running somewhere else! ++ */ ++ while (task_running(rq, p)) { ++ if (match_state && unlikely(p->state != match_state)) ++ return 0; ++ cpu_relax(); ++ } ++ ++ /* ++ * Ok, time to look more closely! We need the rq ++ * lock now, to be *sure*. If we're wrong, we'll ++ * just go back and repeat. ++ */ ++ rq = task_rq_lock(p, &rf); ++ trace_sched_wait_task(p); ++ running = task_running(rq, p); ++ queued = task_on_rq_queued(p); ++ ncsw = 0; ++ if (!match_state || p->state == match_state) ++ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ ++ task_rq_unlock(rq, p, &rf); ++ ++ /* ++ * If it changed from the expected state, bail out now. 
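++ * (ncsw can only still be zero when @match_state was specified and no
++ * longer matches; otherwise the LONG_MIN tag above keeps it nonzero.)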
++ */ ++ if (unlikely(!ncsw)) ++ break; ++ ++ /* ++ * Was it really running after all now that we ++ * checked with the proper locks actually held? ++ * ++ * Oops. Go back and try again.. ++ */ ++ if (unlikely(running)) { ++ cpu_relax(); ++ continue; ++ } ++ ++ /* ++ * It's not enough that it's not actively running, ++ * it must be off the runqueue _entirely_, and not ++ * preempted! ++ * ++ * So if it was still runnable (but just not actively ++ * running right now), it's preempted, and we should ++ * yield - it could be a while. ++ */ ++ if (unlikely(queued)) { ++ ktime_t to = NSEC_PER_SEC / HZ; ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_hrtimeout(&to, HRTIMER_MODE_REL); ++ continue; ++ } ++ ++ /* ++ * Ahh, all good. It wasn't running, and it wasn't ++ * runnable, which means that it will never become ++ * running in the future either. We're all done! ++ */ ++ break; ++ } ++ ++ return ncsw; ++} ++ ++/*** ++ * kick_process - kick a running thread to enter/exit the kernel ++ * @p: the to-be-kicked thread ++ * ++ * Cause a process which is running on another CPU to enter ++ * kernel-mode, without any delay. (to get signals handled.) ++ * ++ * NOTE: this function doesn't have to take the runqueue lock, ++ * because all it wants to ensure is that the remote task enters ++ * the kernel. If the IPI races and the task has been migrated ++ * to another CPU then no harm is done and the purpose has been ++ * achieved as well. ++ */ ++void kick_process(struct task_struct *p) ++{ ++ int cpu; ++ ++ preempt_disable(); ++ cpu = task_cpu(p); ++ if ((cpu != smp_processor_id()) && task_curr(p)) ++ smp_sched_reschedule(cpu); ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(kick_process); ++#endif ++ ++/* ++ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the ++ * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or ++ * between themselves, they cooperatively multitask. An idle rq scores as ++ * prio PRIO_LIMIT so it is always preempted. ++ */ ++static inline bool ++can_preempt(struct task_struct *p, int prio, u64 deadline) ++{ ++ /* Better static priority RT task or better policy preemption */ ++ if (p->prio < prio) ++ return true; ++ if (p->prio > prio) ++ return false; ++ if (p->policy == SCHED_BATCH) ++ return false; ++ /* SCHED_NORMAL and ISO will preempt based on deadline */ ++ if (!deadline_before(p->deadline, deadline)) ++ return false; ++ return true; ++} ++ ++#ifdef CONFIG_SMP ++ ++static inline bool is_per_cpu_kthread(struct task_struct *p) ++{ ++ if (!(p->flags & PF_KTHREAD)) ++ return false; ++ ++ if (p->nr_cpus_allowed != 1) ++ return false; ++ ++ return true; ++} ++ ++/* ++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see ++ * __set_cpus_allowed_ptr(). ++ */ ++static inline bool is_cpu_allowed(struct task_struct *p, int cpu) ++{ ++ if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) ++ return false; ++ ++ if (is_per_cpu_kthread(p)) ++ return cpu_online(cpu); ++ ++ return cpu_active(cpu); ++} ++ ++/* ++ * Check to see if p can run on cpu, and if not, whether there are any online ++ * CPUs it can run on instead. This only happens with the hotplug threads that ++ * bring up the CPUs. 
++ */ ++static inline bool sched_other_cpu(struct task_struct *p, int cpu) ++{ ++ if (likely(cpumask_test_cpu(cpu, &p->cpus_allowed))) ++ return false; ++ if (p->nr_cpus_allowed == 1) { ++ cpumask_t valid_mask; ++ ++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_online_mask); ++ if (unlikely(cpumask_empty(&valid_mask))) ++ return false; ++ } ++ return true; ++} ++ ++static inline bool needs_other_cpu(struct task_struct *p, int cpu) ++{ ++ if (cpumask_test_cpu(cpu, &p->cpus_allowed)) ++ return false; ++ return true; ++} ++ ++#define cpu_online_map (*(cpumask_t *)cpu_online_mask) ++ ++static void try_preempt(struct task_struct *p, struct rq *this_rq) ++{ ++ int i, this_entries = rq_load(this_rq); ++ cpumask_t tmp; ++ ++ if (suitable_idle_cpus(p) && resched_best_idle(p, task_cpu(p))) ++ return; ++ ++ /* IDLEPRIO tasks never preempt anything but idle */ ++ if (p->policy == SCHED_IDLEPRIO) ++ return; ++ ++ cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed); ++ ++ for (i = 0; i < num_possible_cpus(); i++) { ++ struct rq *rq = this_rq->cpu_order[i]; ++ ++ if (!cpumask_test_cpu(rq->cpu, &tmp)) ++ continue; ++ ++ if (!sched_interactive && rq != this_rq && rq_load(rq) <= this_entries) ++ continue; ++ if (smt_schedule(p, rq) && can_preempt(p, rq->rq_prio, rq->rq_deadline)) { ++ /* We set rq->preempting lockless, it's a hint only */ ++ rq->preempting = p; ++ resched_curr(rq); ++ return; ++ } ++ } ++} ++ ++static int __set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, bool check); ++#else /* CONFIG_SMP */ ++static inline bool needs_other_cpu(struct task_struct *p, int cpu) ++{ ++ return false; ++} ++ ++static void try_preempt(struct task_struct *p, struct rq *this_rq) ++{ ++ if (p->policy == SCHED_IDLEPRIO) ++ return; ++ if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline)) ++ resched_curr(uprq); ++} ++ ++static inline int __set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, bool check) ++{ ++ return set_cpus_allowed_ptr(p, new_mask); ++} ++#endif /* CONFIG_SMP */ ++ ++/* ++ * wake flags ++ */ ++#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ ++#define WF_FORK 0x02 /* child wakeup after fork */ ++#define WF_MIGRATED 0x04 /* internal use, task got migrated */ ++ ++static void ++ttwu_stat(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq; ++ ++ if (!schedstat_enabled()) ++ return; ++ ++ rq = this_rq(); ++ ++#ifdef CONFIG_SMP ++ if (cpu == rq->cpu) { ++ __schedstat_inc(rq->ttwu_local); ++ } else { ++ struct sched_domain *sd; ++ ++ rcu_read_lock(); ++ for_each_domain(rq->cpu, sd) { ++ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { ++ __schedstat_inc(sd->ttwu_wake_remote); ++ break; ++ } ++ } ++ rcu_read_unlock(); ++ } ++ ++#endif /* CONFIG_SMP */ ++ ++ __schedstat_inc(rq->ttwu_count); ++} ++ ++static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) ++{ ++ activate_task(p, rq, en_flags); ++ ++ /* if a worker is waking up, notify the workqueue */ ++ if (p->flags & PF_WQ_WORKER) ++ wq_worker_waking_up(p, cpu_of(rq)); ++} ++ ++/* ++ * Mark the task runnable and perform wakeup-preemption. ++ */ ++static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) ++{ ++ /* ++ * Sync wakeups (i.e. those types of wakeups where the waker ++ * has indicated that it will leave the CPU in short order) ++ * don't trigger a preemption if there are no idle cpus, ++ * instead waiting for current to deschedule. 
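++ * (WF_SYNC is the hint passed by callers such as
++ * wake_up_interruptible_sync_poll() that the waker is about to sleep.)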
++ */ ++ if (wake_flags & WF_SYNC) ++ resched_suitable_idle(p); ++ else ++ try_preempt(p, rq); ++ p->state = TASK_RUNNING; ++ trace_sched_wakeup(p); ++} ++ ++static void ++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) ++{ ++ int en_flags = ENQUEUE_WAKEUP; ++ ++ lockdep_assert_held(rq->lock); ++ ++#ifdef CONFIG_SMP ++ if (p->sched_contributes_to_load) ++ rq->nr_uninterruptible--; ++ ++ if (wake_flags & WF_MIGRATED) ++ en_flags |= ENQUEUE_MIGRATED; ++#endif ++ ++ ttwu_activate(rq, p, en_flags); ++ ttwu_do_wakeup(rq, p, wake_flags); ++} ++ ++/* ++ * Called in case the task @p isn't fully descheduled from its runqueue, ++ * in this case we must do a remote wakeup. Its a 'light' wakeup though, ++ * since all we need to do is flip p->state to TASK_RUNNING, since ++ * the task is still ->on_rq. ++ */ ++static int ttwu_remote(struct task_struct *p, int wake_flags) ++{ ++ struct rq *rq; ++ int ret = 0; ++ ++ rq = __task_rq_lock(p, NULL); ++ if (likely(task_on_rq_queued(p))) { ++ ttwu_do_wakeup(rq, p, wake_flags); ++ ret = 1; ++ } ++ __task_rq_unlock(rq, NULL); ++ ++ return ret; ++} ++ ++#ifdef CONFIG_SMP ++void sched_ttwu_pending(void) ++{ ++ struct rq *rq = this_rq(); ++ struct llist_node *llist = llist_del_all(&rq->wake_list); ++ struct task_struct *p, *t; ++ struct rq_flags rf; ++ ++ if (!llist) ++ return; ++ ++ rq_lock_irqsave(rq, &rf); ++ ++ llist_for_each_entry_safe(p, t, llist, wake_entry) ++ ttwu_do_activate(rq, p, 0); ++ ++ rq_unlock_irqrestore(rq, &rf); ++} ++ ++void scheduler_ipi(void) ++{ ++ /* ++ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting ++ * TIF_NEED_RESCHED remotely (for the first time) will also send ++ * this IPI. ++ */ ++ preempt_fold_need_resched(); ++ ++ if (llist_empty(&this_rq()->wake_list) && (!idle_cpu(smp_processor_id()) || need_resched())) ++ return; ++ ++ /* ++ * Not all reschedule IPI handlers call irq_enter/irq_exit, since ++ * traditionally all their work was done from the interrupt return ++ * path. Now that we actually do some work, we need to make sure ++ * we do call them. ++ * ++ * Some archs already do call them, luckily irq_enter/exit nest ++ * properly. ++ * ++ * Arguably we should visit all archs and update all handlers, ++ * however a fair share of IPIs are still resched only so this would ++ * somewhat pessimize the simple resched case. 
++ */ ++ irq_enter(); ++ sched_ttwu_pending(); ++ irq_exit(); ++} ++ ++static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) { ++ if (!set_nr_if_polling(rq->idle)) ++ smp_sched_reschedule(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++ } ++} ++ ++void wake_up_if_idle(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ struct rq_flags rf; ++ ++ rcu_read_lock(); ++ ++ if (!is_idle_task(rcu_dereference(rq->curr))) ++ goto out; ++ ++ if (set_nr_if_polling(rq->idle)) { ++ trace_sched_wake_idle_without_ipi(cpu); ++ } else { ++ rq_lock_irqsave(rq, &rf); ++ if (likely(is_idle_task(rq->curr))) ++ smp_sched_reschedule(cpu); ++ /* Else cpu is not in idle, do nothing here */ ++ rq_unlock_irqrestore(rq, &rf); ++ } ++ ++out: ++ rcu_read_unlock(); ++} ++ ++static int valid_task_cpu(struct task_struct *p) ++{ ++ cpumask_t valid_mask; ++ ++ if (p->flags & PF_KTHREAD) ++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_all_mask); ++ else ++ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_active_mask); ++ ++ if (unlikely(!cpumask_weight(&valid_mask))) { ++ /* We shouldn't be hitting this any more */ ++ printk(KERN_WARNING "SCHED: No cpumask for %s/%d weight %d\n", p->comm, ++ p->pid, cpumask_weight(&p->cpus_allowed)); ++ return cpumask_any(&p->cpus_allowed); ++ } ++ return cpumask_any(&valid_mask); ++} ++ ++/* ++ * For a task that's just being woken up we have a valuable balancing ++ * opportunity so choose the nearest cache most lightly loaded runqueue. ++ * Entered with rq locked and returns with the chosen runqueue locked. ++ */ ++static inline int select_best_cpu(struct task_struct *p) ++{ ++ unsigned int idlest = ~0U; ++ struct rq *rq = NULL; ++ int i; ++ ++ if (suitable_idle_cpus(p)) { ++ int cpu = task_cpu(p); ++ ++ if (unlikely(needs_other_cpu(p, cpu))) ++ cpu = valid_task_cpu(p); ++ rq = resched_best_idle(p, cpu); ++ if (likely(rq)) ++ return rq->cpu; ++ } ++ ++ for (i = 0; i < num_possible_cpus(); i++) { ++ struct rq *other_rq = task_rq(p)->cpu_order[i]; ++ int entries; ++ ++ if (!other_rq->online) ++ continue; ++ if (needs_other_cpu(p, other_rq->cpu)) ++ continue; ++ entries = rq_load(other_rq); ++ if (entries >= idlest) ++ continue; ++ idlest = entries; ++ rq = other_rq; ++ } ++ if (unlikely(!rq)) ++ return task_cpu(p); ++ return rq->cpu; ++} ++#else /* CONFIG_SMP */ ++static int valid_task_cpu(struct task_struct *p) ++{ ++ return 0; ++} ++ ++static inline int select_best_cpu(struct task_struct *p) ++{ ++ return 0; ++} ++ ++static struct rq *resched_best_idle(struct task_struct *p, int cpu) ++{ ++ return NULL; ++} ++#endif /* CONFIG_SMP */ ++ ++static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++#if defined(CONFIG_SMP) ++ if (!cpus_share_cache(smp_processor_id(), cpu)) { ++ sched_clock_cpu(cpu); /* Sync clocks across CPUs */ ++ ttwu_queue_remote(p, cpu, wake_flags); ++ return; ++ } ++#endif ++ rq_lock(rq); ++ ttwu_do_activate(rq, p, wake_flags); ++ rq_unlock(rq); ++} ++ ++/*** ++ * try_to_wake_up - wake up a thread ++ * @p: the thread to be awakened ++ * @state: the mask of task states that can be woken ++ * @wake_flags: wake modifier flags (WF_*) ++ * ++ * Put it on the run-queue if it's not already there. 
The "current"
++ * thread is always on the run-queue (except when the actual
++ * re-schedule is in progress), and as such you're allowed to do
++ * the simpler "current->state = TASK_RUNNING" to mark yourself
++ * runnable without the overhead of this.
++ *
++ * Return: %true if @p was woken up, %false if it was already running,
++ * or @state didn't match @p's state.
++ */
++static int
++try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
++{
++	unsigned long flags;
++	int cpu, success = 0;
++
++	/*
++	 * If we are going to wake up a thread waiting for CONDITION we
++	 * need to ensure that CONDITION=1 done by the caller cannot be
++	 * reordered with p->state check below. This pairs with mb() in
++	 * set_current_state() the waiting thread does.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	smp_mb__after_spinlock();
++	/* state is a volatile long, why, I don't know */
++	if (!((unsigned int)p->state & state))
++		goto out;
++
++	trace_sched_waking(p);
++
++	/* We're going to change ->state: */
++	success = 1;
++	cpu = task_cpu(p);
++
++	/*
++	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++	 * in smp_cond_load_acquire() below.
++	 *
++	 * sched_ttwu_pending()			try_to_wake_up()
++	 *   STORE p->on_rq = 1			  LOAD p->state
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   UNLOCK rq->lock
++	 *
++	 * [task p]
++	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule(). See the comment for smp_mb__after_spinlock().
++	 */
++	smp_rmb();
++	if (p->on_rq && ttwu_remote(p, wake_flags))
++		goto stat;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++	 * possible to, falsely, observe p->on_cpu == 0.
++	 *
++	 * One must be running (->on_cpu == 1) in order to remove oneself
++	 * from the runqueue.
++	 *
++	 * __schedule() (switch to task 'p')	try_to_wake_up()
++	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (put 'p' to sleep)
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule(). See the comment for smp_mb__after_spinlock().
++	 */
++	smp_rmb();
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, wait until it's done referencing the task.
++	 *
++	 * Pairs with the smp_store_release() in finish_task().
++	 *
++	 * This ensures that tasks getting woken will be fully ordered against
++	 * their previous state and preserve Program Order.
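The two ladder diagrams above are acquire/release pairings. A loose user-space analogue, with a C11 release store standing in for the UNLOCK side and an acquire load standing in for the smp_rmb() side; purely illustrative, this is not the kernel memory model (compile with -pthread):

#include <stdatomic.h>
#include <pthread.h>
#include <assert.h>

static atomic_int state;		/* stands in for p->state */
static atomic_int on_rq;		/* stands in for p->on_rq */

static void *schedule_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&state, 1, memory_order_relaxed);
	/* release: the store above cannot be reordered past this one */
	atomic_store_explicit(&on_rq, 1, memory_order_release);
	return NULL;
}

static void *waker_side(void *arg)
{
	(void)arg;
	/* acquire pairs with the release, like the smp_rmb() pairing */
	if (atomic_load_explicit(&on_rq, memory_order_acquire))
		assert(atomic_load_explicit(&state, memory_order_relaxed) == 1);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, schedule_side, NULL);
	pthread_create(&b, NULL, waker_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

If on_rq is observed as 1, the earlier write to state is guaranteed visible, which is exactly the property the wakeup path relies on.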
++ */ ++ smp_cond_load_acquire(&p->on_cpu, !VAL); ++ ++ p->sched_contributes_to_load = !!task_contributes_to_load(p); ++ p->state = TASK_WAKING; ++ ++ if (p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&task_rq(p)->nr_iowait); ++ } ++ ++ cpu = select_best_cpu(p); ++ if (task_cpu(p) != cpu) { ++ wake_flags |= WF_MIGRATED; ++ psi_ttwu_dequeue(p); ++ set_task_cpu(p, cpu); ++ } ++ ++#else /* CONFIG_SMP */ ++ ++ if (p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&task_rq(p)->nr_iowait); ++ } ++ ++#endif /* CONFIG_SMP */ ++ ++ ttwu_queue(p, cpu, wake_flags); ++stat: ++ ttwu_stat(p, cpu, wake_flags); ++out: ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++ return success; ++} ++ ++/** ++ * try_to_wake_up_local - try to wake up a local task with rq lock held ++ * @p: the thread to be awakened ++ * ++ * Put @p on the run-queue if it's not already there. The caller must ++ * ensure that rq is locked and, @p is not the current task. ++ * rq stays locked over invocation. ++ */ ++static void try_to_wake_up_local(struct task_struct *p) ++{ ++ struct rq *rq = task_rq(p); ++ ++ if (WARN_ON_ONCE(rq != this_rq()) || ++ WARN_ON_ONCE(p == current)) ++ return; ++ ++ lockdep_assert_held(rq->lock); ++ ++ if (!raw_spin_trylock(&p->pi_lock)) { ++ /* ++ * This is OK, because current is on_cpu, which avoids it being ++ * picked for load-balance and preemption/IRQs are still ++ * disabled avoiding further scheduler activity on it and we've ++ * not yet picked a replacement task. ++ */ ++ rq_unlock(rq); ++ raw_spin_lock(&p->pi_lock); ++ rq_lock(rq); ++ } ++ ++ if (!(p->state & TASK_NORMAL)) ++ goto out; ++ ++ trace_sched_waking(p); ++ ++ if (!task_on_rq_queued(p)) { ++ if (p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&rq->nr_iowait); ++ } ++ ttwu_activate(rq, p, ENQUEUE_WAKEUP); ++ } ++ ++ ttwu_do_wakeup(rq, p, 0); ++ ttwu_stat(p, smp_processor_id(), 0); ++out: ++ raw_spin_unlock(&p->pi_lock); ++} ++ ++/** ++ * wake_up_process - Wake up a specific process ++ * @p: The process to be woken up. ++ * ++ * Attempt to wake up the nominated process and move it to the set of runnable ++ * processes. ++ * ++ * Return: 1 if the process was woken up, 0 if it was already running. ++ * ++ * This function executes a full memory barrier before accessing the task state. ++ */ ++int wake_up_process(struct task_struct *p) ++{ ++ return try_to_wake_up(p, TASK_NORMAL, 0); ++} ++EXPORT_SYMBOL(wake_up_process); ++ ++int wake_up_state(struct task_struct *p, unsigned int state) ++{ ++ return try_to_wake_up(p, state, 0); ++} ++ ++static void time_slice_expired(struct task_struct *p, struct rq *rq); ++ ++/* ++ * Perform scheduler related setup for a newly forked process p. ++ * p is forked by current. ++ */ ++int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p) ++{ ++ unsigned long flags; ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ INIT_HLIST_HEAD(&p->preempt_notifiers); ++#endif ++ ++#ifdef CONFIG_COMPACTION ++ p->capture_control = NULL; ++#endif ++ ++ /* ++ * We mark the process as NEW here. This guarantees that ++ * nobody will actually run it, and a signal or other external ++ * event cannot wake it up and insert it on the runqueue either. ++ */ ++ p->state = TASK_NEW; ++ ++ /* ++ * The process state is set to the same value of the process executing ++ * do_fork() code. That is running. This guarantees that nobody will ++ * actually run it, and a signal or other external event cannot wake ++ * it up and insert it on the runqueue either. 
++ */ ++ ++ /* Should be reset in fork.c but done here for ease of MuQSS patching */ ++ p->on_cpu = ++ p->on_rq = ++ p->utime = ++ p->stime = ++ p->sched_time = ++ p->stime_ns = ++ p->utime_ns = 0; ++ skiplist_node_init(&p->node); ++ ++ /* ++ * Revert to default priority/policy on fork if requested. ++ */ ++ if (unlikely(p->sched_reset_on_fork)) { ++ if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) { ++ p->policy = SCHED_NORMAL; ++ p->normal_prio = normal_prio(p); ++ } ++ ++ if (PRIO_TO_NICE(p->static_prio) < 0) { ++ p->static_prio = NICE_TO_PRIO(0); ++ p->normal_prio = p->static_prio; ++ } ++ ++ /* ++ * We don't need the reset flag anymore after the fork. It has ++ * fulfilled its duty: ++ */ ++ p->sched_reset_on_fork = 0; ++ } ++ ++ /* ++ * Silence PROVE_RCU. ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ set_task_cpu(p, smp_processor_id()); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++#ifdef CONFIG_SCHED_INFO ++ if (unlikely(sched_info_on())) ++ memset(&p->sched_info, 0, sizeof(p->sched_info)); ++#endif ++ init_task_preempt_count(p); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_SCHEDSTATS ++ ++DEFINE_STATIC_KEY_FALSE(sched_schedstats); ++static bool __initdata __sched_schedstats = false; ++ ++static void set_schedstats(bool enabled) ++{ ++ if (enabled) ++ static_branch_enable(&sched_schedstats); ++ else ++ static_branch_disable(&sched_schedstats); ++} ++ ++void force_schedstat_enabled(void) ++{ ++ if (!schedstat_enabled()) { ++ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); ++ static_branch_enable(&sched_schedstats); ++ } ++} ++ ++static int __init setup_schedstats(char *str) ++{ ++ int ret = 0; ++ if (!str) ++ goto out; ++ ++ /* ++ * This code is called before jump labels have been set up, so we can't ++ * change the static branch directly just yet. Instead set a temporary ++ * variable so init_schedstats() can do it later. 
++ */ ++ if (!strcmp(str, "enable")) { ++ __sched_schedstats = true; ++ ret = 1; ++ } else if (!strcmp(str, "disable")) { ++ __sched_schedstats = false; ++ ret = 1; ++ } ++out: ++ if (!ret) ++ pr_warn("Unable to parse schedstats=\n"); ++ ++ return ret; ++} ++__setup("schedstats=", setup_schedstats); ++ ++static void __init init_schedstats(void) ++{ ++ set_schedstats(__sched_schedstats); ++} ++ ++#ifdef CONFIG_PROC_SYSCTL ++int sysctl_schedstats(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ struct ctl_table t; ++ int err; ++ int state = static_branch_likely(&sched_schedstats); ++ ++ if (write && !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ t = *table; ++ t.data = &state; ++ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); ++ if (err < 0) ++ return err; ++ if (write) ++ set_schedstats(state); ++ return err; ++} ++#endif /* CONFIG_PROC_SYSCTL */ ++#else /* !CONFIG_SCHEDSTATS */ ++static inline void init_schedstats(void) {} ++#endif /* CONFIG_SCHEDSTATS */ ++ ++static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p); ++ ++static void account_task_cpu(struct rq *rq, struct task_struct *p) ++{ ++ update_clocks(rq); ++ /* This isn't really a context switch but accounting is the same */ ++ update_cpu_clock_switch(rq, p); ++ p->last_ran = rq->niffies; ++} ++ ++bool sched_smp_initialized __read_mostly; ++ ++static inline int hrexpiry_enabled(struct rq *rq) ++{ ++ if (unlikely(!cpu_active(cpu_of(rq)) || !sched_smp_initialized)) ++ return 0; ++ return hrtimer_is_hres_active(&rq->hrexpiry_timer); ++} ++ ++/* ++ * Use HR-timers to deliver accurate preemption points. ++ */ ++static inline void hrexpiry_clear(struct rq *rq) ++{ ++ if (!hrexpiry_enabled(rq)) ++ return; ++ if (hrtimer_active(&rq->hrexpiry_timer)) ++ hrtimer_cancel(&rq->hrexpiry_timer); ++} ++ ++/* ++ * High-resolution time_slice expiry. ++ * Runs from hardirq context with interrupts disabled. ++ */ ++static enum hrtimer_restart hrexpiry(struct hrtimer *timer) ++{ ++ struct rq *rq = container_of(timer, struct rq, hrexpiry_timer); ++ struct task_struct *p; ++ ++ /* This can happen during CPU hotplug / resume */ ++ if (unlikely(cpu_of(rq) != smp_processor_id())) ++ goto out; ++ ++ /* ++ * We're doing this without the runqueue lock but this should always ++ * be run on the local CPU. Time slice should run out in __schedule ++ * but we set it to zero here in case niffies is slightly less. ++ */ ++ p = rq->curr; ++ p->time_slice = 0; ++ __set_tsk_resched(p); ++out: ++ return HRTIMER_NORESTART; ++} ++ ++/* ++ * Called to set the hrexpiry timer state. ++ * ++ * called with irqs disabled from the local CPU only ++ */ ++static void hrexpiry_start(struct rq *rq, u64 delay) ++{ ++ if (!hrexpiry_enabled(rq)) ++ return; ++ ++ hrtimer_start(&rq->hrexpiry_timer, ns_to_ktime(delay), ++ HRTIMER_MODE_REL_PINNED); ++} ++ ++static void init_rq_hrexpiry(struct rq *rq) ++{ ++ hrtimer_init(&rq->hrexpiry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ rq->hrexpiry_timer.function = hrexpiry; ++} ++ ++static inline int rq_dither(struct rq *rq) ++{ ++ if (!hrexpiry_enabled(rq)) ++ return HALF_JIFFY_US; ++ return 0; ++} ++ ++/* ++ * wake_up_new_task - wake up a newly created task for the first time. ++ * ++ * This function will do some initial scheduler statistics housekeeping ++ * that must be done for every newly created context, then puts the task ++ * on the runqueue and wakes it. 
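The hrexpiry machinery above can be pictured as an ordinary one-shot high-resolution timer: arm it for the remaining slice, and on expiry zero the slice and raise a resched flag, as hrexpiry() does via __set_tsk_resched(). A hedged user-space sketch; the 2ms slice and the flag names are invented for the demo (link with -lrt on older glibc):

#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_int need_resched;
static atomic_long time_slice_ns = 2 * 1000 * 1000;	/* 2ms slice */

static void expiry(union sigval sv)	/* models hrexpiry() */
{
	(void)sv;
	atomic_store(&time_slice_ns, 0);
	atomic_store(&need_resched, 1);	/* models __set_tsk_resched() */
}

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_THREAD,
		.sigev_notify_function = expiry,
	};
	struct itimerspec its = {
		.it_value.tv_nsec = atomic_load(&time_slice_ns),
	};
	timer_t timer;

	timer_create(CLOCK_MONOTONIC, &sev, &timer);
	timer_settime(timer, 0, &its, NULL);	/* one-shot, relative */

	while (!atomic_load(&need_resched))
		;	/* "run" until the slice expires */
	puts("slice expired, would __schedule()");
	return 0;
}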
++ */ ++void wake_up_new_task(struct task_struct *p) ++{ ++ struct task_struct *parent, *rq_curr; ++ struct rq *rq, *new_rq; ++ unsigned long flags; ++ ++ parent = p->parent; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ p->state = TASK_RUNNING; ++ /* Task_rq can't change yet on a new task */ ++ new_rq = rq = task_rq(p); ++ if (unlikely(needs_other_cpu(p, task_cpu(p)))) { ++ set_task_cpu(p, valid_task_cpu(p)); ++ new_rq = task_rq(p); ++ } ++ ++ double_rq_lock(rq, new_rq); ++ rq_curr = rq->curr; ++ ++ /* ++ * Make sure we do not leak PI boosting priority to the child. ++ */ ++ p->prio = rq_curr->normal_prio; ++ ++ trace_sched_wakeup_new(p); ++ ++ /* ++ * Share the timeslice between parent and child, thus the ++ * total amount of pending timeslices in the system doesn't change, ++ * resulting in more scheduling fairness. If it's negative, it won't ++ * matter since that's the same as being 0. rq->rq_deadline is only ++ * modified within schedule() so it is always equal to ++ * current->deadline. ++ */ ++ account_task_cpu(rq, rq_curr); ++ p->last_ran = rq_curr->last_ran; ++ if (likely(rq_curr->policy != SCHED_FIFO)) { ++ rq_curr->time_slice /= 2; ++ if (rq_curr->time_slice < RESCHED_US) { ++ /* ++ * Forking task has run out of timeslice. Reschedule it and ++ * start its child with a new time slice and deadline. The ++ * child will end up running first because its deadline will ++ * be slightly earlier. ++ */ ++ __set_tsk_resched(rq_curr); ++ time_slice_expired(p, new_rq); ++ if (suitable_idle_cpus(p)) ++ resched_best_idle(p, task_cpu(p)); ++ else if (unlikely(rq != new_rq)) ++ try_preempt(p, new_rq); ++ } else { ++ p->time_slice = rq_curr->time_slice; ++ if (rq_curr == parent && rq == new_rq && !suitable_idle_cpus(p)) { ++ /* ++ * The VM isn't cloned, so we're in a good position to ++ * do child-runs-first in anticipation of an exec. This ++ * usually avoids a lot of COW overhead. ++ */ ++ __set_tsk_resched(rq_curr); ++ } else { ++ /* ++ * Adjust the hrexpiry since rq_curr will keep ++ * running and its timeslice has been shortened. ++ */ ++ hrexpiry_start(rq, US_TO_NS(rq_curr->time_slice)); ++ try_preempt(p, new_rq); ++ } ++ } ++ } else { ++ time_slice_expired(p, new_rq); ++ try_preempt(p, new_rq); ++ } ++ activate_task(p, new_rq, 0); ++ double_rq_unlock(rq, new_rq); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++} ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ ++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); ++ ++void preempt_notifier_inc(void) ++{ ++ static_branch_inc(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_inc); ++ ++void preempt_notifier_dec(void) ++{ ++ static_branch_dec(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_dec); ++ ++/** ++ * preempt_notifier_register - tell me when current is being preempted & rescheduled ++ * @notifier: notifier struct to register ++ */ ++void preempt_notifier_register(struct preempt_notifier *notifier) ++{ ++ if (!static_branch_unlikely(&preempt_notifier_key)) ++ WARN(1, "registering preempt_notifier while notifiers disabled\n"); ++ ++ hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_register); ++ ++/** ++ * preempt_notifier_unregister - no longer interested in preemption notifications ++ * @notifier: notifier struct to unregister ++ * ++ * This is *not* safe to call from within a preemption notifier. 
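The fork-time bookkeeping in wake_up_new_task() above is simple arithmetic: the parent keeps half of its remaining slice, the child inherits the matching half (so the total outstanding timeslice is unchanged), and if the remainder drops below RESCHED_US the parent is rescheduled while the child starts with a fresh slice and deadline. The same policy in miniature; the RESCHED_US value is an assumption for the demo:

#include <stdio.h>

#define RESCHED_US 128			/* assumed threshold, in us */

/* returns the child's slice, or -1 if the parent must reschedule */
static int fork_split(int *parent_slice)
{
	*parent_slice /= 2;
	if (*parent_slice < RESCHED_US)
		return -1;		/* child gets a fresh slice instead */
	return *parent_slice;		/* child shares the remainder */
}

int main(void)
{
	int parent = 6000;		/* ~6ms of slice left, in us */
	int child = fork_split(&parent);

	if (child < 0)
		puts("parent reschedules; child starts a fresh slice");
	else
		printf("parent keeps %dus, child gets %dus\n", parent, child);
	return 0;
}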
++ */ ++void preempt_notifier_unregister(struct preempt_notifier *notifier) ++{ ++ hlist_del(¬ifier->link); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_unregister); ++ ++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_in(notifier, raw_smp_processor_id()); ++} ++ ++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ if (static_branch_unlikely(&preempt_notifier_key)) ++ __fire_sched_in_preempt_notifiers(curr); ++} ++ ++static void ++__fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_out(notifier, next); ++} ++ ++static __always_inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ if (static_branch_unlikely(&preempt_notifier_key)) ++ __fire_sched_out_preempt_notifiers(curr, next); ++} ++ ++#else /* !CONFIG_PREEMPT_NOTIFIERS */ ++ ++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++} ++ ++static inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++} ++ ++#endif /* CONFIG_PREEMPT_NOTIFIERS */ ++ ++static inline void prepare_task(struct task_struct *next) ++{ ++ /* ++ * Claim the task as running, we do this before switching to it ++ * such that any running task will have this set. ++ */ ++ next->on_cpu = 1; ++} ++ ++static inline void finish_task(struct task_struct *prev) ++{ ++#ifdef CONFIG_SMP ++ /* ++ * After ->on_cpu is cleared, the task can be moved to a different CPU. ++ * We must ensure this doesn't happen until the switch is completely ++ * finished. ++ * ++ * In particular, the load of prev->state in finish_task_switch() must ++ * happen before this. ++ * ++ * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). ++ */ ++ smp_store_release(&prev->on_cpu, 0); ++#endif ++} ++ ++static inline void ++prepare_lock_switch(struct rq *rq, struct task_struct *next) ++{ ++ /* ++ * Since the runqueue lock will be released by the next ++ * task (which is an invalid locking op but in the case ++ * of the scheduler it's an obvious special-case), so we ++ * do an early lockdep release here: ++ */ ++ spin_release(&rq->lock->dep_map, 1, _THIS_IP_); ++#ifdef CONFIG_DEBUG_SPINLOCK ++ /* this is a valid case when another task releases the spinlock */ ++ rq->lock->owner = next; ++#endif ++} ++ ++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) ++{ ++ /* ++ * If we are tracking spinlock dependencies then we have to ++ * fix up the runqueue lock - which gets 'carried over' from ++ * prev into current: ++ */ ++ spin_acquire(&rq->lock->dep_map, 0, 0, _THIS_IP_); ++ ++#ifdef CONFIG_SMP ++ /* ++ * If prev was marked as migrating to another CPU in return_task, drop ++ * the local runqueue lock but leave interrupts disabled and grab the ++ * remote lock we're migrating it to before enabling them. ++ */ ++ if (unlikely(task_on_rq_migrating(prev))) { ++ sched_info_dequeued(rq, prev); ++ /* ++ * We move the ownership of prev to the new cpu now. ttwu can't ++ * activate prev to the wrong cpu since it has to grab this ++ * runqueue in ttwu_remote. 
++ */ ++#ifdef CONFIG_THREAD_INFO_IN_TASK ++ prev->cpu = prev->wake_cpu; ++#else ++ task_thread_info(prev)->cpu = prev->wake_cpu; ++#endif ++ raw_spin_unlock(rq->lock); ++ ++ raw_spin_lock(&prev->pi_lock); ++ rq = __task_rq_lock(prev, NULL); ++ /* Check that someone else hasn't already queued prev */ ++ if (likely(!task_queued(prev))) { ++ enqueue_task(rq, prev, 0); ++ prev->on_rq = TASK_ON_RQ_QUEUED; ++ /* Wake up the CPU if it's not already running */ ++ resched_if_idle(rq); ++ } ++ raw_spin_unlock(&prev->pi_lock); ++ } ++#endif ++ rq_unlock(rq); ++ ++ do_pending_softirq(rq, current); ++ ++ local_irq_enable(); ++} ++ ++#ifndef prepare_arch_switch ++# define prepare_arch_switch(next) do { } while (0) ++#endif ++#ifndef finish_arch_switch ++# define finish_arch_switch(prev) do { } while (0) ++#endif ++#ifndef finish_arch_post_lock_switch ++# define finish_arch_post_lock_switch() do { } while (0) ++#endif ++ ++/** ++ * prepare_task_switch - prepare to switch tasks ++ * @rq: the runqueue preparing to switch ++ * @next: the task we are going to switch to. ++ * ++ * This is called with the rq lock held and interrupts off. It must ++ * be paired with a subsequent finish_task_switch after the context ++ * switch. ++ * ++ * prepare_task_switch sets up locking and calls architecture specific ++ * hooks. ++ */ ++static inline void ++prepare_task_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ kcov_prepare_switch(prev); ++ sched_info_switch(rq, prev, next); ++ perf_event_task_sched_out(prev, next); ++ rseq_preempt(prev); ++ fire_sched_out_preempt_notifiers(prev, next); ++ prepare_task(next); ++ prepare_arch_switch(next); ++} ++ ++/** ++ * finish_task_switch - clean up after a task-switch ++ * @rq: runqueue associated with task-switch ++ * @prev: the thread we just switched away from. ++ * ++ * finish_task_switch must be called after the context switch, paired ++ * with a prepare_task_switch call before the context switch. ++ * finish_task_switch will reconcile locking set up by prepare_task_switch, ++ * and do any other architecture-specific cleanup actions. ++ * ++ * Note that we may have delayed dropping an mm in context_switch(). If ++ * so, we finish that here outside of the runqueue lock. (Doing it ++ * with the lock held can cause deadlocks; see schedule() for ++ * details.) ++ * ++ * The context switch have flipped the stack from under us and restored the ++ * local variables which were saved when this task called schedule() in the ++ * past. prev == current is still correct but we need to recalculate this_rq ++ * because prev may have moved to another CPU. ++ */ ++static void finish_task_switch(struct task_struct *prev) ++ __releases(rq->lock) ++{ ++ struct rq *rq = this_rq(); ++ struct mm_struct *mm = rq->prev_mm; ++ long prev_state; ++ ++ /* ++ * The previous task will have left us with a preempt_count of 2 ++ * because it left us after: ++ * ++ * schedule() ++ * preempt_disable(); // 1 ++ * __schedule() ++ * raw_spin_lock_irq(rq->lock) // 2 ++ * ++ * Also, see FORK_PREEMPT_COUNT. ++ */ ++ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, ++ "corrupted preempt_count: %s/%d/0x%x\n", ++ current->comm, current->pid, preempt_count())) ++ preempt_count_set(FORK_PREEMPT_COUNT); ++ ++ rq->prev_mm = NULL; ++ ++ /* ++ * A task struct has one reference for the use as "current". ++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls ++ * schedule one last time. 
The schedule call will never return, and
++ * the scheduled task must drop that reference.
++ *
++ * We must observe prev->state before clearing prev->on_cpu (in
++ * finish_task), otherwise a concurrent wakeup can get prev
++ * running on another CPU and we could race with its RUNNING -> DEAD
++ * transition, resulting in a double drop.
++ */
++	prev_state = prev->state;
++	vtime_task_switch(prev);
++	perf_event_task_sched_in(prev, current);
++	finish_task(prev);
++	finish_lock_switch(rq, prev);
++	finish_arch_post_lock_switch();
++	kcov_finish_switch(current);
++
++	fire_sched_in_preempt_notifiers(current);
++	/*
++	 * When switching through a kernel thread, the loop in
++	 * membarrier_{private,global}_expedited() may have observed that
++	 * kernel thread and not issued an IPI. It is therefore possible to
++	 * schedule between user->kernel->user threads without passing through
++	 * switch_mm(). Membarrier requires a barrier after storing to
++	 * rq->curr, before returning to userspace, so provide them here:
++	 *
++	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++	 *   provided by mmdrop(),
++	 * - a sync_core for SYNC_CORE.
++	 */
++	if (mm) {
++		membarrier_mm_sync_core_before_usermode(mm);
++		mmdrop(mm);
++	}
++	if (unlikely(prev_state == TASK_DEAD)) {
++		/*
++		 * Remove function-return probe instances associated with this
++		 * task and put them back on the free list.
++		 */
++		kprobe_flush_task(prev);
++
++		/* Task is done with its stack. */
++		put_task_stack(prev);
++
++		put_task_struct(prev);
++	}
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++{
++	/*
++	 * New tasks start with FORK_PREEMPT_COUNT, see there and
++	 * finish_task_switch() for details.
++	 *
++	 * finish_task_switch() will drop rq->lock() and lower preempt_count
++	 * and the preempt_enable() will end up enabling preemption (on
++	 * PREEMPT_COUNT kernels).
++	 */
++
++	finish_task_switch(prev);
++	preempt_enable();
++
++	if (current->set_child_tid)
++		put_user(task_pid_vnr(current), current->set_child_tid);
++
++	calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline void
++context_switch(struct rq *rq, struct task_struct *prev,
++	       struct task_struct *next)
++{
++	struct mm_struct *mm, *oldmm;
++
++	prepare_task_switch(rq, prev, next);
++
++	mm = next->mm;
++	oldmm = prev->active_mm;
++	/*
++	 * For paravirt, this is coupled with an exit in switch_to to
++	 * combine the page table reload and the switch backend into
++	 * one hypercall.
++	 */
++	arch_start_context_switch(prev);
++
++	/*
++	 * If mm is non-NULL, we pass through switch_mm(). If mm is
++	 * NULL, we will pass through mmdrop() in finish_task_switch().
++	 * Both of these contain the full memory barrier required by
++	 * membarrier after storing to rq->curr, before returning to
++	 * user-space.
++	 */
++	if (!mm) {
++		next->active_mm = oldmm;
++		mmgrab(oldmm);
++		enter_lazy_tlb(oldmm, next);
++	} else
++		switch_mm_irqs_off(oldmm, mm, next);
++
++	if (!prev->mm) {
++		prev->active_mm = NULL;
++		rq->prev_mm = oldmm;
++	}
++	prepare_lock_switch(rq, next);
++
++	/* Here we just switch the register state and the stack. */
++	switch_to(prev, next, prev);
++	barrier();
++
++	finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned long nr_running(void)
++{
++	unsigned long i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_running;
++
++	return sum;
++}
++
++static unsigned long nr_uninterruptible(void)
++{
++	unsigned long i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_uninterruptible;
++
++	return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race. The caller is responsible for using it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++	struct rq *rq = cpu_rq(smp_processor_id());
++
++	if (rq_load(rq) == 1)
++		return true;
++	else
++		return false;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches(void)
++{
++	int i;
++	unsigned long long sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += cpu_rq(i)->nr_switches;
++
++	return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpufreq menu
++ * governor, are using nonsensical data. Boosting frequency for a CPU that has
++ * IO-wait which might not even end up running the task when it does become
++ * runnable.
++ */
++
++unsigned long nr_iowait_cpu(int cpu)
++{
++	return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we
++ * could have spent running if it were not for IO. That is, if we were to
++ * improve the storage performance, we'd have a proportional reduction in
++ * IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU, only that
++ * one CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means, that when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, by reason of under-accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU, it can wake to another CPU than it
++ * blocked on. This means the per-CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
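To make the lower-bound claim above concrete: two tasks blocking on one CPU's runqueue accrue io-wait only against that CPU, while the capacity actually stalled on IO is twice that. A toy calculation with made-up numbers:

#include <stdio.h>

int main(void)
{
	const int blocked_tasks = 2;
	const int io_wait_ms = 100;	/* each task waits 100ms on IO */
	int per_cpu_iowait[2] = { 0, 0 };

	/* both sleepers last ran on cpu0, so only cpu0 accrues io-wait */
	per_cpu_iowait[0] = io_wait_ms;

	printf("per-cpu view: cpu0=%dms cpu1=%dms (global %dms)\n",
	       per_cpu_iowait[0], per_cpu_iowait[1],
	       per_cpu_iowait[0] + per_cpu_iowait[1]);
	printf("actually stalled: %dms of runnable work\n",
	       blocked_tasks * io_wait_ms);
	return 0;
}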
++ */ ++ ++unsigned long nr_iowait(void) ++{ ++ unsigned long i, sum = 0; ++ ++ for_each_possible_cpu(i) ++ sum += nr_iowait_cpu(i); ++ ++ return sum; ++} ++ ++unsigned long nr_active(void) ++{ ++ return nr_running() + nr_uninterruptible(); ++} ++ ++/* Variables and functions for calc_load */ ++static unsigned long calc_load_update; ++unsigned long avenrun[3]; ++EXPORT_SYMBOL(avenrun); ++ ++/** ++ * get_avenrun - get the load average array ++ * @loads: pointer to dest load array ++ * @offset: offset to add ++ * @shift: shift count to shift the result left ++ * ++ * These values are estimates at best, so no need for locking. ++ */ ++void get_avenrun(unsigned long *loads, unsigned long offset, int shift) ++{ ++ loads[0] = (avenrun[0] + offset) << shift; ++ loads[1] = (avenrun[1] + offset) << shift; ++ loads[2] = (avenrun[2] + offset) << shift; ++} ++ ++/* ++ * calc_load - update the avenrun load estimates every LOAD_FREQ seconds. ++ */ ++void calc_global_load(unsigned long ticks) ++{ ++ long active; ++ ++ if (time_before(jiffies, READ_ONCE(calc_load_update))) ++ return; ++ active = nr_active() * FIXED_1; ++ ++ avenrun[0] = calc_load(avenrun[0], EXP_1, active); ++ avenrun[1] = calc_load(avenrun[1], EXP_5, active); ++ avenrun[2] = calc_load(avenrun[2], EXP_15, active); ++ ++ calc_load_update = jiffies + LOAD_FREQ; ++} ++ ++/** ++ * fixed_power_int - compute: x^n, in O(log n) time ++ * ++ * @x: base of the power ++ * @frac_bits: fractional bits of @x ++ * @n: power to raise @x to. ++ * ++ * By exploiting the relation between the definition of the natural power ++ * function: x^n := x*x*...*x (x multiplied by itself for n times), and ++ * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i, ++ * (where: n_i \elem {0, 1}, the binary vector representing n), ++ * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is ++ * of course trivially computable in O(log_2 n), the length of our binary ++ * vector. ++ */ ++static unsigned long ++fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) ++{ ++ unsigned long result = 1UL << frac_bits; ++ ++ if (n) { ++ for (;;) { ++ if (n & 1) { ++ result *= x; ++ result += 1UL << (frac_bits - 1); ++ result >>= frac_bits; ++ } ++ n >>= 1; ++ if (!n) ++ break; ++ x *= x; ++ x += 1UL << (frac_bits - 1); ++ x >>= frac_bits; ++ } ++ } ++ ++ return result; ++} ++ ++/* ++ * a1 = a0 * e + a * (1 - e) ++ * ++ * a2 = a1 * e + a * (1 - e) ++ * = (a0 * e + a * (1 - e)) * e + a * (1 - e) ++ * = a0 * e^2 + a * (1 - e) * (1 + e) ++ * ++ * a3 = a2 * e + a * (1 - e) ++ * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e) ++ * = a0 * e^3 + a * (1 - e) * (1 + e + e^2) ++ * ++ * ... ++ * ++ * an = a0 * e^n + a * (1 - e) * (1 + e + ... 
+ e^n-1) [1] ++ * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e) ++ * = a0 * e^n + a * (1 - e^n) ++ * ++ * [1] application of the geometric series: ++ * ++ * n 1 - x^(n+1) ++ * S_n := \Sum x^i = ------------- ++ * i=0 1 - x ++ */ ++unsigned long ++calc_load_n(unsigned long load, unsigned long exp, ++ unsigned long active, unsigned int n) ++{ ++ return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); ++} ++ ++DEFINE_PER_CPU(struct kernel_stat, kstat); ++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); ++ ++EXPORT_PER_CPU_SYMBOL(kstat); ++EXPORT_PER_CPU_SYMBOL(kernel_cpustat); ++ ++#ifdef CONFIG_PARAVIRT ++static inline u64 steal_ticks(u64 steal) ++{ ++ if (unlikely(steal > NSEC_PER_SEC)) ++ return div_u64(steal, TICK_NSEC); ++ ++ return __iter_div_u64_rem(steal, TICK_NSEC, &steal); ++} ++#endif ++ ++#ifndef nsecs_to_cputime ++# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) ++#endif ++ ++/* ++ * On each tick, add the number of nanoseconds to the unbanked variables and ++ * once one tick's worth has accumulated, account it allowing for accurate ++ * sub-tick accounting and totals. Use the TICK_APPROX_NS to match the way we ++ * deduct nanoseconds. ++ */ ++static void pc_idle_time(struct rq *rq, struct task_struct *idle, unsigned long ns) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ unsigned long ticks; ++ ++ if (atomic_read(&rq->nr_iowait) > 0) { ++ rq->iowait_ns += ns; ++ if (rq->iowait_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->iowait_ns); ++ cpustat[CPUTIME_IOWAIT] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->iowait_ns %= JIFFY_NS; ++ } ++ } else { ++ rq->idle_ns += ns; ++ if (rq->idle_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->idle_ns); ++ cpustat[CPUTIME_IDLE] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->idle_ns %= JIFFY_NS; ++ } ++ } ++ acct_update_integrals(idle); ++} ++ ++static void pc_system_time(struct rq *rq, struct task_struct *p, ++ int hardirq_offset, unsigned long ns) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ unsigned long ticks; ++ ++ p->stime_ns += ns; ++ if (p->stime_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(p->stime_ns); ++ p->stime_ns %= JIFFY_NS; ++ p->stime += (__force u64)TICK_APPROX_NS * ticks; ++ account_group_system_time(p, TICK_APPROX_NS * ticks); ++ } ++ p->sched_time += ns; ++ account_group_exec_runtime(p, ns); ++ ++ if (hardirq_count() - hardirq_offset) { ++ rq->irq_ns += ns; ++ if (rq->irq_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->irq_ns); ++ cpustat[CPUTIME_IRQ] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->irq_ns %= JIFFY_NS; ++ } ++ } else if (in_serving_softirq()) { ++ rq->softirq_ns += ns; ++ if (rq->softirq_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->softirq_ns); ++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->softirq_ns %= JIFFY_NS; ++ } ++ } else { ++ rq->system_ns += ns; ++ if (rq->system_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->system_ns); ++ cpustat[CPUTIME_SYSTEM] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->system_ns %= JIFFY_NS; ++ } ++ } ++ acct_update_integrals(p); ++} ++ ++static void pc_user_time(struct rq *rq, struct task_struct *p, unsigned long ns) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ unsigned long ticks; ++ ++ p->utime_ns += ns; ++ if (p->utime_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(p->utime_ns); ++ p->utime_ns %= JIFFY_NS; ++ p->utime += (__force u64)TICK_APPROX_NS * ticks; ++ account_group_user_time(p, TICK_APPROX_NS * ticks); ++ } ++ p->sched_time += ns; ++ account_group_exec_runtime(p, ns); ++ ++ if 
(this_cpu_ksoftirqd() == p) { ++ /* ++ * ksoftirqd time do not get accounted in cpu_softirq_time. ++ * So, we have to handle it separately here. ++ */ ++ rq->softirq_ns += ns; ++ if (rq->softirq_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->softirq_ns); ++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->softirq_ns %= JIFFY_NS; ++ } ++ } ++ ++ if (task_nice(p) > 0 || idleprio_task(p)) { ++ rq->nice_ns += ns; ++ if (rq->nice_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->nice_ns); ++ cpustat[CPUTIME_NICE] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->nice_ns %= JIFFY_NS; ++ } ++ } else { ++ rq->user_ns += ns; ++ if (rq->user_ns >= JIFFY_NS) { ++ ticks = NS_TO_JIFFIES(rq->user_ns); ++ cpustat[CPUTIME_USER] += (__force u64)TICK_APPROX_NS * ticks; ++ rq->user_ns %= JIFFY_NS; ++ } ++ } ++ acct_update_integrals(p); ++} ++ ++/* ++ * This is called on clock ticks. ++ * Bank in p->sched_time the ns elapsed since the last tick or switch. ++ * CPU scheduler quota accounting is also performed here in microseconds. ++ */ ++static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p) ++{ ++ s64 account_ns = rq->niffies - p->last_ran; ++ struct task_struct *idle = rq->idle; ++ ++ /* Accurate tick timekeeping */ ++ if (user_mode(get_irq_regs())) ++ pc_user_time(rq, p, account_ns); ++ else if (p != idle || (irq_count() != HARDIRQ_OFFSET)) { ++ pc_system_time(rq, p, HARDIRQ_OFFSET, account_ns); ++ } else ++ pc_idle_time(rq, idle, account_ns); ++ ++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */ ++ if (p->policy != SCHED_FIFO && p != idle) ++ p->time_slice -= NS_TO_US(account_ns); ++ ++ p->last_ran = rq->niffies; ++} ++ ++/* ++ * This is called on context switches. ++ * Bank in p->sched_time the ns elapsed since the last tick or switch. ++ * CPU scheduler quota accounting is also performed here in microseconds. ++ */ ++static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p) ++{ ++ s64 account_ns = rq->niffies - p->last_ran; ++ struct task_struct *idle = rq->idle; ++ ++ /* Accurate subtick timekeeping */ ++ if (p != idle) ++ pc_user_time(rq, p, account_ns); ++ else ++ pc_idle_time(rq, idle, account_ns); ++ ++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */ ++ if (p->policy != SCHED_FIFO && p != idle) ++ p->time_slice -= NS_TO_US(account_ns); ++} ++ ++/* ++ * Return any ns on the sched_clock that have not yet been accounted in ++ * @p in case that task is currently running. ++ * ++ * Called with task_rq_lock(p) held. ++ */ ++static inline u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) ++{ ++ u64 ns = 0; ++ ++ /* ++ * Must be ->curr _and_ ->on_rq. If dequeued, we would ++ * project cycles that may never be accounted to this ++ * thread, breaking clock_gettime(). ++ */ ++ if (p == rq->curr && task_on_rq_queued(p)) { ++ update_clocks(rq); ++ ns = rq->niffies - p->last_ran; ++ } ++ ++ return ns; ++} ++ ++/* ++ * Return accounted runtime for the task. ++ * Return separately the current's pending runtime that have not been ++ * accounted yet. ++ * ++ */ ++unsigned long long task_sched_runtime(struct task_struct *p) ++{ ++ struct rq_flags rf; ++ struct rq *rq; ++ u64 ns; ++ ++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) ++ /* ++ * 64-bit doesn't need locks to atomically read a 64-bit value. ++ * So we have a optimisation chance when the task's delta_exec is 0. ++ * Reading ->on_cpu is racy, but this is ok. ++ * ++ * If we race with it leaving CPU, we'll take a lock. So we're correct. 
++	 * If we race with it entering CPU, unaccounted time is 0. This is
++	 * indistinguishable from the read occurring a few cycles earlier.
++	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++	 * been accounted, so we're correct here as well.
++	 */
++	if (!p->on_cpu || !task_on_rq_queued(p))
++		return tsk_seruntime(p);
++#endif
++
++	rq = task_rq_lock(p, &rf);
++	ns = p->sched_time + do_task_delta_exec(p, rq);
++	task_rq_unlock(rq, p, &rf);
++
++	return ns;
++}
++
++/*
++ * Functions to test for when SCHED_ISO tasks have used their allocated
++ * quota as real time scheduling and convert them back to SCHED_NORMAL. All
++ * data is modified only by the local runqueue during scheduler_tick with
++ * interrupts disabled.
++ */
++
++/*
++ * Test if SCHED_ISO tasks have run longer than their allotted period as RT
++ * tasks and set the refractory flag if necessary. There is 10% hysteresis
++ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
++ * slow division.
++ */
++static inline void iso_tick(struct rq *rq)
++{
++	rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - 1) / ISO_PERIOD;
++	rq->iso_ticks += 100;
++	if (rq->iso_ticks > ISO_PERIOD * sched_iso_cpu) {
++		rq->iso_refractory = true;
++		if (unlikely(rq->iso_ticks > ISO_PERIOD * 100))
++			rq->iso_ticks = ISO_PERIOD * 100;
++	}
++}
++
++/* No SCHED_ISO task was running so decrease rq->iso_ticks */
++static inline void no_iso_tick(struct rq *rq, int ticks)
++{
++	if (rq->iso_ticks > 0 || rq->iso_refractory) {
++		rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - ticks) / ISO_PERIOD;
++		if (rq->iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) {
++			rq->iso_refractory = false;
++			if (unlikely(rq->iso_ticks < 0))
++				rq->iso_ticks = 0;
++		}
++	}
++}
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static void task_running_tick(struct rq *rq)
++{
++	struct task_struct *p = rq->curr;
++
++	/*
++	 * If a SCHED_ISO task is running we increment the iso_ticks. In
++	 * order to prevent SCHED_ISO tasks from causing starvation in the
++	 * presence of true RT tasks we account those as iso_ticks as well.
++	 */
++	if (rt_task(p) || task_running_iso(p))
++		iso_tick(rq);
++	else
++		no_iso_tick(rq, 1);
++
++	/* SCHED_FIFO tasks never run out of timeslice. */
++	if (p->policy == SCHED_FIFO)
++		return;
++
++	if (iso_task(p)) {
++		if (task_running_iso(p)) {
++			if (rq->iso_refractory) {
++				/*
++				 * SCHED_ISO task is running as RT and limit
++				 * has been hit. Force it to reschedule as
++				 * SCHED_NORMAL by zeroing its time_slice
++				 */
++				p->time_slice = 0;
++			}
++		} else if (!rq->iso_refractory) {
++			/* Can now run again ISO. Reschedule to pick up prio */
++			goto out_resched;
++		}
++	}
++
++	/*
++	 * Tasks that were scheduled in the first half of a tick are not
++	 * allowed to run into the 2nd half of the next tick if they will
++	 * run out of time slice in the interim. Otherwise, if they have
++	 * less than RESCHED_US μs of time slice left they will be rescheduled.
++	 * Dither is used as a backup for when hrexpiry is disabled or high res
++	 * timers not configured in.
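The iso_tick()/no_iso_tick() pair above implements a decaying average with hysteresis: sustained ISO use converges towards 100 * ISO_PERIOD, the refractory flag trips once usage exceeds sched_iso_cpu percent, and it clears again at roughly 90% of the limit (the 115/128 shift). A stand-alone simulation of that behaviour; ISO_PERIOD and the sched_iso_cpu default are assumptions here, not values taken from this hunk:

#include <stdbool.h>
#include <stdio.h>

#define ISO_PERIOD 500			/* assumed, e.g. 5 * HZ at HZ=100 */
static int sched_iso_cpu = 70;		/* assumed default %cpu limit */
static long iso_ticks;
static bool iso_refractory;

static void tick(bool iso_running)
{
	iso_ticks = iso_ticks * (ISO_PERIOD - 1) / ISO_PERIOD;
	if (iso_running) {
		iso_ticks += 100;
		if (iso_ticks > (long)ISO_PERIOD * sched_iso_cpu)
			iso_refractory = true;
	} else if (iso_ticks < (long)ISO_PERIOD * (sched_iso_cpu * 115 / 128))
		iso_refractory = false;
}

int main(void)
{
	int t;

	for (t = 0; !iso_refractory; t++)	/* hog the CPU as SCHED_ISO */
		tick(true);
	printf("refractory after %d busy ticks\n", t);

	for (t = 0; iso_refractory; t++)	/* back off */
		tick(false);
	printf("ISO allowed again after %d idle ticks\n", t);
	return 0;
}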
++ */ ++ if (p->time_slice - rq->dither >= RESCHED_US) ++ return; ++out_resched: ++ rq_lock(rq); ++ __set_tsk_resched(p); ++ rq_unlock(rq); ++} ++ ++static inline void task_tick(struct rq *rq) ++{ ++ if (!rq_idle(rq)) ++ task_running_tick(rq); ++ else if (rq->last_jiffy > rq->last_scheduler_tick) ++ no_iso_tick(rq, rq->last_jiffy - rq->last_scheduler_tick); ++} ++ ++#ifdef CONFIG_NO_HZ_FULL ++/* ++ * We can stop the timer tick any time highres timers are active since ++ * we rely entirely on highres timeouts for task expiry rescheduling. ++ */ ++static void sched_stop_tick(struct rq *rq, int cpu) ++{ ++ if (!hrexpiry_enabled(rq)) ++ return; ++ if (!tick_nohz_full_enabled()) ++ return; ++ if (!tick_nohz_full_cpu(cpu)) ++ return; ++ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); ++} ++ ++static inline void sched_start_tick(struct rq *rq, int cpu) ++{ ++ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); ++} ++ ++struct tick_work { ++ int cpu; ++ struct delayed_work work; ++}; ++ ++static struct tick_work __percpu *tick_work_cpu; ++ ++static void sched_tick_remote(struct work_struct *work) ++{ ++ struct delayed_work *dwork = to_delayed_work(work); ++ struct tick_work *twork = container_of(dwork, struct tick_work, work); ++ int cpu = twork->cpu; ++ struct rq *rq = cpu_rq(cpu); ++ struct task_struct *curr; ++ u64 delta; ++ ++ /* ++ * Handle the tick only if it appears the remote CPU is running in full ++ * dynticks mode. The check is racy by nature, but missing a tick or ++ * having one too much is no big deal because the scheduler tick updates ++ * statistics and checks timeslices in a time-independent way, regardless ++ * of when exactly it is running. ++ */ ++ if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu)) ++ goto out_requeue; ++ ++ rq_lock_irq(rq); ++ curr = rq->curr; ++ if (is_idle_task(curr)) ++ goto out_unlock; ++ ++ update_rq_clock(rq); ++ delta = rq_clock_task(rq) - curr->last_ran; ++ ++ /* ++ * Make sure the next tick runs within a reasonable ++ * amount of time. ++ */ ++ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); ++ task_tick(rq); ++ ++out_unlock: ++ rq_unlock_irq(rq, NULL); ++ ++out_requeue: ++ /* ++ * Run the remote tick once per second (1Hz). This arbitrary ++ * frequency is large enough to avoid overload but short enough ++ * to keep scheduler internal stats reasonably up to date. 
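The out_requeue path above is the self-rearming delayed-work idiom: the work function does one bounded round of bookkeeping, then queues itself again HZ ticks out. The same shape sketched in user space with a plain thread; illustrative only (compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int stop;

static void remote_tick(int cpu)		/* the "work" */
{
	printf("1Hz stats update for cpu%d\n", cpu);
}

static void *tick_worker(void *arg)
{
	int cpu = *(int *)arg;

	while (!atomic_load(&stop)) {
		remote_tick(cpu);
		sleep(1);			/* requeue ourselves 1s out */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	int cpu = 3;

	pthread_create(&t, NULL, tick_worker, &cpu);
	sleep(3);
	atomic_store(&stop, 1);
	pthread_join(t, NULL);
	return 0;
}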
++ */ ++ queue_delayed_work(system_unbound_wq, dwork, HZ); ++} ++ ++static void sched_tick_start(int cpu) ++{ ++ struct tick_work *twork; ++ ++ if (housekeeping_cpu(cpu, HK_FLAG_TICK)) ++ return; ++ ++ WARN_ON_ONCE(!tick_work_cpu); ++ ++ twork = per_cpu_ptr(tick_work_cpu, cpu); ++ twork->cpu = cpu; ++ INIT_DELAYED_WORK(&twork->work, sched_tick_remote); ++ queue_delayed_work(system_unbound_wq, &twork->work, HZ); ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++static void sched_tick_stop(int cpu) ++{ ++ struct tick_work *twork; ++ ++ if (housekeeping_cpu(cpu, HK_FLAG_TICK)) ++ return; ++ ++ WARN_ON_ONCE(!tick_work_cpu); ++ ++ twork = per_cpu_ptr(tick_work_cpu, cpu); ++ cancel_delayed_work_sync(&twork->work); ++} ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++int __init sched_tick_offload_init(void) ++{ ++ tick_work_cpu = alloc_percpu(struct tick_work); ++ BUG_ON(!tick_work_cpu); ++ ++ return 0; ++} ++ ++#else /* !CONFIG_NO_HZ_FULL */ ++static inline void sched_stop_tick(struct rq *rq, int cpu) {} ++static inline void sched_start_tick(struct rq *rq, int cpu) {} ++static inline void sched_tick_start(int cpu) { } ++static inline void sched_tick_stop(int cpu) { } ++#endif ++ ++/* ++ * This function gets called by the timer code, with HZ frequency. ++ * We call it with interrupts disabled. ++ */ ++void scheduler_tick(void) ++{ ++ int cpu __maybe_unused = smp_processor_id(); ++ struct rq *rq = cpu_rq(cpu); ++ ++ sched_clock_tick(); ++ update_clocks(rq); ++ update_load_avg(rq, 0); ++ update_cpu_clock_tick(rq, rq->curr); ++ task_tick(rq); ++ rq->last_scheduler_tick = rq->last_jiffy; ++ rq->last_tick = rq->clock; ++ psi_task_tick(rq); ++ perf_event_task_tick(); ++ sched_stop_tick(rq, cpu); ++} ++ ++#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ ++ defined(CONFIG_TRACE_PREEMPT_TOGGLE)) ++/* ++ * If the value passed in is equal to the current preempt count ++ * then we just disabled preemption. Start timing the latency. ++ */ ++static inline void preempt_latency_start(int val) ++{ ++ if (preempt_count() == val) { ++ unsigned long ip = get_lock_parent_ip(); ++#ifdef CONFIG_DEBUG_PREEMPT ++ current->preempt_disable_ip = ip; ++#endif ++ trace_preempt_off(CALLER_ADDR0, ip); ++ } ++} ++ ++void preempt_count_add(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) ++ return; ++#endif ++ __preempt_count_add(val); ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Spinlock count overflowing soon? ++ */ ++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= ++ PREEMPT_MASK - 10); ++#endif ++ preempt_latency_start(val); ++} ++EXPORT_SYMBOL(preempt_count_add); ++NOKPROBE_SYMBOL(preempt_count_add); ++ ++/* ++ * If the value passed in equals to the current preempt count ++ * then we just enabled preemption. Stop timing the latency. ++ */ ++static inline void preempt_latency_stop(int val) ++{ ++ if (preempt_count() == val) ++ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); ++} ++ ++void preempt_count_sub(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) ++ return; ++ /* ++ * Is the spinlock portion underflowing? 
++	 */
++	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++			!(preempt_count() & PREEMPT_MASK)))
++		return;
++#endif
++
++	preempt_latency_stop(val);
++	__preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	return p->preempt_disable_ip;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * The time_slice is only refilled when it is empty and that is when we set a
++ * new deadline. Make sure update_clocks has been called recently to update
++ * rq->niffies.
++ */
++static void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = timeslice();
++	p->deadline = rq->niffies + task_deadline_diff(p);
++#ifdef CONFIG_SMT_NICE
++	if (!p->mm)
++		p->smt_bias = 0;
++	else if (rt_task(p))
++		p->smt_bias = 1 << 30;
++	else if (task_running_iso(p))
++		p->smt_bias = 1 << 29;
++	else if (idleprio_task(p)) {
++		if (task_running_idle(p))
++			p->smt_bias = 0;
++		else
++			p->smt_bias = 1;
++	} else if (--p->smt_bias < 1)
++		p->smt_bias = MAX_PRIO - p->static_prio;
++#endif
++}
++
++/*
++ * Timeslices below RESCHED_US are considered as good as expired as there's no
++ * point rescheduling when there's so little time left. SCHED_BATCH tasks
++ * have been flagged as not latency sensitive and likely to be fully CPU
++ * bound so every time they're rescheduled they have their time_slice
++ * refilled, but get a new later deadline to have little effect on
++ * SCHED_NORMAL tasks.
++ */
++static inline void check_deadline(struct task_struct *p, struct rq *rq)
++{
++	if (p->time_slice < RESCHED_US || batch_task(p))
++		time_slice_expired(p, rq);
++}
++
++/*
++ * Task selection with skiplists is a simple matter of picking off the first
++ * task in the sorted list, an O(1) operation. The lookup is amortised O(1),
++ * being bound to the number of processors.
++ *
++ * Runqueues are selectively locked based on their unlocked data and then
++ * unlocked if not needed. At most 3 locks will be held at any time and are
++ * released as soon as they're no longer needed. All balancing between CPUs
++ * is thus done here in an extremely simple first-come, best-fit manner.
++ *
++ * This iterates over runqueues in cache locality order. In interactive mode
++ * it iterates over all CPUs and finds the task with the best key/deadline.
++ * In non-interactive mode it will only take a task if it's from the current
++ * runqueue or a runqueue with more tasks than the current one with a better
++ * key/deadline.
++ */
++#ifdef CONFIG_SMP
++static inline struct task_struct
++*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
++{
++	struct rq *locked = NULL, *chosen = NULL;
++	struct task_struct *edt = idle;
++	int i, best_entries = 0;
++	u64 best_key = ~0ULL;
++
++	for (i = 0; i < total_runqueues; i++) {
++		struct rq *other_rq = rq_order(rq, i);
++		skiplist_node *next;
++		int entries;
++
++		entries = other_rq->sl->entries;
++		/*
++		 * Check for queued entries locklessly first. The local
++		 * runqueue is locked so entries will always be accurate.
++		 */
++		if (!sched_interactive) {
++			/*
++			 * Don't reschedule balance across nodes unless the CPU
++			 * is idle.
++ */ ++ if (edt != idle && rq->cpu_locality[other_rq->cpu] > 3) ++ break; ++ if (entries <= best_entries) ++ continue; ++ } else if (!entries) ++ continue; ++ ++ /* if (i) implies other_rq != rq */ ++ if (i) { ++ /* Check for best id queued lockless first */ ++ if (other_rq->best_key >= best_key) ++ continue; ++ ++ if (unlikely(!trylock_rq(rq, other_rq))) ++ continue; ++ ++ /* Need to reevaluate entries after locking */ ++ entries = other_rq->sl->entries; ++ if (unlikely(!entries)) { ++ unlock_rq(other_rq); ++ continue; ++ } ++ } ++ ++ next = other_rq->node; ++ /* ++ * In interactive mode we check beyond the best entry on other ++ * runqueues if we can't get the best for smt or affinity ++ * reasons. ++ */ ++ while ((next = next->next[0]) != other_rq->node) { ++ struct task_struct *p; ++ u64 key = next->key; ++ ++ /* Reevaluate key after locking */ ++ if (key >= best_key) ++ break; ++ ++ p = next->value; ++ if (!smt_schedule(p, rq)) { ++ if (i && !sched_interactive) ++ break; ++ continue; ++ } ++ ++ if (sched_other_cpu(p, cpu)) { ++ if (sched_interactive || !i) ++ continue; ++ break; ++ } ++ /* Make sure affinity is ok */ ++ if (i) { ++ /* From this point on p is the best so far */ ++ if (locked) ++ unlock_rq(locked); ++ chosen = locked = other_rq; ++ } ++ best_entries = entries; ++ best_key = key; ++ edt = p; ++ break; ++ } ++ /* rq->preempting is a hint only as the state may have changed ++ * since it was set with the resched call but if we have met ++ * the condition we can break out here. */ ++ if (edt == rq->preempting) ++ break; ++ if (i && other_rq != chosen) ++ unlock_rq(other_rq); ++ } ++ ++ if (likely(edt != idle)) ++ take_task(rq, cpu, edt); ++ ++ if (locked) ++ unlock_rq(locked); ++ ++ rq->preempting = NULL; ++ ++ return edt; ++} ++#else /* CONFIG_SMP */ ++static inline struct task_struct ++*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle) ++{ ++ struct task_struct *edt; ++ ++ if (unlikely(!rq->sl->entries)) ++ return idle; ++ edt = rq->node->next[0]->value; ++ take_task(rq, cpu, edt); ++ return edt; ++} ++#endif /* CONFIG_SMP */ ++ ++/* ++ * Print scheduling while atomic bug: ++ */ ++static noinline void __schedule_bug(struct task_struct *prev) ++{ ++ /* Save this before calling printk(), since that will clobber it */ ++ unsigned long preempt_disable_ip = get_preempt_disable_ip(current); ++ ++ if (oops_in_progress) ++ return; ++ ++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", ++ prev->comm, prev->pid, preempt_count()); ++ ++ debug_show_held_locks(prev); ++ print_modules(); ++ if (irqs_disabled()) ++ print_irqtrace_events(prev); ++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) ++ && in_atomic_preempt_off()) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(preempt_disable_ip); ++ pr_cont("\n"); ++ } ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++ ++/* ++ * Various schedule()-time debugging checks and statistics: ++ */ ++static inline void schedule_debug(struct task_struct *prev) ++{ ++#ifdef CONFIG_SCHED_STACK_END_CHECK ++ if (task_stack_end_corrupted(prev)) ++ panic("corrupted stack end detected inside scheduler\n"); ++#endif ++ ++ if (unlikely(in_atomic_preempt_off())) { ++ __schedule_bug(prev); ++ preempt_count_set(PREEMPT_DISABLED); ++ } ++ rcu_sleep_check(); ++ ++ profile_hit(SCHED_PROFILING, __builtin_return_address(0)); ++ ++ schedstat_inc(this_rq()->sched_count); ++} ++ ++/* ++ * The currently running task's information is all stored in rq local data ++ * which is only modified by the local CPU. 
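The O(1) claim in earliest_deadline_task() above reduces to one pointer chase at skiplist level 0: the list is kept sorted by key, so the best candidate is always node->next[0]. A toy model of just that property (a plain sorted circular list; the real MuQSS skiplist also maintains higher levels for fast insertion):

#include <stdio.h>

struct node {
	unsigned long long key;		/* virtual deadline */
	const char *name;
	struct node *next;		/* level 0 only */
};

static struct node header = { .key = ~0ULL, .name = "header" };

static void insert_sorted(struct node *n)
{
	struct node *p = &header;

	while (p->next != &header && p->next->key < n->key)
		p = p->next;
	n->next = p->next;
	p->next = n;
}

static struct node *take_earliest(void)	/* O(1), like next[0] */
{
	struct node *n = header.next;

	if (n == &header)
		return NULL;		/* empty: run the idle task */
	header.next = n->next;
	return n;
}

int main(void)
{
	struct node a = { 300, "editor" }, b = { 100, "compiler" };

	header.next = &header;		/* empty circular list */
	insert_sorted(&a);
	insert_sorted(&b);
	printf("next task: %s\n", take_earliest()->name);
	return 0;
}

take_earliest() returns "compiler", the lower key, without any search.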
++ */ ++static inline void set_rq_task(struct rq *rq, struct task_struct *p) ++{ ++ if (p == rq->idle || p->policy == SCHED_FIFO) ++ hrexpiry_clear(rq); ++ else ++ hrexpiry_start(rq, US_TO_NS(p->time_slice)); ++ if (rq->clock - rq->last_tick > HALF_JIFFY_NS) ++ rq->dither = 0; ++ else ++ rq->dither = rq_dither(rq); ++ ++ rq->rq_deadline = p->deadline; ++ rq->rq_prio = p->prio; ++#ifdef CONFIG_SMT_NICE ++ rq->rq_mm = p->mm; ++ rq->rq_smt_bias = p->smt_bias; ++#endif ++} ++ ++#ifdef CONFIG_SMT_NICE ++static void check_no_siblings(struct rq __maybe_unused *this_rq) {} ++static void wake_no_siblings(struct rq __maybe_unused *this_rq) {} ++static void (*check_siblings)(struct rq *this_rq) = &check_no_siblings; ++static void (*wake_siblings)(struct rq *this_rq) = &wake_no_siblings; ++ ++/* Iterate over smt siblings when we've scheduled a process on cpu and decide ++ * whether they should continue running or be descheduled. */ ++static void check_smt_siblings(struct rq *this_rq) ++{ ++ int other_cpu; ++ ++ for_each_cpu(other_cpu, &this_rq->thread_mask) { ++ struct task_struct *p; ++ struct rq *rq; ++ ++ rq = cpu_rq(other_cpu); ++ if (rq_idle(rq)) ++ continue; ++ p = rq->curr; ++ if (!smt_schedule(p, this_rq)) ++ resched_curr(rq); ++ } ++} ++ ++static void wake_smt_siblings(struct rq *this_rq) ++{ ++ int other_cpu; ++ ++ for_each_cpu(other_cpu, &this_rq->thread_mask) { ++ struct rq *rq; ++ ++ rq = cpu_rq(other_cpu); ++ if (rq_idle(rq)) ++ resched_idle(rq); ++ } ++} ++#else ++static void check_siblings(struct rq __maybe_unused *this_rq) {} ++static void wake_siblings(struct rq __maybe_unused *this_rq) {} ++#endif ++ ++/* ++ * schedule() is the main scheduler function. ++ * ++ * The main means of driving the scheduler and thus entering this function are: ++ * ++ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. ++ * ++ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return ++ * paths. For example, see arch/x86/entry_64.S. ++ * ++ * To drive preemption between tasks, the scheduler sets the flag in timer ++ * interrupt handler scheduler_tick(). ++ * ++ * 3. Wakeups don't really cause entry into schedule(). They add a ++ * task to the run-queue and that's it. ++ * ++ * Now, if the new task added to the run-queue preempts the current ++ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets ++ * called on the nearest possible occasion: ++ * ++ * - If the kernel is preemptible (CONFIG_PREEMPT=y): ++ * ++ * - in syscall or exception context, at the next outmost ++ * preempt_enable(). (this might be as soon as the wake_up()'s ++ * spin_unlock()!) ++ * ++ * - in IRQ context, return from interrupt-handler to ++ * preemptible context ++ * ++ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) ++ * then at the next: ++ * ++ * - cond_resched() call ++ * - explicit schedule() call ++ * - return from syscall or exception to user-space ++ * - return from interrupt-handler to user-space ++ * ++ * WARNING: must be called with preemption disabled! 
++ */ ++static void __sched notrace __schedule(bool preempt) ++{ ++ struct task_struct *prev, *next, *idle; ++ unsigned long *switch_count; ++ bool deactivate = false; ++ struct rq *rq; ++ u64 niffies; ++ int cpu; ++ ++ cpu = smp_processor_id(); ++ rq = cpu_rq(cpu); ++ prev = rq->curr; ++ idle = rq->idle; ++ ++ schedule_debug(prev); ++ ++ local_irq_disable(); ++ rcu_note_context_switch(preempt); ++ ++ /* ++ * Make sure that signal_pending_state()->signal_pending() below ++ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) ++ * done by the caller to avoid the race with signal_wake_up(). ++ * ++ * The membarrier system call requires a full memory barrier ++ * after coming from user-space, before storing to rq->curr. ++ */ ++ rq_lock(rq); ++ smp_mb__after_spinlock(); ++#ifdef CONFIG_SMP ++ if (rq->preempt) { ++ /* ++ * Make sure resched_curr hasn't triggered a preemption ++ * locklessly on a task that has since scheduled away. Spurious ++ * wakeup of idle is okay though. ++ */ ++ if (unlikely(preempt && prev != idle && !test_tsk_need_resched(prev))) { ++ rq->preempt = NULL; ++ clear_preempt_need_resched(); ++ rq_unlock_irq(rq, NULL); ++ return; ++ } ++ rq->preempt = NULL; ++ } ++#endif ++ ++ switch_count = &prev->nivcsw; ++ if (!preempt && prev->state) { ++ if (signal_pending_state(prev->state, prev)) { ++ prev->state = TASK_RUNNING; ++ } else { ++ deactivate = true; ++ prev->on_rq = 0; ++ ++ if (prev->in_iowait) { ++ atomic_inc(&rq->nr_iowait); ++ delayacct_blkio_start(); ++ } ++ ++ /* ++ * If a worker is going to sleep, notify and ++ * ask workqueue whether it wants to wake up a ++ * task to maintain concurrency. If so, wake ++ * up the task. ++ */ ++ if (prev->flags & PF_WQ_WORKER) { ++ struct task_struct *to_wakeup; ++ ++ to_wakeup = wq_worker_sleeping(prev); ++ if (to_wakeup) ++ try_to_wake_up_local(to_wakeup); ++ } ++ } ++ switch_count = &prev->nvcsw; ++ } ++ ++ /* ++ * Store the niffy value here for use by the next task's last_ran ++ * below to avoid losing niffies due to update_clocks being called ++ * again after this point. ++ */ ++ update_clocks(rq); ++ niffies = rq->niffies; ++ update_cpu_clock_switch(rq, prev); ++ ++ clear_tsk_need_resched(prev); ++ clear_preempt_need_resched(); ++ ++ if (idle != prev) { ++ check_deadline(prev, rq); ++ return_task(prev, rq, cpu, deactivate); ++ } ++ ++ next = earliest_deadline_task(rq, cpu, idle); ++ if (likely(next->prio != PRIO_LIMIT)) ++ clear_cpuidle_map(cpu); ++ else { ++ set_cpuidle_map(cpu); ++ update_load_avg(rq, 0); ++ } ++ ++ set_rq_task(rq, next); ++ next->last_ran = niffies; ++ ++ if (likely(prev != next)) { ++ /* ++ * Don't reschedule an idle task or deactivated tasks ++ */ ++ if (prev == idle) { ++ rq->nr_running++; ++ if (rt_task(next)) ++ rq->rt_nr_running++; ++ } else if (!deactivate) ++ resched_suitable_idle(prev); ++ if (unlikely(next == idle)) { ++ rq->nr_running--; ++ if (rt_task(prev)) ++ rq->rt_nr_running--; ++ wake_siblings(rq); ++ } else ++ check_siblings(rq); ++ rq->nr_switches++; ++ rq->curr = next; ++ /* ++ * The membarrier system call requires each architecture ++ * to have a full memory barrier after updating ++ * rq->curr, before returning to user-space. ++ * ++ * Here are the schemes providing that barrier on the ++ * various architectures: ++ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. ++ * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 
++ * - finish_lock_switch() for weakly-ordered
++ * architectures where spin_unlock is a full barrier,
++ * - switch_to() for arm64 (weakly-ordered, spin_unlock
++ * is a RELEASE barrier),
++ */
++ ++*switch_count;
++
++ trace_sched_switch(preempt, prev, next);
++ context_switch(rq, prev, next); /* unlocks the rq */
++ } else {
++ check_siblings(rq);
++ rq_unlock(rq);
++ do_pending_softirq(rq, next);
++ local_irq_enable();
++ }
++}
++
++void __noreturn do_task_dead(void)
++{
++ /* Causes final put_task_struct in finish_task_switch(). */
++ set_special_state(TASK_DEAD);
++
++ /* Tell freezer to ignore us: */
++ current->flags |= PF_NOFREEZE;
++ __schedule(false);
++ BUG();
++
++ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++ for (;;)
++ cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++ if (!tsk->state || tsk_is_pi_blocked(tsk) ||
++ preempt_count() ||
++ signal_pending_state(tsk->state, tsk))
++ return;
++
++ /*
++ * If we are going to sleep and we have plugged IO queued,
++ * make sure to submit it to avoid deadlocks.
++ */
++ if (blk_needs_flush_plug(tsk))
++ blk_schedule_flush_plug(tsk);
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++ struct task_struct *tsk = current;
++
++ sched_submit_work(tsk);
++ do {
++ preempt_disable();
++ __schedule(false);
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++}
++
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (have scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++ /*
++ * As this skips calling sched_submit_work(), which the idle task does
++ * regardless because that function is a nop when the task is in a
++ * TASK_RUNNING state, make sure this isn't used someplace that the
++ * current task can be in any other state. Note, idle is always in the
++ * TASK_RUNNING state.
++ */
++ WARN_ON_ONCE(current->state);
++ do {
++ __schedule(false);
++ } while (need_resched());
++}
++
++#ifdef CONFIG_CONTEXT_TRACKING
++asmlinkage __visible void __sched schedule_user(void)
++{
++ /*
++ * If we come here after a random call to set_need_resched(),
++ * or we have been woken up remotely but the IPI has not yet arrived,
++ * we haven't yet exited the RCU idle mode. Do it here manually until
++ * we find a better solution.
++ *
++ * NB: There are buggy callers of this function. Ideally we
++ * should warn if prev_state != IN_USER, but that will trigger
++ * too frequently to make sense yet.
++ */
++ enum ctx_state prev_state = exception_enter();
++ schedule();
++ exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled.
Note: preempt_count must be 1 ++ */ ++void __sched schedule_preempt_disabled(void) ++{ ++ sched_preempt_enable_no_resched(); ++ schedule(); ++ preempt_disable(); ++} ++ ++static void __sched notrace preempt_schedule_common(void) ++{ ++ do { ++ /* ++ * Because the function tracer can trace preempt_count_sub() ++ * and it also uses preempt_enable/disable_notrace(), if ++ * NEED_RESCHED is set, the preempt_enable_notrace() called ++ * by the function tracer will call this function again and ++ * cause infinite recursion. ++ * ++ * Preemption must be disabled here before the function ++ * tracer can trace. Break up preempt_disable() into two ++ * calls. One to disable preemption without fear of being ++ * traced. The other to still record the preemption latency, ++ * which can also be traced by the function tracer. ++ */ ++ preempt_disable_notrace(); ++ preempt_latency_start(1); ++ __schedule(true); ++ preempt_latency_stop(1); ++ preempt_enable_no_resched_notrace(); ++ ++ /* ++ * Check again in case we missed a preemption opportunity ++ * between schedule and now. ++ */ ++ } while (need_resched()); ++} ++ ++#ifdef CONFIG_PREEMPT ++/* ++ * this is the entry point to schedule() from in-kernel preemption ++ * off of preempt_enable. Kernel preemptions off return from interrupt ++ * occur there and call schedule directly. ++ */ ++asmlinkage __visible void __sched notrace preempt_schedule(void) ++{ ++ /* ++ * If there is a non-zero preempt_count or interrupts are disabled, ++ * we do not want to preempt the current task. Just return.. ++ */ ++ if (likely(!preemptible())) ++ return; ++ ++ preempt_schedule_common(); ++} ++NOKPROBE_SYMBOL(preempt_schedule); ++EXPORT_SYMBOL(preempt_schedule); ++ ++/** ++ * preempt_schedule_notrace - preempt_schedule called by tracing ++ * ++ * The tracing infrastructure uses preempt_enable_notrace to prevent ++ * recursion and tracing preempt enabling caused by the tracing ++ * infrastructure itself. But as tracing can happen in areas coming ++ * from userspace or just about to enter userspace, a preempt enable ++ * can occur before user_exit() is called. This will cause the scheduler ++ * to be called when the system is still in usermode. ++ * ++ * To prevent this, the preempt_enable_notrace will use this function ++ * instead of preempt_schedule() to exit user context if needed before ++ * calling the scheduler. ++ */ ++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) ++{ ++ enum ctx_state prev_ctx; ++ ++ if (likely(!preemptible())) ++ return; ++ ++ do { ++ /* ++ * Because the function tracer can trace preempt_count_sub() ++ * and it also uses preempt_enable/disable_notrace(), if ++ * NEED_RESCHED is set, the preempt_enable_notrace() called ++ * by the function tracer will call this function again and ++ * cause infinite recursion. ++ * ++ * Preemption must be disabled here before the function ++ * tracer can trace. Break up preempt_disable() into two ++ * calls. One to disable preemption without fear of being ++ * traced. The other to still record the preemption latency, ++ * which can also be traced by the function tracer. ++ */ ++ preempt_disable_notrace(); ++ preempt_latency_start(1); ++ /* ++ * Needs preempt disabled in case user_exit() is traced ++ * and the tracer calls preempt_enable_notrace() causing ++ * an infinite recursion. 
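++ *
++ * The exception_enter()/exception_exit() pair below saves and
++ * restores the context tracking state around __schedule(), so the
++ * scheduler never runs while this CPU is still accounted as being
++ * in user context.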
++ */
++ prev_ctx = exception_enter();
++ __schedule(true);
++ exception_exit(prev_ctx);
++
++ preempt_latency_stop(1);
++ preempt_enable_no_resched_notrace();
++ } while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#endif /* CONFIG_PREEMPT */
++
++/*
++ * this is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note, that this is called and returns with irqs disabled. This will
++ * protect us against recursive calling from irq.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++ enum ctx_state prev_state;
++
++ /* Catch callers which need to be fixed */
++ BUG_ON(preempt_count() || !irqs_disabled());
++
++ prev_state = exception_enter();
++
++ do {
++ preempt_disable();
++ local_irq_enable();
++ __schedule(true);
++ local_irq_disable();
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++
++ exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++ void *key)
++{
++ return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++ if (pi_task)
++ prio = min(prio, pi_task->prio);
++
++ return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++ return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++ int prio, oldprio;
++ struct rq *rq;
++
++ /* XXX used to be waiter->prio, not waiter->task->prio */
++ prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++ /*
++ * If nothing changed, bail early.
++ */
++ if (p->pi_top_task == pi_task && prio == p->prio)
++ return;
++
++ rq = __task_rq_lock(p, NULL);
++ update_rq_clock(rq);
++ /*
++ * Set under pi_lock && rq->lock, such that the value can be used under
++ * either lock.
++ *
++ * Note that there is plenty of trickery involved in making this pointer
++ * cache work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++ * ensure a task is de-boosted (pi_task is set to NULL) before the
++ * task is allowed to run again (and can exit). This ensures the pointer
++ * points to a blocked task -- which guarantees the task is present.
++ */
++ p->pi_top_task = pi_task;
++
++ /*
++ * For FIFO/RR we only need to set prio, if that matches we're done.
++ */
++ if (prio == p->prio)
++ goto out_unlock;
++
++ /*
++ * Idle task boosting is a no-no in general. There is one
++ * exception, when PREEMPT_RT and NOHZ is active:
++ *
++ * The idle task calls get_next_timer_interrupt() and holds
++ * the timer wheel base->lock on the CPU and another CPU wants
++ * to access the timer (probably to cancel it). We can safely
++ * ignore the boosting request, as the idle CPU runs this code
++ * with interrupts disabled and will complete the lock
++ * protected section without being interrupted. So there is no
++ * real need to boost.
++ */
++ if (unlikely(p == rq->idle)) {
++ WARN_ON(p != rq->curr);
++ WARN_ON(p->pi_blocked_on);
++ goto out_unlock;
++ }
++
++ trace_sched_pi_setprio(p, pi_task);
++ oldprio = p->prio;
++ p->prio = prio;
++ if (task_running(rq, p)) {
++ if (prio > oldprio)
++ resched_task(p);
++ } else if (task_queued(p)) {
++ dequeue_task(rq, p, DEQUEUE_SAVE);
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ if (prio < oldprio)
++ try_preempt(p, rq);
++ }
++out_unlock:
++ __task_rq_unlock(rq, NULL);
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ return prio;
++}
++#endif
++
++/*
++ * Adjust the deadline for when the priority is to change, before it's
++ * changed.
++ */
++static inline void adjust_deadline(struct task_struct *p, int new_prio)
++{
++ p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p);
++}
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++ int new_static, old_static;
++ struct rq_flags rf;
++ struct rq *rq;
++
++ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++ return;
++ new_static = NICE_TO_PRIO(nice);
++ /*
++ * We have to be careful, if called from sys_setpriority(),
++ * the task might be in the middle of scheduling on another CPU.
++ */
++ rq = task_rq_lock(p, &rf);
++ update_rq_clock(rq);
++
++ /*
++ * The RT priorities are set via sched_setscheduler(), but we still
++ * allow the 'normal' nice value to be set - but as expected
++ * it won't have any effect on scheduling until the task returns
++ * to SCHED_NORMAL/SCHED_BATCH:
++ */
++ if (has_rt_policy(p)) {
++ p->static_prio = new_static;
++ goto out_unlock;
++ }
++
++ adjust_deadline(p, new_static);
++ old_static = p->static_prio;
++ p->static_prio = new_static;
++ p->prio = effective_prio(p);
++
++ if (task_queued(p)) {
++ dequeue_task(rq, p, DEQUEUE_SAVE);
++ enqueue_task(rq, p, ENQUEUE_RESTORE);
++ if (new_static < old_static)
++ try_preempt(p, rq);
++ } else if (task_running(rq, p)) {
++ set_rq_task(rq, p);
++ if (old_static < new_static)
++ resched_task(p);
++ }
++out_unlock:
++ task_rq_unlock(rq, p, &rf);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++ /* Convert nice value [19,-20] to rlimit style value [1,40] */
++ int nice_rlim = nice_to_rlimit(nice);
++
++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
++ capable(CAP_SYS_NICE));
++}
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++ long nice, retval;
++
++ /*
++ * Setpriority might change our priority at the same moment.
++ * We don't have to worry. Conceptually one call occurs first
++ * and we have a single winner.
++ */
++
++ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++ nice = task_nice(current) + increment;
++
++ nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++ if (increment < 0 && !can_nice(current, nice))
++ return -EPERM;
++
++ retval = security_task_setnice(current, nice);
++ if (retval)
++ return retval;
++
++ set_user_nice(current, nice);
++ return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ * RT tasks are offset by -100.
Normal tasks are centered around 1, value goes ++ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO). ++ */ ++int task_prio(const struct task_struct *p) ++{ ++ int delta, prio = p->prio - MAX_RT_PRIO; ++ ++ /* rt tasks and iso tasks */ ++ if (prio <= 0) ++ goto out; ++ ++ /* Convert to ms to avoid overflows */ ++ delta = NS_TO_MS(p->deadline - task_rq(p)->niffies); ++ if (unlikely(delta < 0)) ++ delta = 0; ++ delta = delta * 40 / ms_longest_deadline_diff(); ++ if (delta <= 80) ++ prio += delta; ++ if (idleprio_task(p)) ++ prio += 40; ++out: ++ return prio; ++} ++ ++/** ++ * idle_cpu - is a given CPU idle currently? ++ * @cpu: the processor in question. ++ * ++ * Return: 1 if the CPU is currently idle. 0 otherwise. ++ */ ++int idle_cpu(int cpu) ++{ ++ return cpu_curr(cpu) == cpu_rq(cpu)->idle; ++} ++ ++/** ++ * available_idle_cpu - is a given CPU idle for enqueuing work. ++ * @cpu: the CPU in question. ++ * ++ * Return: 1 if the CPU is currently idle. 0 otherwise. ++ */ ++int available_idle_cpu(int cpu) ++{ ++ if (!idle_cpu(cpu)) ++ return 0; ++ ++ if (vcpu_is_preempted(cpu)) ++ return 0; ++ ++ return 1; ++} ++ ++/** ++ * idle_task - return the idle task for a given CPU. ++ * @cpu: the processor in question. ++ * ++ * Return: The idle task for the CPU @cpu. ++ */ ++struct task_struct *idle_task(int cpu) ++{ ++ return cpu_rq(cpu)->idle; ++} ++ ++/** ++ * find_process_by_pid - find a process with a matching PID value. ++ * @pid: the pid in question. ++ * ++ * The task of @pid, if found. %NULL otherwise. ++ */ ++static inline struct task_struct *find_process_by_pid(pid_t pid) ++{ ++ return pid ? find_task_by_vpid(pid) : current; ++} ++ ++/* Actually do priority change: must hold rq lock. */ ++static void __setscheduler(struct task_struct *p, struct rq *rq, int policy, ++ int prio, bool keep_boost) ++{ ++ int oldrtprio, oldprio; ++ ++ p->policy = policy; ++ oldrtprio = p->rt_priority; ++ p->rt_priority = prio; ++ p->normal_prio = normal_prio(p); ++ oldprio = p->prio; ++ /* ++ * Keep a potential priority boosting if called from ++ * sched_setscheduler(). 
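++ * That is, with keep_boost set, rt_effective_prio() below reapplies
++ * any priority-inheritance boost on top of the freshly computed
++ * normal_prio, so a PI-boosted task is not silently deboosted by a
++ * policy change.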
++ */ ++ p->prio = normal_prio(p); ++ if (keep_boost) ++ p->prio = rt_effective_prio(p, p->prio); ++ ++ if (task_running(rq, p)) { ++ set_rq_task(rq, p); ++ resched_task(p); ++ } else if (task_queued(p)) { ++ dequeue_task(rq, p, DEQUEUE_SAVE); ++ enqueue_task(rq, p, ENQUEUE_RESTORE); ++ if (p->prio < oldprio || p->rt_priority > oldrtprio) ++ try_preempt(p, rq); ++ } ++} ++ ++/* ++ * Check the target process has a UID that matches the current process's ++ */ ++static bool check_same_owner(struct task_struct *p) ++{ ++ const struct cred *cred = current_cred(), *pcred; ++ bool match; ++ ++ rcu_read_lock(); ++ pcred = __task_cred(p); ++ match = (uid_eq(cred->euid, pcred->euid) || ++ uid_eq(cred->euid, pcred->uid)); ++ rcu_read_unlock(); ++ return match; ++} ++ ++static int __sched_setscheduler(struct task_struct *p, ++ const struct sched_attr *attr, ++ bool user, bool pi) ++{ ++ int retval, policy = attr->sched_policy, oldpolicy = -1, priority = attr->sched_priority; ++ unsigned long rlim_rtprio = 0; ++ struct rq_flags rf; ++ int reset_on_fork; ++ struct rq *rq; ++ ++ /* The pi code expects interrupts enabled */ ++ BUG_ON(pi && in_interrupt()); ++ ++ if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) { ++ unsigned long lflags; ++ ++ if (!lock_task_sighand(p, &lflags)) ++ return -ESRCH; ++ rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); ++ unlock_task_sighand(p, &lflags); ++ if (rlim_rtprio) ++ goto recheck; ++ /* ++ * If the caller requested an RT policy without having the ++ * necessary rights, we downgrade the policy to SCHED_ISO. ++ * We also set the parameter to zero to pass the checks. ++ */ ++ policy = SCHED_ISO; ++ priority = 0; ++ } ++recheck: ++ /* Double check policy once rq lock held */ ++ if (policy < 0) { ++ reset_on_fork = p->sched_reset_on_fork; ++ policy = oldpolicy = p->policy; ++ } else { ++ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK); ++ policy &= ~SCHED_RESET_ON_FORK; ++ ++ if (!SCHED_RANGE(policy)) ++ return -EINVAL; ++ } ++ ++ if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) ++ return -EINVAL; ++ ++ /* ++ * Valid priorities for SCHED_FIFO and SCHED_RR are ++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and ++ * SCHED_BATCH is 0. 
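++ *
++ * For example, sched_priority 50 passes for SCHED_FIFO/SCHED_RR but
++ * fails for SCHED_NORMAL, and sched_priority 0 fails for the RT
++ * policies; both cases trip the is_rt_policy() check below.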
++ */
++ if (priority < 0 ||
++ (p->mm && priority > MAX_USER_RT_PRIO - 1) ||
++ (!p->mm && priority > MAX_RT_PRIO - 1))
++ return -EINVAL;
++ if (is_rt_policy(policy) != (priority != 0))
++ return -EINVAL;
++
++ /*
++ * Allow unprivileged RT tasks to decrease priority:
++ */
++ if (user && !capable(CAP_SYS_NICE)) {
++ if (is_rt_policy(policy)) {
++ unsigned long rlim_rtprio =
++ task_rlimit(p, RLIMIT_RTPRIO);
++
++ /* Can't set/change the rt policy */
++ if (policy != p->policy && !rlim_rtprio)
++ return -EPERM;
++
++ /* Can't increase priority */
++ if (priority > p->rt_priority &&
++ priority > rlim_rtprio)
++ return -EPERM;
++ } else {
++ switch (p->policy) {
++ /*
++ * Can only downgrade policies but not back to
++ * SCHED_NORMAL
++ */
++ case SCHED_ISO:
++ if (policy == SCHED_ISO)
++ goto out;
++ if (policy != SCHED_NORMAL)
++ return -EPERM;
++ break;
++ case SCHED_BATCH:
++ if (policy == SCHED_BATCH)
++ goto out;
++ if (policy != SCHED_IDLEPRIO)
++ return -EPERM;
++ break;
++ case SCHED_IDLEPRIO:
++ if (policy == SCHED_IDLEPRIO)
++ goto out;
++ return -EPERM;
++ default:
++ break;
++ }
++ }
++
++ /* Can't change another user's priorities */
++ if (!check_same_owner(p))
++ return -EPERM;
++
++ /* Normal users shall not reset the sched_reset_on_fork flag: */
++ if (p->sched_reset_on_fork && !reset_on_fork)
++ return -EPERM;
++ }
++
++ if (user) {
++ retval = security_task_setscheduler(p);
++ if (retval)
++ return retval;
++ }
++
++ /*
++ * Make sure no PI-waiters arrive (or leave) while we are
++ * changing the priority of the task:
++ *
++ * To be able to change p->policy safely, the runqueue lock must be
++ * held.
++ */
++ rq = task_rq_lock(p, &rf);
++ update_rq_clock(rq);
++
++ /*
++ * Changing the policy of the stop threads is a very bad idea:
++ */
++ if (p == rq->stop) {
++ task_rq_unlock(rq, p, &rf);
++ return -EINVAL;
++ }
++
++ /*
++ * If not changing anything there's no need to proceed further:
++ */
++ if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
++ priority == p->rt_priority))) {
++ task_rq_unlock(rq, p, &rf);
++ return 0;
++ }
++
++ /* Re-check policy now with rq lock held */
++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++ policy = oldpolicy = -1;
++ task_rq_unlock(rq, p, &rf);
++ goto recheck;
++ }
++ p->sched_reset_on_fork = reset_on_fork;
++
++ __setscheduler(p, rq, policy, priority, pi);
++ task_rq_unlock(rq, p, &rf);
++
++ if (pi)
++ rt_mutex_adjust_pi(p);
++out:
++ return 0;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param, bool check)
++{
++ struct sched_attr attr = {
++ .sched_policy = policy,
++ .sched_priority = param->sched_priority,
++ .sched_nice = PRIO_TO_NICE(p->static_prio),
++ };
++
++ return __sched_setscheduler(p, &attr, check, true);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may already be dead.
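++ *
++ * Illustrative in-kernel usage (a sketch, not a call site from this
++ * patch):
++ *
++ * struct sched_param sp = { .sched_priority = 1 };
++ * sched_setscheduler(p, SCHED_FIFO, &sp);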
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return _sched_setscheduler(p, policy, param, true);
++}
++
++EXPORT_SYMBOL_GPL(sched_setscheduler);
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++ return __sched_setscheduler(p, attr, true, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr);
++
++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
++{
++ return __sched_setscheduler(p, attr, false, true);
++}
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission. For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return _sched_setscheduler(p, policy, param, false);
++}
++EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++ struct sched_param lparam;
++ struct task_struct *p;
++ int retval;
++
++ if (!param || pid < 0)
++ return -EINVAL;
++ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++ return -EFAULT;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setscheduler(p, policy, &lparam);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr,
++ struct sched_attr *attr)
++{
++ u32 size;
++ int ret;
++
++ if (!access_ok(uattr, SCHED_ATTR_SIZE_VER0))
++ return -EFAULT;
++
++ /* Zero the full structure, so that a short copy will be nice: */
++ memset(attr, 0, sizeof(*attr));
++
++ ret = get_user(size, &uattr->size);
++ if (ret)
++ return ret;
++
++ /* Bail out on silly large sizes: */
++ if (size > PAGE_SIZE)
++ goto err_size;
++
++ /* ABI compatibility quirk: */
++ if (!size)
++ size = SCHED_ATTR_SIZE_VER0;
++
++ if (size < SCHED_ATTR_SIZE_VER0)
++ goto err_size;
++
++ /*
++ * If we're handed a bigger struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. new
++ * user-space does not rely on any kernel feature
++ * extensions we don't know about yet.
++ */
++ if (size > sizeof(*attr)) {
++ unsigned char __user *addr;
++ unsigned char __user *end;
++ unsigned char val;
++
++ addr = (void __user *)uattr + sizeof(*attr);
++ end = (void __user *)uattr + size;
++
++ for (; addr < end; addr++) {
++ ret = get_user(val, addr);
++ if (ret)
++ return ret;
++ if (val)
++ goto err_size;
++ }
++ size = sizeof(*attr);
++ }
++
++ ret = copy_from_user(attr, uattr, size);
++ if (ret)
++ return -EFAULT;
++
++ /*
++ * XXX: Do we want to be lenient like existing syscalls; or do we want
++ * to be strict and return an error on out-of-bounds values?
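++ *
++ * For now we are lenient: the nice value is simply clamped to the
++ * valid [-20, 19] range below rather than rejected.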
++ */
++ attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++ /* sched/core.c uses zero here but we already know ret is zero */
++ return 0;
++
++err_size:
++ put_user(sizeof(*attr), &uattr->size);
++ return -E2BIG;
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++ if (policy < 0)
++ return -EINVAL;
++
++ return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++ return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, flags)
++{
++ struct sched_attr attr;
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || flags)
++ return -EINVAL;
++
++ retval = sched_copy_attr(uattr, &attr);
++ if (retval)
++ return retval;
++
++ if ((int)attr.sched_policy < 0)
++ return -EINVAL;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setattr(p, &attr);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (pid < 0)
++ goto out_nounlock;
++
++ retval = -ESRCH;
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ if (p) {
++ retval = security_task_getscheduler(p);
++ if (!retval)
++ retval = p->policy;
++ }
++ rcu_read_unlock();
++
++out_nounlock:
++ return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++ struct sched_param lp = { .sched_priority = 0 };
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (!param || pid < 0)
++ goto out_nounlock;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ if (has_rt_policy(p))
++ lp.sched_priority = p->rt_priority;
++ rcu_read_unlock();
++
++ /*
++ * This one might sleep, we cannot do it with a spinlock held ...
++ */
++ retval = copy_to_user(param, &lp, sizeof(*param)) ?
-EFAULT : 0;
++
++out_nounlock:
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++static int sched_read_attr(struct sched_attr __user *uattr,
++ struct sched_attr *attr,
++ unsigned int usize)
++{
++ int ret;
++
++ if (!access_ok(uattr, usize))
++ return -EFAULT;
++
++ /*
++ * If we're handed a smaller struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. old
++ * user-space does not get incomplete information.
++ */
++ if (usize < sizeof(*attr)) {
++ unsigned char *addr;
++ unsigned char *end;
++
++ addr = (void *)attr + usize;
++ end = (void *)attr + sizeof(*attr);
++
++ for (; addr < end; addr++) {
++ if (*addr)
++ return -EFBIG;
++ }
++
++ attr->size = usize;
++ }
++
++ ret = copy_to_user(uattr, attr, attr->size);
++ if (ret)
++ return -EFAULT;
++
++ /* sched/core.c uses zero here but we already know ret is zero */
++ return ret;
++}
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @size: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, size, unsigned int, flags)
++{
++ struct sched_attr attr = {
++ .size = sizeof(struct sched_attr),
++ };
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || size > PAGE_SIZE ||
++ size < SCHED_ATTR_SIZE_VER0 || flags)
++ return -EINVAL;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ attr.sched_policy = p->policy;
++ if (rt_task(p))
++ attr.sched_priority = p->rt_priority;
++ else
++ attr.sched_nice = task_nice(p);
++
++ rcu_read_unlock();
++
++ retval = sched_read_attr(uattr, &attr, size);
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++ cpumask_var_t cpus_allowed, new_mask;
++ struct task_struct *p;
++ int retval;
++
++ rcu_read_lock();
++
++ p = find_process_by_pid(pid);
++ if (!p) {
++ rcu_read_unlock();
++ return -ESRCH;
++ }
++
++ /* Prevent p going away */
++ get_task_struct(p);
++ rcu_read_unlock();
++
++ if (p->flags & PF_NO_SETAFFINITY) {
++ retval = -EINVAL;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_free_cpus_allowed;
++ }
++ retval = -EPERM;
++ if (!check_same_owner(p)) {
++ rcu_read_lock();
++ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++ rcu_read_unlock();
++ goto out_unlock;
++ }
++ rcu_read_unlock();
++ }
++
++ retval = security_task_setscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ cpuset_cpus_allowed(p, cpus_allowed);
++ cpumask_and(new_mask, in_mask, cpus_allowed);
++again:
++ retval = __set_cpus_allowed_ptr(p, new_mask, true);
++
++ if (!retval) {
++ cpuset_cpus_allowed(p, cpus_allowed);
++ if (!cpumask_subset(new_mask, cpus_allowed)) {
++ /*
++ * We must have raced with a concurrent cpuset
++ * update.
Just reset the cpus_allowed to the ++ * cpuset's cpus_allowed ++ */ ++ cpumask_copy(new_mask, cpus_allowed); ++ goto again; ++ } ++ } ++out_unlock: ++ free_cpumask_var(new_mask); ++out_free_cpus_allowed: ++ free_cpumask_var(cpus_allowed); ++out_put_task: ++ put_task_struct(p); ++ return retval; ++} ++ ++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, ++ cpumask_t *new_mask) ++{ ++ if (len < cpumask_size()) ++ cpumask_clear(new_mask); ++ else if (len > cpumask_size()) ++ len = cpumask_size(); ++ ++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; ++} ++ ++ ++/** ++ * sys_sched_setaffinity - set the CPU affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to the new CPU mask ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ cpumask_var_t new_mask; ++ int retval; ++ ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); ++ if (retval == 0) ++ retval = sched_setaffinity(pid, new_mask); ++ free_cpumask_var(new_mask); ++ return retval; ++} ++ ++long sched_getaffinity(pid_t pid, cpumask_t *mask) ++{ ++ struct task_struct *p; ++ unsigned long flags; ++ int retval; ++ ++ get_online_cpus(); ++ rcu_read_lock(); ++ ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++out_unlock: ++ rcu_read_unlock(); ++ put_online_cpus(); ++ ++ return retval; ++} ++ ++/** ++ * sys_sched_getaffinity - get the CPU affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to hold the current CPU mask ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ int ret; ++ cpumask_var_t mask; ++ ++ if ((len * BITS_PER_BYTE) < nr_cpu_ids) ++ return -EINVAL; ++ if (len & (sizeof(unsigned long)-1)) ++ return -EINVAL; ++ ++ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ ret = sched_getaffinity(pid, mask); ++ if (ret == 0) { ++ unsigned int retlen = min(len, cpumask_size()); ++ ++ if (copy_to_user(user_mask_ptr, mask, retlen)) ++ ret = -EFAULT; ++ else ++ ret = retlen; ++ } ++ free_cpumask_var(mask); ++ ++ return ret; ++} ++ ++/** ++ * sys_sched_yield - yield the current processor to other threads. ++ * ++ * This function yields the current CPU to other tasks. It does this by ++ * scheduling away the current task. If it still has the earliest deadline ++ * it will be scheduled again as the next task. ++ * ++ * Return: 0. 
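++ *
++ * Note: behaviour depends on sched_yield_type (see do_sched_yield()
++ * below): 0 makes this call a no-op, 1 only reschedules, and any
++ * higher value also expires the current time slice first.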
++ */
++static void do_sched_yield(void)
++{
++ struct rq *rq;
++
++ if (!sched_yield_type)
++ return;
++
++ local_irq_disable();
++ rq = this_rq();
++ rq_lock(rq);
++
++ if (sched_yield_type > 1)
++ time_slice_expired(current, rq);
++ schedstat_inc(rq->yld_count);
++
++ /*
++ * Since we are going to call schedule() anyway, there's
++ * no need to preempt or enable interrupts:
++ */
++ preempt_disable();
++ rq_unlock(rq);
++ sched_preempt_enable_no_resched();
++
++ schedule();
++}
++
++SYSCALL_DEFINE0(sched_yield)
++{
++ do_sched_yield();
++ return 0;
++}
++
++#ifndef CONFIG_PREEMPT
++int __sched _cond_resched(void)
++{
++ if (should_resched(0)) {
++ preempt_schedule_common();
++ return 1;
++ }
++ rcu_all_qs();
++ return 0;
++}
++EXPORT_SYMBOL(_cond_resched);
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++ int resched = should_resched(PREEMPT_LOCK_OFFSET);
++ int ret = 0;
++
++ lockdep_assert_held(lock);
++
++ if (spin_needbreak(lock) || resched) {
++ spin_unlock(lock);
++ if (resched)
++ preempt_schedule_common();
++ else
++ cpu_relax();
++ ret = 1;
++ spin_lock(lock);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run, if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++ set_current_state(TASK_RUNNING);
++ do_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * Return:
++ * true (>0) if we indeed boosted the target task.
++ * false (0) if we failed to boost the target.
++ * -ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++ struct task_struct *rq_p;
++ struct rq *rq, *p_rq;
++ unsigned long flags;
++ int yielded = 0;
++
++ local_irq_save(flags);
++ rq = this_rq();
++
++again:
++ p_rq = task_rq(p);
++ /*
++ * If the target task is already running, or is not runnable
++ * at all, there is no one to yield to.
++ */ ++ if (task_running(p_rq, p) || p->state) { ++ yielded = -ESRCH; ++ goto out_irq; ++ } ++ ++ double_rq_lock(rq, p_rq); ++ if (unlikely(task_rq(p) != p_rq)) { ++ double_rq_unlock(rq, p_rq); ++ goto again; ++ } ++ ++ yielded = 1; ++ schedstat_inc(rq->yld_count); ++ rq_p = rq->curr; ++ if (p->deadline > rq_p->deadline) ++ p->deadline = rq_p->deadline; ++ p->time_slice += rq_p->time_slice; ++ if (p->time_slice > timeslice()) ++ p->time_slice = timeslice(); ++ time_slice_expired(rq_p, rq); ++ if (preempt && rq != p_rq) ++ resched_task(p_rq->curr); ++ double_rq_unlock(rq, p_rq); ++out_irq: ++ local_irq_restore(flags); ++ ++ if (yielded > 0) ++ schedule(); ++ return yielded; ++} ++EXPORT_SYMBOL_GPL(yield_to); ++ ++int io_schedule_prepare(void) ++{ ++ int old_iowait = current->in_iowait; ++ ++ current->in_iowait = 1; ++ blk_schedule_flush_plug(current); ++ ++ return old_iowait; ++} ++ ++void io_schedule_finish(int token) ++{ ++ current->in_iowait = token; ++} ++ ++/* ++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so ++ * that process accounting knows that this is a task in IO wait state. ++ * ++ * But don't do that if it is a deliberate, throttling IO wait (this task ++ * has set its backing_dev_info: the queue against which it should throttle) ++ */ ++ ++long __sched io_schedule_timeout(long timeout) ++{ ++ int token; ++ long ret; ++ ++ token = io_schedule_prepare(); ++ ret = schedule_timeout(timeout); ++ io_schedule_finish(token); ++ ++ return ret; ++} ++EXPORT_SYMBOL(io_schedule_timeout); ++ ++void io_schedule(void) ++{ ++ int token; ++ ++ token = io_schedule_prepare(); ++ schedule(); ++ io_schedule_finish(token); ++} ++EXPORT_SYMBOL(io_schedule); ++ ++/** ++ * sys_sched_get_priority_max - return maximum RT priority. ++ * @policy: scheduling class. ++ * ++ * Return: On success, this syscall returns the maximum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_max, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = MAX_USER_RT_PRIO-1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_ISO: ++ case SCHED_IDLEPRIO: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * sys_sched_get_priority_min - return minimum RT priority. ++ * @policy: scheduling class. ++ * ++ * Return: On success, this syscall returns the minimum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_min, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = 1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_ISO: ++ case SCHED_IDLEPRIO: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) ++{ ++ struct task_struct *p; ++ unsigned int time_slice; ++ struct rq_flags rf; ++ struct rq *rq; ++ int retval; ++ ++ if (pid < 0) ++ return -EINVAL; ++ ++ retval = -ESRCH; ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ rq = task_rq_lock(p, &rf); ++ time_slice = p->policy == SCHED_FIFO ? 
0 : MS_TO_NS(task_timeslice(p)); ++ task_rq_unlock(rq, p, &rf); ++ ++ rcu_read_unlock(); ++ *t = ns_to_timespec64(time_slice); ++ return 0; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++/** ++ * sys_sched_rr_get_interval - return the default timeslice of a process. ++ * @pid: pid of the process. ++ * @interval: userspace pointer to the timeslice value. ++ * ++ * this syscall writes the default timeslice value of a given process ++ * into the user-space timespec buffer. A value of '0' means infinity. ++ * ++ * Return: On success, 0 and the timeslice is in @interval. Otherwise, ++ * an error code. ++ */ ++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, ++ struct __kernel_timespec __user *, interval) ++{ ++ struct timespec64 t; ++ int retval = sched_rr_get_interval(pid, &t); ++ ++ if (retval == 0) ++ retval = put_timespec64(&t, interval); ++ ++ return retval; ++} ++ ++#ifdef CONFIG_COMPAT_32BIT_TIME ++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, ++ struct old_timespec32 __user *, interval) ++{ ++ struct timespec64 t; ++ int retval = sched_rr_get_interval(pid, &t); ++ ++ if (retval == 0) ++ retval = put_old_timespec32(&t, interval); ++ return retval; ++} ++#endif ++ ++void sched_show_task(struct task_struct *p) ++{ ++ unsigned long free = 0; ++ int ppid; ++ ++ if (!try_get_task_stack(p)) ++ return; ++ ++ printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p)); ++ ++ if (p->state == TASK_RUNNING) ++ printk(KERN_CONT " running task "); ++#ifdef CONFIG_DEBUG_STACK_USAGE ++ free = stack_not_used(p); ++#endif ++ ppid = 0; ++ rcu_read_lock(); ++ if (pid_alive(p)) ++ ppid = task_pid_nr(rcu_dereference(p->real_parent)); ++ rcu_read_unlock(); ++ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, ++ task_pid_nr(p), ppid, ++ (unsigned long)task_thread_info(p)->flags); ++ ++ print_worker_info(KERN_INFO, p); ++ show_stack(p, NULL); ++ put_task_stack(p); ++} ++EXPORT_SYMBOL_GPL(sched_show_task); ++ ++static inline bool ++state_filter_match(unsigned long state_filter, struct task_struct *p) ++{ ++ /* no filter, everything matches */ ++ if (!state_filter) ++ return true; ++ ++ /* filter, but doesn't match */ ++ if (!(p->state & state_filter)) ++ return false; ++ ++ /* ++ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows ++ * TASK_KILLABLE). ++ */ ++ if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) ++ return false; ++ ++ return true; ++} ++ ++void show_state_filter(unsigned long state_filter) ++{ ++ struct task_struct *g, *p; ++ ++#if BITS_PER_LONG == 32 ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#else ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#endif ++ rcu_read_lock(); ++ for_each_process_thread(g, p) { ++ /* ++ * reset the NMI-timeout, listing all files on a slow ++ * console might take a lot of time: ++ * Also, reset softlockup watchdogs on all CPUs, because ++ * another CPU might be blocked waiting for us to process ++ * an IPI. 
++ */
++ touch_nmi_watchdog();
++ touch_all_softlockup_watchdogs();
++ if (state_filter_match(state_filter, p))
++ sched_show_task(p);
++ }
++
++ rcu_read_unlock();
++ /*
++ * Only show locks if all tasks are dumped:
++ */
++ if (!state_filter)
++ debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++ pr_info("Task dump for CPU %d:\n", cpu);
++ sched_show_task(cpu_curr(cpu));
++}
++
++#ifdef CONFIG_SMP
++void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
++{
++ cpumask_copy(&p->cpus_allowed, new_mask);
++ p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ struct rq *rq = task_rq(p);
++
++ lockdep_assert_held(&p->pi_lock);
++
++ cpumask_copy(&p->cpus_allowed, new_mask);
++
++ if (task_queued(p)) {
++ /*
++ * Because __kthread_bind() calls this on blocked tasks without
++ * holding rq->lock.
++ */
++ lockdep_assert_held(rq->lock);
++ }
++}
++
++/*
++ * do_set_cpus_allowed() is for callers outside the scheduler core and
++ * must not be used on a running or queued task. We should be holding
++ * pi_lock.
++ */
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ __do_set_cpus_allowed(p, new_mask);
++ if (needs_other_cpu(p, task_cpu(p))) {
++ struct rq *rq;
++
++ rq = __task_rq_lock(p, NULL);
++ set_task_cpu(p, valid_task_cpu(p));
++ resched_task(p);
++ __task_rq_unlock(rq, NULL);
++ }
++}
++#endif
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: cpu the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void init_idle(struct task_struct *idle, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&idle->pi_lock, flags);
++ raw_spin_lock(rq->lock);
++ idle->last_ran = rq->niffies;
++ time_slice_expired(idle, rq);
++ idle->state = TASK_RUNNING;
++ /* Setting prio to illegal value shouldn't matter when never queued */
++ idle->prio = PRIO_LIMIT;
++
++ kasan_unpoison_task_stack(idle);
++
++#ifdef CONFIG_SMP
++ /*
++ * It's possible that init_idle() gets called multiple times on a task,
++ * in that case do_set_cpus_allowed() will not do the right thing.
++ *
++ * And since this is boot we can forgo the serialisation.
++ */
++ set_cpus_allowed_common(idle, cpumask_of(cpu));
++#ifdef CONFIG_SMT_NICE
++ idle->smt_bias = 0;
++#endif
++#endif
++ set_rq_task(rq, idle);
++
++ /* Silence PROVE_RCU */
++ rcu_read_lock();
++ set_task_cpu(idle, cpu);
++ rcu_read_unlock();
++
++ rq->curr = rq->idle = idle;
++ idle->on_rq = TASK_ON_RQ_QUEUED;
++ raw_spin_unlock(rq->lock);
++ raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++ /* Set the preempt count _outside_ the spinlocks! */
++ init_idle_preempt_count(idle, cpu);
++
++ ftrace_graph_init_idle_task(idle, cpu);
++ vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++ const struct cpumask __maybe_unused *trial)
++{
++ return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++ const struct cpumask *cs_cpus_allowed)
++{
++ int ret = 0;
++
++ /*
++ * Kthreads which disallow setaffinity shouldn't be moved
++ * to a new cpuset; we don't want to change their CPU
++ * affinity and isolating such threads by their set of
++ * allowed nodes is unnecessary.
Thus, cpusets are not
++ * applicable for such threads. This prevents checking for
++ * success of set_cpus_allowed_ptr() on all attached tasks
++ * before cpus_allowed may be changed.
++ */
++ if (p->flags & PF_NO_SETAFFINITY)
++ ret = -EINVAL;
++
++ return ret;
++}
++
++void resched_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ struct rq_flags rf;
++
++ rq_lock_irqsave(rq, &rf);
++ if (cpu_online(cpu) || cpu == smp_processor_id())
++ resched_curr(rq);
++ rq_unlock_irqrestore(rq, &rf);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu)
++{
++}
++
++void select_nohz_load_balancer(int stop_tick)
++{
++}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU. This is good for power-savings.
++ *
++ * We don't do a similar optimization for a completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++ int i, cpu = smp_processor_id();
++ struct sched_domain *sd;
++
++ if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
++ return cpu;
++
++ rcu_read_lock();
++ for_each_domain(cpu, sd) {
++ for_each_cpu(i, sched_domain_span(sd)) {
++ if (cpu == i)
++ continue;
++
++ if (!idle_cpu(i) && housekeeping_cpu(i, HK_FLAG_TIMER)) {
++ cpu = i;
++ goto unlock;
++ }
++ }
++ }
++
++ if (!housekeeping_cpu(cpu, HK_FLAG_TIMER))
++ cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
++unlock:
++ rcu_read_unlock();
++ return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++void wake_up_idle_cpu(int cpu)
++{
++ if (cpu == smp_processor_id())
++ return;
++
++ if (set_nr_and_not_polling(cpu_rq(cpu)->idle))
++ smp_sched_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static bool wake_up_full_nohz_cpu(int cpu)
++{
++ /*
++ * We just need the target to call irq_exit() and re-evaluate
++ * the next tick. The nohz full kick at least implies that.
++ * If needed we can still optimize that later with an
++ * empty IRQ.
++ */
++ if (cpu_is_offline(cpu))
++ return true; /* Don't try to wake offline CPUs. */
++ if (tick_nohz_full_cpu(cpu)) {
++ if (cpu != smp_processor_id() ||
++ tick_nohz_tick_stopped())
++ tick_nohz_full_kick_cpu(cpu);
++ return true;
++ }
++
++ return false;
++}
++
++/*
++ * Wake up the specified CPU. If the CPU is going offline, it is the
++ * caller's responsibility to deal with the lost wakeup, for example,
++ * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
++ */
++void wake_up_nohz_cpu(int cpu)
++{
++ if (!wake_up_full_nohz_cpu(cpu))
++ wake_up_idle_cpu(cpu);
++}
++#endif /* CONFIG_NO_HZ_COMMON */
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
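++ *
++ * For example (illustrative): a task affined to CPUs {2,3} that is
++ * handed a new mask of {0,1} while running on CPU 2 is retargeted
++ * to one of CPUs {0,1} and rescheduled below.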
++ * ++ * NOTE: the caller must have a valid reference to the task, the ++ * task must not exit() & deallocate itself prematurely. The ++ * call is not atomic; no spinlocks may be held. ++ */ ++static int __set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, bool check) ++{ ++ const struct cpumask *cpu_valid_mask = cpu_active_mask; ++ bool queued = false, running_wrong = false, kthread; ++ struct cpumask old_mask; ++ struct rq_flags rf; ++ int cpu, ret = 0; ++ struct rq *rq; ++ ++ rq = task_rq_lock(p, &rf); ++ update_rq_clock(rq); ++ ++ kthread = !!(p->flags & PF_KTHREAD); ++ if (kthread) { ++ /* ++ * Kernel threads are allowed on online && !active CPUs ++ */ ++ cpu_valid_mask = cpu_online_mask; ++ } ++ ++ /* ++ * Must re-check here, to close a race against __kthread_bind(), ++ * sched_setaffinity() is not guaranteed to observe the flag. ++ */ ++ if (check && (p->flags & PF_NO_SETAFFINITY)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ cpumask_copy(&old_mask, &p->cpus_allowed); ++ if (cpumask_equal(&old_mask, new_mask)) ++ goto out; ++ ++ if (!cpumask_intersects(new_mask, cpu_valid_mask)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ queued = task_queued(p); ++ __do_set_cpus_allowed(p, new_mask); ++ ++ if (kthread) { ++ /* ++ * For kernel threads that do indeed end up on online && ++ * !active we want to ensure they are strict per-CPU threads. ++ */ ++ WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) && ++ !cpumask_intersects(new_mask, cpu_active_mask) && ++ p->nr_cpus_allowed != 1); ++ } ++ ++ /* Can the task run on the task's current CPU? If so, we're done */ ++ if (cpumask_test_cpu(task_cpu(p), new_mask)) ++ goto out; ++ ++ if (task_running(rq, p)) { ++ /* Task is running on the wrong cpu now, reschedule it. */ ++ if (rq == this_rq()) { ++ cpu = cpumask_any_and(cpu_valid_mask, new_mask); ++ set_task_cpu(p, cpu); ++ set_tsk_need_resched(p); ++ running_wrong = true; ++ } else ++ resched_task(p); ++ } else { ++ cpu = cpumask_any_and(cpu_valid_mask, new_mask); ++ if (queued) { ++ /* ++ * Switch runqueue locks after dequeueing the task ++ * here while still holding the pi_lock to be holding ++ * the correct lock for enqueueing. ++ */ ++ dequeue_task(rq, p, 0); ++ rq_unlock(rq); ++ ++ rq = cpu_rq(cpu); ++ rq_lock(rq); ++ } ++ set_task_cpu(p, cpu); ++ if (queued) ++ enqueue_task(rq, p, 0); ++ } ++ if (queued) ++ try_preempt(p, rq); ++ if (running_wrong) ++ preempt_disable(); ++out: ++ task_rq_unlock(rq, p, &rf); ++ ++ if (running_wrong) { ++ __schedule(true); ++ preempt_enable(); ++ } ++ ++ return ret; ++} ++ ++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ return __set_cpus_allowed_ptr(p, new_mask, false); ++} ++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); ++ ++#ifdef CONFIG_HOTPLUG_CPU ++/* ++ * Run through task list and find tasks affined to the dead cpu, then remove ++ * that cpu from the list, enable cpu0 and set the zerobound flag. Must hold ++ * cpu 0 and src_cpu's runqueue locks. 
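++ * This pairs with unbind_zero() below, which restores the saved
++ * affinity once the CPU comes back online.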
++ */ ++static void bind_zero(int src_cpu) ++{ ++ struct task_struct *p, *t; ++ struct rq *rq0; ++ int bound = 0; ++ ++ if (src_cpu == 0) ++ return; ++ ++ rq0 = cpu_rq(0); ++ ++ do_each_thread(t, p) { ++ if (cpumask_test_cpu(src_cpu, &p->cpus_allowed)) { ++ bool local = (task_cpu(p) == src_cpu); ++ struct rq *rq = task_rq(p); ++ ++ /* task_running is the cpu stopper thread */ ++ if (local && task_running(rq, p)) ++ continue; ++ atomic_clear_cpu(src_cpu, &p->cpus_allowed); ++ atomic_set_cpu(0, &p->cpus_allowed); ++ p->zerobound = true; ++ bound++; ++ if (local) { ++ bool queued = task_queued(p); ++ ++ if (queued) ++ dequeue_task(rq, p, 0); ++ set_task_cpu(p, 0); ++ if (queued) ++ enqueue_task(rq0, p, 0); ++ } ++ } ++ } while_each_thread(t, p); ++ ++ if (bound) { ++ printk(KERN_INFO "Removed affinity for %d processes to cpu %d\n", ++ bound, src_cpu); ++ } ++} ++ ++/* Find processes with the zerobound flag and reenable their affinity for the ++ * CPU coming alive. */ ++static void unbind_zero(int src_cpu) ++{ ++ int unbound = 0, zerobound = 0; ++ struct task_struct *p, *t; ++ ++ if (src_cpu == 0) ++ return; ++ ++ do_each_thread(t, p) { ++ if (!p->mm) ++ p->zerobound = false; ++ if (p->zerobound) { ++ unbound++; ++ cpumask_set_cpu(src_cpu, &p->cpus_allowed); ++ /* Once every CPU affinity has been re-enabled, remove ++ * the zerobound flag */ ++ if (cpumask_subset(cpu_possible_mask, &p->cpus_allowed)) { ++ p->zerobound = false; ++ zerobound++; ++ } ++ } ++ } while_each_thread(t, p); ++ ++ if (unbound) { ++ printk(KERN_INFO "Added affinity for %d processes to cpu %d\n", ++ unbound, src_cpu); ++ } ++ if (zerobound) { ++ printk(KERN_INFO "Released forced binding to cpu0 for %d processes\n", ++ zerobound); ++ } ++} ++ ++/* ++ * Ensure that the idle task is using init_mm right before its cpu goes ++ * offline. ++ */ ++void idle_task_exit(void) ++{ ++ struct mm_struct *mm = current->active_mm; ++ ++ BUG_ON(cpu_online(smp_processor_id())); ++ ++ if (mm != &init_mm) { ++ switch_mm(mm, &init_mm, current); ++ current->active_mm = &init_mm; ++ finish_arch_post_lock_switch(); ++ } ++ mmdrop(mm); ++} ++#else /* CONFIG_HOTPLUG_CPU */ ++static void unbind_zero(int src_cpu) {} ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++void sched_set_stop_task(int cpu, struct task_struct *stop) ++{ ++ struct sched_param stop_param = { .sched_priority = STOP_PRIO }; ++ struct sched_param start_param = { .sched_priority = 0 }; ++ struct task_struct *old_stop = cpu_rq(cpu)->stop; ++ ++ if (stop) { ++ /* ++ * Make it appear like a SCHED_FIFO task, its something ++ * userspace knows about and won't get confused about. ++ * ++ * Also, it will make PI more or less work without too ++ * much confusion -- but then, stop work should not ++ * rely on PI working anyway. ++ */ ++ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param); ++ } ++ ++ cpu_rq(cpu)->stop = stop; ++ ++ if (old_stop) { ++ /* ++ * Reset it back to a normal scheduling policy so that ++ * it can die in pieces. 
++ */ ++ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param); ++ } ++} ++ ++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) ++ ++static struct ctl_table sd_ctl_dir[] = { ++ { ++ .procname = "sched_domain", ++ .mode = 0555, ++ }, ++ {} ++}; ++ ++static struct ctl_table sd_ctl_root[] = { ++ { ++ .procname = "kernel", ++ .mode = 0555, ++ .child = sd_ctl_dir, ++ }, ++ {} ++}; ++ ++static struct ctl_table *sd_alloc_ctl_entry(int n) ++{ ++ struct ctl_table *entry = ++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); ++ ++ return entry; ++} ++ ++static void sd_free_ctl_entry(struct ctl_table **tablep) ++{ ++ struct ctl_table *entry; ++ ++ /* ++ * In the intermediate directories, both the child directory and ++ * procname are dynamically allocated and could fail but the mode ++ * will always be set. In the lowest directory the names are ++ * static strings and all have proc handlers. ++ */ ++ for (entry = *tablep; entry->mode; entry++) { ++ if (entry->child) ++ sd_free_ctl_entry(&entry->child); ++ if (entry->proc_handler == NULL) ++ kfree(entry->procname); ++ } ++ ++ kfree(*tablep); ++ *tablep = NULL; ++} ++ ++#define CPU_LOAD_IDX_MAX 5 ++static int min_load_idx = 0; ++static int max_load_idx = CPU_LOAD_IDX_MAX-1; ++ ++static void ++set_table_entry(struct ctl_table *entry, ++ const char *procname, void *data, int maxlen, ++ umode_t mode, proc_handler *proc_handler, ++ bool load_idx) ++{ ++ entry->procname = procname; ++ entry->data = data; ++ entry->maxlen = maxlen; ++ entry->mode = mode; ++ entry->proc_handler = proc_handler; ++ ++ if (load_idx) { ++ entry->extra1 = &min_load_idx; ++ entry->extra2 = &max_load_idx; ++ } ++} ++ ++static struct ctl_table * ++sd_alloc_ctl_domain_table(struct sched_domain *sd) ++{ ++ struct ctl_table *table = sd_alloc_ctl_entry(14); ++ ++ if (table == NULL) ++ return NULL; ++ ++ set_table_entry(&table[0], "min_interval", &sd->min_interval, ++ sizeof(long), 0644, proc_doulongvec_minmax, false); ++ set_table_entry(&table[1], "max_interval", &sd->max_interval, ++ sizeof(long), 0644, proc_doulongvec_minmax, false); ++ set_table_entry(&table[2], "busy_idx", &sd->busy_idx, ++ sizeof(int), 0644, proc_dointvec_minmax, true); ++ set_table_entry(&table[3], "idle_idx", &sd->idle_idx, ++ sizeof(int), 0644, proc_dointvec_minmax, true); ++ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, ++ sizeof(int), 0644, proc_dointvec_minmax, true); ++ set_table_entry(&table[5], "wake_idx", &sd->wake_idx, ++ sizeof(int), 0644, proc_dointvec_minmax, true); ++ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, ++ sizeof(int), 0644, proc_dointvec_minmax, true); ++ set_table_entry(&table[7], "busy_factor", &sd->busy_factor, ++ sizeof(int), 0644, proc_dointvec_minmax, false); ++ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, ++ sizeof(int), 0644, proc_dointvec_minmax, false); ++ set_table_entry(&table[9], "cache_nice_tries", ++ &sd->cache_nice_tries, ++ sizeof(int), 0644, proc_dointvec_minmax, false); ++ set_table_entry(&table[10], "flags", &sd->flags, ++ sizeof(int), 0644, proc_dointvec_minmax, false); ++ set_table_entry(&table[11], "max_newidle_lb_cost", ++ &sd->max_newidle_lb_cost, ++ sizeof(long), 0644, proc_doulongvec_minmax, false); ++ set_table_entry(&table[12], "name", sd->name, ++ CORENAME_MAX_SIZE, 0444, proc_dostring, false); ++ /* &table[13] is terminator */ ++ ++ return table; ++} ++ ++static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) ++{ ++ struct ctl_table *entry, *table; ++ struct sched_domain *sd; ++ int 
domain_num = 0, i; ++ char buf[32]; ++ ++ for_each_domain(cpu, sd) ++ domain_num++; ++ entry = table = sd_alloc_ctl_entry(domain_num + 1); ++ if (table == NULL) ++ return NULL; ++ ++ i = 0; ++ for_each_domain(cpu, sd) { ++ snprintf(buf, 32, "domain%d", i); ++ entry->procname = kstrdup(buf, GFP_KERNEL); ++ entry->mode = 0555; ++ entry->child = sd_alloc_ctl_domain_table(sd); ++ entry++; ++ i++; ++ } ++ return table; ++} ++ ++static cpumask_var_t sd_sysctl_cpus; ++static struct ctl_table_header *sd_sysctl_header; ++ ++void register_sched_domain_sysctl(void) ++{ ++ static struct ctl_table *cpu_entries; ++ static struct ctl_table **cpu_idx; ++ char buf[32]; ++ int i; ++ ++ if (!cpu_entries) { ++ cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1); ++ if (!cpu_entries) ++ return; ++ ++ WARN_ON(sd_ctl_dir[0].child); ++ sd_ctl_dir[0].child = cpu_entries; ++ } ++ ++ if (!cpu_idx) { ++ struct ctl_table *e = cpu_entries; ++ ++ cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL); ++ if (!cpu_idx) ++ return; ++ ++ /* deal with sparse possible map */ ++ for_each_possible_cpu(i) { ++ cpu_idx[i] = e; ++ e++; ++ } ++ } ++ ++ if (!cpumask_available(sd_sysctl_cpus)) { ++ if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL)) ++ return; ++ ++ /* init to possible to not have holes in @cpu_entries */ ++ cpumask_copy(sd_sysctl_cpus, cpu_possible_mask); ++ } ++ ++ for_each_cpu(i, sd_sysctl_cpus) { ++ struct ctl_table *e = cpu_idx[i]; ++ ++ if (e->child) ++ sd_free_ctl_entry(&e->child); ++ ++ if (!e->procname) { ++ snprintf(buf, 32, "cpu%d", i); ++ e->procname = kstrdup(buf, GFP_KERNEL); ++ } ++ e->mode = 0555; ++ e->child = sd_alloc_ctl_cpu_table(i); ++ ++ __cpumask_clear_cpu(i, sd_sysctl_cpus); ++ } ++ ++ WARN_ON(sd_sysctl_header); ++ sd_sysctl_header = register_sysctl_table(sd_ctl_root); ++} ++ ++void dirty_sched_domain_sysctl(int cpu) ++{ ++ if (cpumask_available(sd_sysctl_cpus)) ++ __cpumask_set_cpu(cpu, sd_sysctl_cpus); ++} ++ ++/* may be called multiple times per register */ ++void unregister_sched_domain_sysctl(void) ++{ ++ unregister_sysctl_table(sd_sysctl_header); ++ sd_sysctl_header = NULL; ++} ++#endif /* CONFIG_SYSCTL */ ++ ++void set_rq_online(struct rq *rq) ++{ ++ if (!rq->online) { ++ cpumask_set_cpu(cpu_of(rq), rq->rd->online); ++ rq->online = true; ++ } ++} ++ ++void set_rq_offline(struct rq *rq) ++{ ++ if (rq->online) { ++ int cpu = cpu_of(rq); ++ ++ cpumask_clear_cpu(cpu, rq->rd->online); ++ rq->online = false; ++ clear_cpuidle_map(cpu); ++ } ++} ++ ++/* ++ * used to mark begin/end of suspend/resume: ++ */ ++static int num_cpus_frozen; ++ ++/* ++ * Update cpusets according to cpu_active mask. If cpusets are ++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper ++ * around partition_sched_domains(). ++ * ++ * If we come here as part of a suspend/resume, don't touch cpusets because we ++ * want to restore it back to its original state upon resume anyway. ++ */ ++static void cpuset_cpu_active(void) ++{ ++ if (cpuhp_tasks_frozen) { ++ /* ++ * num_cpus_frozen tracks how many CPUs are involved in suspend ++ * resume sequence. As long as this is not the last online ++ * operation in the resume sequence, just build a single sched ++ * domain, ignoring cpusets. ++ */ ++ partition_sched_domains(1, NULL, NULL); ++ if (--num_cpus_frozen) ++ return; ++ /* ++ * This is the last CPU online operation. So fall through and ++ * restore the original sched domains by considering the ++ * cpuset configurations. 
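++		 *
++		 * Editorial summary (sketch): together with
++		 * cpuset_cpu_inactive() below, the suspend/resume
++		 * bookkeeping amounts to
++		 *
++		 *	freeze:	num_cpus_frozen++;
++		 *	thaw:	if (--num_cpus_frozen == 0)
++		 *			rebuild the cpuset sched domains;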
++ */ ++ cpuset_force_rebuild(); ++ } ++ ++ cpuset_update_active_cpus(); ++} ++ ++static int cpuset_cpu_inactive(unsigned int cpu) ++{ ++ if (!cpuhp_tasks_frozen) { ++ cpuset_update_active_cpus(); ++ } else { ++ num_cpus_frozen++; ++ partition_sched_domains(1, NULL, NULL); ++ } ++ return 0; ++} ++ ++int sched_cpu_activate(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ struct rq_flags rf; ++ ++ set_cpu_active(cpu, true); ++ ++ if (sched_smp_initialized) { ++ sched_domains_numa_masks_set(cpu); ++ cpuset_cpu_active(); ++ } ++ ++ /* ++ * Put the rq online, if not already. This happens: ++ * ++ * 1) In the early boot process, because we build the real domains ++ * after all CPUs have been brought up. ++ * ++ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the ++ * domains. ++ */ ++ rq_lock_irqsave(rq, &rf); ++ if (rq->rd) { ++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); ++ set_rq_online(rq); ++ } ++ unbind_zero(cpu); ++ rq_unlock_irqrestore(rq, &rf); ++ ++ return 0; ++} ++ ++int sched_cpu_deactivate(unsigned int cpu) ++{ ++ int ret; ++ ++ set_cpu_active(cpu, false); ++ /* ++ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU ++ * users of this state to go away such that all new such users will ++ * observe it. ++ * ++ * Do sync before park smpboot threads to take care the rcu boost case. ++ */ ++ synchronize_rcu(); ++ ++ if (!sched_smp_initialized) ++ return 0; ++ ++ ret = cpuset_cpu_inactive(cpu); ++ if (ret) { ++ set_cpu_active(cpu, true); ++ return ret; ++ } ++ sched_domains_numa_masks_clear(cpu); ++ return 0; ++} ++ ++int sched_cpu_starting(unsigned int cpu) ++{ ++ sched_tick_start(cpu); ++ return 0; ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++int sched_cpu_dying(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ /* Handle pending wakeups and then migrate everything off */ ++ sched_ttwu_pending(); ++ sched_tick_stop(cpu); ++ ++ local_irq_save(flags); ++ double_rq_lock(rq, cpu_rq(0)); ++ if (rq->rd) { ++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); ++ set_rq_offline(rq); ++ } ++ bind_zero(cpu); ++ double_rq_unlock(rq, cpu_rq(0)); ++ sched_start_tick(rq, cpu); ++ hrexpiry_clear(rq); ++ local_irq_restore(flags); ++ ++ return 0; ++} ++#endif ++ ++#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC) ++/* ++ * Cheaper version of the below functions in case support for SMT and MC is ++ * compiled in but CPUs have no siblings. 
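++ *
++ * Editorial note (sketch): every rq starts out with these cheap
++ * variants and is upgraded in sched_init_smp() once real siblings
++ * are found, roughly:
++ *
++ *	rq->siblings_idle = sole_cpu_idle;	/* default */
++ *	if (cpumask_weight(thread_cpumask(cpu)) > 1)
++ *		rq->siblings_idle = siblings_cpu_idle;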
++ */ ++static bool sole_cpu_idle(struct rq *rq) ++{ ++ return rq_idle(rq); ++} ++#endif ++#ifdef CONFIG_SCHED_SMT ++static const cpumask_t *thread_cpumask(int cpu) ++{ ++ return topology_sibling_cpumask(cpu); ++} ++/* All this CPU's SMT siblings are idle */ ++static bool siblings_cpu_idle(struct rq *rq) ++{ ++ return cpumask_subset(&rq->thread_mask, &cpu_idle_map); ++} ++#endif ++#ifdef CONFIG_SCHED_MC ++static const cpumask_t *core_cpumask(int cpu) ++{ ++ return topology_core_cpumask(cpu); ++} ++/* All this CPU's shared cache siblings are idle */ ++static bool cache_cpu_idle(struct rq *rq) ++{ ++ return cpumask_subset(&rq->core_mask, &cpu_idle_map); ++} ++#endif ++ ++enum sched_domain_level { ++ SD_LV_NONE = 0, ++ SD_LV_SIBLING, ++ SD_LV_MC, ++ SD_LV_BOOK, ++ SD_LV_CPU, ++ SD_LV_NODE, ++ SD_LV_ALLNODES, ++ SD_LV_MAX ++}; ++ ++void __init sched_init_smp(void) ++{ ++ struct rq *rq, *other_rq, *leader = cpu_rq(0); ++ struct sched_domain *sd; ++ int cpu, other_cpu, i; ++#ifdef CONFIG_SCHED_SMT ++ bool smt_threads = false; ++#endif ++ sched_init_numa(); ++ ++ /* ++ * There's no userspace yet to cause hotplug operations; hence all the ++ * cpu masks are stable and all blatant races in the below code cannot ++ * happen. ++ */ ++ mutex_lock(&sched_domains_mutex); ++ sched_init_domains(cpu_active_mask); ++ mutex_unlock(&sched_domains_mutex); ++ ++ /* Move init over to a non-isolated CPU */ ++ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) ++ BUG(); ++ ++ local_irq_disable(); ++ mutex_lock(&sched_domains_mutex); ++ lock_all_rqs(); ++ /* ++ * Set up the relative cache distance of each online cpu from each ++ * other in a simple array for quick lookup. Locality is determined ++ * by the closest sched_domain that CPUs are separated by. CPUs with ++ * shared cache in SMT and MC are treated as local. Separate CPUs ++ * (within the same package or physically) within the same node are ++ * treated as not local. CPUs not even in the same domain (different ++ * nodes) are treated as very distant. ++ */ ++ for_each_online_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ ++ /* First check if this cpu is in the same node */ ++ for_each_domain(cpu, sd) { ++ if (sd->level > SD_LV_MC) ++ continue; ++ if (rqshare != RQSHARE_ALL) ++ leader = NULL; ++ /* Set locality to local node if not already found lower */ ++ for_each_cpu(other_cpu, sched_domain_span(sd)) { ++ if (rqshare >= RQSHARE_SMP) { ++ other_rq = cpu_rq(other_cpu); ++ ++ /* Set the smp_leader to the first CPU */ ++ if (!leader) ++ leader = rq; ++ other_rq->smp_leader = leader; ++ } ++ ++ if (rq->cpu_locality[other_cpu] > 3) ++ rq->cpu_locality[other_cpu] = 3; ++ } ++ } ++ ++ /* ++ * Each runqueue has its own function in case it doesn't have ++ * siblings of its own allowing mixed topologies. 
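++	 *
++	 * Editorial summary of the locality scale built above (smaller
++	 * is closer; 4 is the pre-initialised default):
++	 *
++	 *	0 same CPU		1 SMT sibling
++	 *	2 shared cache (MC)	3 same node
++	 *	4 separate node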
++ */ ++#ifdef CONFIG_SCHED_MC ++ leader = NULL; ++ if (cpumask_weight(core_cpumask(cpu)) > 1) { ++ cpumask_copy(&rq->core_mask, core_cpumask(cpu)); ++ cpumask_clear_cpu(cpu, &rq->core_mask); ++ for_each_cpu(other_cpu, core_cpumask(cpu)) { ++ if (rqshare == RQSHARE_MC) { ++ other_rq = cpu_rq(other_cpu); ++ ++ /* Set the mc_leader to the first CPU */ ++ if (!leader) ++ leader = rq; ++ other_rq->mc_leader = leader; ++ } ++ if (rq->cpu_locality[other_cpu] > 2) ++ rq->cpu_locality[other_cpu] = 2; ++ } ++ rq->cache_idle = cache_cpu_idle; ++ } ++#endif ++#ifdef CONFIG_SCHED_SMT ++ leader = NULL; ++ if (cpumask_weight(thread_cpumask(cpu)) > 1) { ++ cpumask_copy(&rq->thread_mask, thread_cpumask(cpu)); ++ cpumask_clear_cpu(cpu, &rq->thread_mask); ++ for_each_cpu(other_cpu, thread_cpumask(cpu)) { ++ if (rqshare == RQSHARE_SMT) { ++ other_rq = cpu_rq(other_cpu); ++ ++ /* Set the smt_leader to the first CPU */ ++ if (!leader) ++ leader = rq; ++ other_rq->smt_leader = leader; ++ } ++ if (rq->cpu_locality[other_cpu] > 1) ++ rq->cpu_locality[other_cpu] = 1; ++ } ++ rq->siblings_idle = siblings_cpu_idle; ++ smt_threads = true; ++ } ++#endif ++ } ++ ++#ifdef CONFIG_SMT_NICE ++ if (smt_threads) { ++ check_siblings = &check_smt_siblings; ++ wake_siblings = &wake_smt_siblings; ++ smt_schedule = &smt_should_schedule; ++ } ++#endif ++ unlock_all_rqs(); ++ mutex_unlock(&sched_domains_mutex); ++ ++ for_each_online_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ ++ for_each_online_cpu(other_cpu) { ++ if (other_cpu <= cpu) ++ continue; ++ printk(KERN_DEBUG "MuQSS locality CPU %d to %d: %d\n", cpu, other_cpu, rq->cpu_locality[other_cpu]); ++ } ++ } ++ ++ for_each_online_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ leader = rq->smp_leader; ++ ++ rq_lock(rq); ++ if (leader && rq != leader) { ++ printk(KERN_INFO "Sharing SMP runqueue from CPU %d to CPU %d\n", ++ leader->cpu, rq->cpu); ++ kfree(rq->node); ++ kfree(rq->sl); ++ kfree(rq->lock); ++ rq->node = leader->node; ++ rq->sl = leader->sl; ++ rq->lock = leader->lock; ++ barrier(); ++ /* To make up for not unlocking the freed runlock */ ++ preempt_enable(); ++ } else ++ rq_unlock(rq); ++ } ++ ++#ifdef CONFIG_SCHED_MC ++ for_each_online_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ leader = rq->mc_leader; ++ ++ rq_lock(rq); ++ if (leader && rq != leader) { ++ printk(KERN_INFO "Sharing MC runqueue from CPU %d to CPU %d\n", ++ leader->cpu, rq->cpu); ++ kfree(rq->node); ++ kfree(rq->sl); ++ kfree(rq->lock); ++ rq->node = leader->node; ++ rq->sl = leader->sl; ++ rq->lock = leader->lock; ++ barrier(); ++ /* To make up for not unlocking the freed runlock */ ++ preempt_enable(); ++ } else ++ rq_unlock(rq); ++ } ++#endif /* CONFIG_SCHED_MC */ ++ ++#ifdef CONFIG_SCHED_SMT ++ for_each_online_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ ++ leader = rq->smt_leader; ++ ++ rq_lock(rq); ++ if (leader && rq != leader) { ++ printk(KERN_INFO "Sharing SMT runqueue from CPU %d to CPU %d\n", ++ leader->cpu, rq->cpu); ++ kfree(rq->node); ++ kfree(rq->sl); ++ kfree(rq->lock); ++ rq->node = leader->node; ++ rq->sl = leader->sl; ++ rq->lock = leader->lock; ++ barrier(); ++ /* To make up for not unlocking the freed runlock */ ++ preempt_enable(); ++ } else ++ rq_unlock(rq); ++ } ++#endif /* CONFIG_SCHED_SMT */ ++ ++ local_irq_enable(); ++ ++ total_runqueues = 0; ++ for_each_possible_cpu(cpu) { ++ int locality, total_rqs = 0, total_cpus = 0; ++ ++ rq = cpu_rq(cpu); ++ if ( ++#ifdef CONFIG_SCHED_MC ++ (rq->mc_leader == rq) && ++#endif ++#ifdef CONFIG_SCHED_SMT ++ (rq->smt_leader == rq) && ++#endif ++ (rq->smp_leader == rq)) ++ 
total_runqueues++; ++ ++ for (locality = 0; locality <= 4; locality++) { ++ int test_cpu; ++ ++ for_each_possible_cpu(test_cpu) { ++ /* Work from each CPU up instead of every rq ++ * starting at CPU 0. Orders are better matched ++ * if the top half CPUs count down instead. */ ++ if (cpu < num_possible_cpus() / 2) ++ other_cpu = cpu + test_cpu; ++ else ++ other_cpu = cpu - test_cpu; ++ if (other_cpu < 0) ++ other_cpu += num_possible_cpus(); ++ else ++ other_cpu %= num_possible_cpus(); ++ other_rq = cpu_rq(other_cpu); ++ ++ if (rq->cpu_locality[other_cpu] == locality) { ++ rq->cpu_order[total_cpus++] = other_rq; ++ if ( ++ ++#ifdef CONFIG_SCHED_MC ++ (other_rq->mc_leader == other_rq) && ++#endif ++#ifdef CONFIG_SCHED_SMT ++ (other_rq->smt_leader == other_rq) && ++#endif ++ (other_rq->smp_leader == other_rq)) ++ rq->rq_order[total_rqs++] = other_rq; ++ } ++ } ++ } ++ } ++ ++ for_each_possible_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ for (i = 0; i < total_runqueues; i++) { ++ printk(KERN_DEBUG "CPU %d RQ order %d RQ %d\n", cpu, i, ++ rq->rq_order[i]->cpu); ++ } ++ } ++ for_each_possible_cpu(cpu) { ++ rq = cpu_rq(cpu); ++ for (i = 0; i < num_possible_cpus(); i++) { ++ printk(KERN_DEBUG "CPU %d CPU order %d RQ %d\n", cpu, i, ++ rq->cpu_order[i]->cpu); ++ } ++ } ++ switch (rqshare) { ++ case RQSHARE_ALL: ++ /* This should only ever read 1 */ ++ printk(KERN_INFO "MuQSS runqueue share type ALL total runqueues: %d\n", ++ total_runqueues); ++ break; ++ case RQSHARE_SMP: ++ printk(KERN_INFO "MuQSS runqueue share type SMP total runqueues: %d\n", ++ total_runqueues); ++ break; ++ case RQSHARE_MC: ++ printk(KERN_INFO "MuQSS runqueue share type MC total runqueues: %d\n", ++ total_runqueues); ++ break; ++ case RQSHARE_SMT: ++ printk(KERN_INFO "MuQSS runqueue share type SMT total runqueues: %d\n", ++ total_runqueues); ++ break; ++ case RQSHARE_NONE: ++ printk(KERN_INFO "MuQSS runqueue share type NONE total runqueues: %d\n", ++ total_runqueues); ++ break; ++ } ++ ++ sched_smp_initialized = true; ++} ++#else ++void __init sched_init_smp(void) ++{ ++ sched_smp_initialized = true; ++} ++#endif /* CONFIG_SMP */ ++ ++int in_sched_functions(unsigned long addr) ++{ ++ return in_lock_functions(addr) || ++ (addr >= (unsigned long)__sched_text_start ++ && addr < (unsigned long)__sched_text_end); ++} ++ ++#ifdef CONFIG_CGROUP_SCHED ++/* task group related information */ ++struct task_group { ++ struct cgroup_subsys_state css; ++ ++ struct rcu_head rcu; ++ struct list_head list; ++ ++ struct task_group *parent; ++ struct list_head siblings; ++ struct list_head children; ++}; ++ ++/* ++ * Default task group. ++ * Every task in system belongs to this group at bootup. 
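++ *
++ * (Editorial note: under MuQSS the cpu cgroup controller is a stub --
++ * root_task_group exists so the cgroup hierarchy still mounts; see
++ * the empty cpu_cgroup_* callbacks near the end of this file.)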
++ */ ++struct task_group root_task_group; ++LIST_HEAD(task_groups); ++ ++/* Cacheline aligned slab cache for task_group */ ++static struct kmem_cache *task_group_cache __read_mostly; ++#endif /* CONFIG_CGROUP_SCHED */ ++ ++void __init sched_init(void) ++{ ++#ifdef CONFIG_SMP ++ int cpu_ids; ++#endif ++ int i; ++ struct rq *rq; ++ ++ wait_bit_init(); ++ ++ prio_ratios[0] = 128; ++ for (i = 1 ; i < NICE_WIDTH ; i++) ++ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10; ++ ++ skiplist_node_init(&init_task.node); ++ ++#ifdef CONFIG_SMP ++ init_defrootdomain(); ++ cpumask_clear(&cpu_idle_map); ++#else ++ uprq = &per_cpu(runqueues, 0); ++#endif ++ ++#ifdef CONFIG_CGROUP_SCHED ++ task_group_cache = KMEM_CACHE(task_group, 0); ++ ++ list_add(&root_task_group.list, &task_groups); ++ INIT_LIST_HEAD(&root_task_group.children); ++ INIT_LIST_HEAD(&root_task_group.siblings); ++#endif /* CONFIG_CGROUP_SCHED */ ++ for_each_possible_cpu(i) { ++ rq = cpu_rq(i); ++ rq->node = kmalloc(sizeof(skiplist_node), GFP_ATOMIC); ++ skiplist_init(rq->node); ++ rq->sl = new_skiplist(rq->node); ++ rq->lock = kmalloc(sizeof(raw_spinlock_t), GFP_ATOMIC); ++ raw_spin_lock_init(rq->lock); ++ rq->nr_running = 0; ++ rq->nr_uninterruptible = 0; ++ rq->nr_switches = 0; ++ rq->clock = rq->old_clock = rq->last_niffy = rq->niffies = 0; ++ rq->last_jiffy = jiffies; ++ rq->user_ns = rq->nice_ns = rq->softirq_ns = rq->system_ns = ++ rq->iowait_ns = rq->idle_ns = 0; ++ rq->dither = 0; ++ set_rq_task(rq, &init_task); ++ rq->iso_ticks = 0; ++ rq->iso_refractory = false; ++#ifdef CONFIG_SMP ++ rq->smp_leader = rq; ++#ifdef CONFIG_SCHED_MC ++ rq->mc_leader = rq; ++#endif ++#ifdef CONFIG_SCHED_SMT ++ rq->smt_leader = rq; ++#endif ++ rq->sd = NULL; ++ rq->rd = NULL; ++ rq->online = false; ++ rq->cpu = i; ++ rq_attach_root(rq, &def_root_domain); ++#endif ++ init_rq_hrexpiry(rq); ++ atomic_set(&rq->nr_iowait, 0); ++ } ++ ++#ifdef CONFIG_SMP ++ cpu_ids = i; ++ /* ++ * Set the base locality for cpu cache distance calculation to ++ * "distant" (3). Make sure the distance from a CPU to itself is 0. ++ */ ++ for_each_possible_cpu(i) { ++ int j; ++ ++ rq = cpu_rq(i); ++#ifdef CONFIG_SCHED_SMT ++ rq->siblings_idle = sole_cpu_idle; ++#endif ++#ifdef CONFIG_SCHED_MC ++ rq->cache_idle = sole_cpu_idle; ++#endif ++ rq->cpu_locality = kmalloc(cpu_ids * sizeof(int *), GFP_ATOMIC); ++ for_each_possible_cpu(j) { ++ if (i == j) ++ rq->cpu_locality[j] = 0; ++ else ++ rq->cpu_locality[j] = 4; ++ } ++ rq->rq_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC); ++ rq->cpu_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC); ++ rq->rq_order[0] = rq->cpu_order[0] = rq; ++ for (j = 1; j < cpu_ids; j++) ++ rq->rq_order[j] = rq->cpu_order[j] = cpu_rq(j); ++ } ++#endif ++ ++ /* ++ * The boot idle thread does lazy MMU switching as well: ++ */ ++ mmgrab(&init_mm); ++ enter_lazy_tlb(&init_mm, current); ++ ++ /* ++ * Make us the idle thread. Technically, schedule() should not be ++ * called from this thread, however somewhere below it might be, ++ * but because we are the idle thread, we just pick up running again ++ * when this runqueue becomes "idle". 
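++	 *
++	 * (Editorial aside on the prio_ratios[] table filled in above:
++	 * each nice level costs 11/10 of the previous one, so the
++	 * extremes differ by roughly 1.1^39, i.e. about a 40:1
++	 * CPU-time ratio between nice -20 and nice +19.)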
++ */ ++ init_idle(current, smp_processor_id()); ++ ++#ifdef CONFIG_SMP ++ idle_thread_set_boot_cpu(); ++#endif /* SMP */ ++ ++ init_schedstats(); ++ ++ psi_init(); ++} ++ ++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP ++static inline int preempt_count_equals(int preempt_offset) ++{ ++ int nested = preempt_count() + rcu_preempt_depth(); ++ ++ return (nested == preempt_offset); ++} ++ ++void __might_sleep(const char *file, int line, int preempt_offset) ++{ ++ /* ++ * Blocking primitives will set (and therefore destroy) current->state, ++ * since we will exit with TASK_RUNNING make sure we enter with it, ++ * otherwise we will destroy state. ++ */ ++ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, ++ "do not call blocking ops when !TASK_RUNNING; " ++ "state=%lx set at [<%p>] %pS\n", ++ current->state, ++ (void *)current->task_state_change, ++ (void *)current->task_state_change); ++ ++ ___might_sleep(file, line, preempt_offset); ++} ++EXPORT_SYMBOL(__might_sleep); ++ ++void __cant_sleep(const char *file, int line, int preempt_offset) ++{ ++ static unsigned long prev_jiffy; ++ ++ if (irqs_disabled()) ++ return; ++ ++ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) ++ return; ++ ++ if (preempt_count() > preempt_offset) ++ return; ++ ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); ++ printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), ++ current->pid, current->comm); ++ ++ debug_show_held_locks(current); ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++EXPORT_SYMBOL_GPL(__cant_sleep); ++ ++void ___might_sleep(const char *file, int line, int preempt_offset) ++{ ++ /* Ratelimiting timestamp: */ ++ static unsigned long prev_jiffy; ++ ++ unsigned long preempt_disable_ip; ++ ++ /* WARN_ON_ONCE() by default, no rate limit required: */ ++ rcu_sleep_check(); ++ ++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && ++ !is_idle_task(current)) || ++ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || ++ oops_in_progress) ++ return; ++ ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ /* Save this before calling printk(), since that will clobber it: */ ++ preempt_disable_ip = get_preempt_disable_ip(current); ++ ++ printk(KERN_ERR ++ "BUG: sleeping function called from invalid context at %s:%d\n", ++ file, line); ++ printk(KERN_ERR ++ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), ++ current->pid, current->comm); ++ ++ if (task_stack_end_corrupted(current)) ++ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); ++ ++ debug_show_held_locks(current); ++ if (irqs_disabled()) ++ print_irqtrace_events(current); ++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) ++ && !preempt_count_equals(preempt_offset)) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(preempt_disable_ip); ++ pr_cont("\n"); ++ } ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++EXPORT_SYMBOL(___might_sleep); ++#endif ++ ++#ifdef CONFIG_MAGIC_SYSRQ ++static inline void normalise_rt_tasks(void) ++{ ++ struct task_struct *g, *p; ++ struct rq_flags rf; ++ struct rq *rq; ++ ++ read_lock(&tasklist_lock); ++ for_each_process_thread(g, p) { ++ /* ++ * Only normalize user tasks: ++ */ ++ if (p->flags & PF_KTHREAD) ++ continue; ++ ++ if (!rt_task(p) && !iso_task(p)) ++ continue; ++ ++ rq = 
task_rq_lock(p, &rf);
++		__setscheduler(p, rq, SCHED_NORMAL, 0, false);
++		task_rq_unlock(rq, p, &rf);
++	}
++	read_unlock(&tasklist_lock);
++}
++
++void normalize_rt_tasks(void)
++{
++	normalise_rt_tasks();
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++	return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner. This function
++ * must be called with all CPUs synchronised, and interrupts disabled, and
++ * the caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++	cpu_curr(cpu) = p;
++}
++
++#endif
++
++void init_idle_bootup_task(struct task_struct *idle)
++{}
++
++#ifdef CONFIG_SCHED_DEBUG
++__read_mostly bool sched_debug_enabled;
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++			  struct seq_file *m)
++{}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++	kmem_cache_free(task_group_cache, tg);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++	struct task_group *tg;
++
++	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++	if (!tg)
++		return ERR_PTR(-ENOMEM);
++
++	return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++	/* Now it should be safe to free those cfs_rqs */
++	sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++	/* Wait for possible concurrent references to cfs_rqs complete */
++	call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++void sched_offline_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++	struct task_group *parent = css_tg(parent_css);
++	struct task_group *tg;
++
++	if (!parent) {
++		/* This is early initialization for the top cgroup */
++		return &root_task_group.css;
++	}
++
++	tg = sched_create_group(parent);
++	if (IS_ERR(tg))
++		return ERR_PTR(-ENOMEM);
++	return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++	struct task_group *parent = css_tg(css->parent);
++
++	if (parent)
++		sched_online_group(tg, parent);
++	return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	sched_offline_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_free_group(tg);
++}
++
++static void cpu_cgroup_fork(struct task_struct *task)
++{
++}
++
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++	return 0;
++}
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++static struct cftype cpu_legacy_files[] = {
++	{ }	/* Terminate */
++};
++
++static struct cftype cpu_files[] = {
++	{ }	/* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_online	= cpu_cgroup_css_online,
++	.css_released	= cpu_cgroup_css_released,
++	.css_free	= cpu_cgroup_css_free,
++	.css_extra_stat_show	= cpu_extra_stat_show,
++	.fork		= cpu_cgroup_fork,
++	.can_attach	= cpu_cgroup_can_attach,
++	.attach		= cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++	.dfl_cftypes	= cpu_files,
++	.early_init	= true,
++	.threaded	= true,
++};
++#endif	/* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
+diff -Nur a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
+--- a/kernel/sched/MuQSS.h 1970-01-01 01:00:00.000000000 +0100
++++ b/kernel/sched/MuQSS.h 2019-07-07 09:17:41.261241813 +0100
+@@ -0,0 +1,957 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef MUQSS_SCHED_H
++#define MUQSS_SCHED_H
++
++#include <linux/sched/clock.h>
++#include <linux/sched/cpufreq.h>
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/hotplug.h>
++#include <linux/sched/init.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/signal.h>
++#include <linux/sched/smt.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/task.h>
++#include <linux/sched/task_stack.h>
++#include <linux/sched/topology.h>
++#include <linux/sched/wake_q.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <linux/cgroup.h>
++#include <linux/cpufreq.h>
++#include <linux/cpuidle.h>
++#include <linux/cpuset.h>
++#include <linux/ctype.h>
++#include <linux/energy_model.h>
++#include <linux/freezer.h>
++#include <linux/interrupt.h>
++#include <linux/kernel_stat.h>
++#include <linux/kthread.h>
++#include <linux/membarrier.h>
++#include <linux/livepatch.h>
++#include <linux/proc_fs.h>
++#include <linux/psi.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/skip_list.h> ++#include <linux/stop_machine.h> ++#include <linux/suspend.h> ++#include <linux/swait.h> ++#include <linux/syscalls.h> ++#include <linux/tick.h> ++#include <linux/tsacct_kern.h> ++#include <linux/u64_stats_sync.h> ++ ++#ifdef CONFIG_PARAVIRT ++#include <asm/paravirt.h> ++#endif ++ ++#include "cpupri.h" ++ ++#ifdef CONFIG_SCHED_DEBUG ++# define SCHED_WARN_ON(x) WARN_ONCE(x, #x) ++#else ++# define SCHED_WARN_ON(x) ((void)(x)) ++#endif ++ ++/* task_struct::on_rq states: */ ++#define TASK_ON_RQ_QUEUED 1 ++#define TASK_ON_RQ_MIGRATING 2 ++ ++struct rq; ++ ++#ifdef CONFIG_SMP ++ ++static inline bool sched_asym_prefer(int a, int b) ++{ ++ return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); ++} ++ ++struct perf_domain { ++ struct em_perf_domain *em_pd; ++ struct perf_domain *next; ++ struct rcu_head rcu; ++}; ++ ++/* Scheduling group status flags */ ++#define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */ ++#define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */ ++ ++/* ++ * We add the notion of a root-domain which will be used to define per-domain ++ * variables. Each exclusive cpuset essentially defines an island domain by ++ * fully partitioning the member cpus from any other cpuset. Whenever a new ++ * exclusive cpuset is created, we also create and attach a new root-domain ++ * object. ++ * ++ */ ++struct root_domain { ++ atomic_t refcount; ++ atomic_t rto_count; ++ struct rcu_head rcu; ++ cpumask_var_t span; ++ cpumask_var_t online; ++ ++ /* ++ * Indicate pullable load on at least one CPU, e.g: ++ * - More than one runnable task ++ * - Running task is misfit ++ */ ++ int overload; ++ ++ /* Indicate one or more cpus over-utilized (tipping point) */ ++ int overutilized; ++ ++ /* ++ * The bit corresponding to a CPU gets set here if such CPU has more ++ * than one runnable -deadline task (as it is below for RT tasks). ++ */ ++ cpumask_var_t dlo_mask; ++ atomic_t dlo_count; ++ /* Replace unused CFS structures with void */ ++ //struct dl_bw dl_bw; ++ //struct cpudl cpudl; ++ void *dl_bw; ++ void *cpudl; ++ ++ /* ++ * The "RT overload" flag: it gets set if a CPU has more than ++ * one runnable RT task. ++ */ ++ cpumask_var_t rto_mask; ++ //struct cpupri cpupri; ++ void *cpupri; ++ ++ unsigned long max_cpu_capacity; ++ ++ /* ++ * NULL-terminated list of performance domains intersecting with the ++ * CPUs of the rd. Protected by RCU. ++ */ ++ struct perf_domain *pd; ++}; ++ ++extern struct root_domain def_root_domain; ++extern struct mutex sched_domains_mutex; ++ ++extern void init_defrootdomain(void); ++extern int sched_init_domains(const struct cpumask *cpu_map); ++extern void rq_attach_root(struct rq *rq, struct root_domain *rd); ++ ++static inline void cpupri_cleanup(void __maybe_unused *cpupri) ++{ ++} ++ ++static inline void cpudl_cleanup(void __maybe_unused *cpudl) ++{ ++} ++ ++static inline void init_dl_bw(void __maybe_unused *dl_bw) ++{ ++} ++ ++static inline int cpudl_init(void __maybe_unused *dl_bw) ++{ ++ return 0; ++} ++ ++static inline int cpupri_init(void __maybe_unused *cpupri) ++{ ++ return 0; ++} ++#endif /* CONFIG_SMP */ ++ ++/* ++ * This is the main, per-CPU runqueue data structure. ++ * This data should only be modified by the local cpu. ++ */ ++struct rq { ++ raw_spinlock_t *lock; ++ raw_spinlock_t *orig_lock; ++ ++ struct task_struct *curr, *idle, *stop; ++ struct mm_struct *prev_mm; ++ ++ unsigned int nr_running; ++ /* ++ * This is part of a global counter where only the total sum ++ * over all CPUs matters. 
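++	 * (Editorial sketch: the only meaningful read is the sum
++	 *	for_each_possible_cpu(i)
++	 *		total += cpu_rq(i)->nr_uninterruptible;
++	 * a single CPU's count may even be transiently negative.)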
A task can increase this counter on ++ * one CPU and if it got migrated afterwards it may decrease ++ * it on another CPU. Always updated under the runqueue lock: ++ */ ++ unsigned long nr_uninterruptible; ++ u64 nr_switches; ++ ++ /* Stored data about rq->curr to work outside rq lock */ ++ u64 rq_deadline; ++ int rq_prio; ++ ++ /* Best queued id for use outside lock */ ++ u64 best_key; ++ ++ unsigned long last_scheduler_tick; /* Last jiffy this RQ ticked */ ++ unsigned long last_jiffy; /* Last jiffy this RQ updated rq clock */ ++ u64 niffies; /* Last time this RQ updated rq clock */ ++ u64 last_niffy; /* Last niffies as updated by local clock */ ++ u64 last_jiffy_niffies; /* Niffies @ last_jiffy */ ++ ++ u64 load_update; /* When we last updated load */ ++ unsigned long load_avg; /* Rolling load average */ ++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ ++ u64 irq_load_update; /* When we last updated IRQ load */ ++ unsigned long irq_load_avg; /* Rolling IRQ load average */ ++#endif ++#ifdef CONFIG_SMT_NICE ++ struct mm_struct *rq_mm; ++ int rq_smt_bias; /* Policy/nice level bias across smt siblings */ ++#endif ++ /* Accurate timekeeping data */ ++ unsigned long user_ns, nice_ns, irq_ns, softirq_ns, system_ns, ++ iowait_ns, idle_ns; ++ atomic_t nr_iowait; ++ ++ skiplist_node *node; ++ skiplist *sl; ++#ifdef CONFIG_SMP ++ struct task_struct *preempt; /* Preempt triggered on this task */ ++ struct task_struct *preempting; /* Hint only, what task is preempting */ ++ ++ int cpu; /* cpu of this runqueue */ ++ bool online; ++ ++ struct root_domain *rd; ++ struct sched_domain *sd; ++ ++ unsigned long cpu_capacity_orig; ++ ++ int *cpu_locality; /* CPU relative cache distance */ ++ struct rq **rq_order; /* Shared RQs ordered by relative cache distance */ ++ struct rq **cpu_order; /* RQs of discrete CPUs ordered by distance */ ++ ++ struct rq *smp_leader; /* First physical CPU per node */ ++#ifdef CONFIG_SCHED_SMT ++ struct rq *smt_leader; /* First logical CPU in SMT siblings */ ++ cpumask_t thread_mask; ++ bool (*siblings_idle)(struct rq *rq); ++ /* See if all smt siblings are idle */ ++#endif /* CONFIG_SCHED_SMT */ ++#ifdef CONFIG_SCHED_MC ++ struct rq *mc_leader; /* First logical CPU in MC siblings */ ++ cpumask_t core_mask; ++ bool (*cache_idle)(struct rq *rq); ++ /* See if all cache siblings are idle */ ++#endif /* CONFIG_SCHED_MC */ ++#endif /* CONFIG_SMP */ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ u64 prev_irq_time; ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++#ifdef CONFIG_PARAVIRT ++ u64 prev_steal_time; ++#endif /* CONFIG_PARAVIRT */ ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING ++ u64 prev_steal_time_rq; ++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */ ++ ++ u64 clock, old_clock, last_tick; ++ /* Ensure that all clocks are in the same cache line */ ++ u64 clock_task ____cacheline_aligned; ++ int dither; ++ ++ int iso_ticks; ++ bool iso_refractory; ++ ++#ifdef CONFIG_HIGH_RES_TIMERS ++ struct hrtimer hrexpiry_timer; ++#endif ++ ++ int rt_nr_running; /* Number real time tasks running */ ++#ifdef CONFIG_SCHEDSTATS ++ ++ /* latency stats */ ++ struct sched_info rq_sched_info; ++ unsigned long long rq_cpu_time; ++ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? 
*/ ++ ++ /* sys_sched_yield() stats */ ++ unsigned int yld_count; ++ ++ /* schedule() stats */ ++ unsigned int sched_switch; ++ unsigned int sched_count; ++ unsigned int sched_goidle; ++ ++ /* try_to_wake_up() stats */ ++ unsigned int ttwu_count; ++ unsigned int ttwu_local; ++#endif /* CONFIG_SCHEDSTATS */ ++ ++#ifdef CONFIG_SMP ++ struct llist_head wake_list; ++#endif ++ ++#ifdef CONFIG_CPU_IDLE ++ /* Must be inspected within a rcu lock section */ ++ struct cpuidle_state *idle_state; ++#endif ++}; ++ ++struct rq_flags { ++ unsigned long flags; ++}; ++ ++#ifdef CONFIG_SMP ++struct rq *cpu_rq(int cpu); ++#endif ++ ++#ifndef CONFIG_SMP ++extern struct rq *uprq; ++#define cpu_rq(cpu) (uprq) ++#define this_rq() (uprq) ++#define raw_rq() (uprq) ++#define task_rq(p) (uprq) ++#define cpu_curr(cpu) ((uprq)->curr) ++#else /* CONFIG_SMP */ ++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); ++#define this_rq() this_cpu_ptr(&runqueues) ++#define raw_rq() raw_cpu_ptr(&runqueues) ++#define task_rq(p) cpu_rq(task_cpu(p)) ++#endif /* CONFIG_SMP */ ++ ++static inline int task_current(struct rq *rq, struct task_struct *p) ++{ ++ return rq->curr == p; ++} ++ ++static inline int task_running(struct rq *rq, struct task_struct *p) ++{ ++#ifdef CONFIG_SMP ++ return p->on_cpu; ++#else ++ return task_current(rq, p); ++#endif ++} ++ ++static inline int task_on_rq_queued(struct task_struct *p) ++{ ++ return p->on_rq == TASK_ON_RQ_QUEUED; ++} ++ ++static inline int task_on_rq_migrating(struct task_struct *p) ++{ ++ return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; ++} ++ ++static inline void rq_lock(struct rq *rq) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock(rq->lock); ++} ++ ++static inline void rq_unlock(struct rq *rq) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock(rq->lock); ++} ++ ++static inline void rq_lock_irq(struct rq *rq) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock_irq(rq->lock); ++} ++ ++static inline void rq_unlock_irq(struct rq *rq, struct rq_flags __always_unused *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock_irq(rq->lock); ++} ++ ++static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock_irqsave(rq->lock, rf->flags); ++} ++ ++static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock_irqrestore(rq->lock, rf->flags); ++} ++ ++static inline struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(p->pi_lock) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ while (42) { ++ raw_spin_lock_irqsave(&p->pi_lock, rf->flags); ++ rq = task_rq(p); ++ raw_spin_lock(rq->lock); ++ if (likely(rq == task_rq(p))) ++ break; ++ raw_spin_unlock(rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); ++ } ++ return rq; ++} ++ ++static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) ++ __releases(rq->lock) ++ __releases(p->pi_lock) ++{ ++ rq_unlock(rq); ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); ++} ++ ++static inline struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags __always_unused *rf) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ lockdep_assert_held(&p->pi_lock); ++ ++ while (42) { ++ rq = task_rq(p); ++ raw_spin_lock(rq->lock); ++ if (likely(rq == task_rq(p))) ++ break; ++ raw_spin_unlock(rq->lock); ++ } ++ return rq; ++} ++ ++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags __always_unused *rf) ++{ ++ rq_unlock(rq); ++} ++ ++static inline struct rq * 
++this_rq_lock_irq(struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ local_irq_disable(); ++ rq = this_rq(); ++ rq_lock(rq); ++ return rq; ++} ++ ++/* ++ * {de,en}queue flags: Most not used on MuQSS. ++ * ++ * DEQUEUE_SLEEP - task is no longer runnable ++ * ENQUEUE_WAKEUP - task just became runnable ++ * ++ * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks ++ * are in a known state which allows modification. Such pairs ++ * should preserve as much state as possible. ++ * ++ * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location ++ * in the runqueue. ++ * ++ * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) ++ * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) ++ * ENQUEUE_MIGRATED - the task was migrated during wakeup ++ * ++ */ ++ ++#define DEQUEUE_SLEEP 0x01 ++#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */ ++ ++#define ENQUEUE_WAKEUP 0x01 ++#define ENQUEUE_RESTORE 0x02 ++ ++#ifdef CONFIG_SMP ++#define ENQUEUE_MIGRATED 0x40 ++#else ++#define ENQUEUE_MIGRATED 0x00 ++#endif ++ ++static inline u64 __rq_clock_broken(struct rq *rq) ++{ ++ return READ_ONCE(rq->clock); ++} ++ ++static inline u64 rq_clock(struct rq *rq) ++{ ++ lockdep_assert_held(rq->lock); ++ ++ return rq->clock; ++} ++ ++static inline u64 rq_clock_task(struct rq *rq) ++{ ++ lockdep_assert_held(rq->lock); ++ ++ return rq->clock_task; ++} ++ ++#ifdef CONFIG_NUMA ++enum numa_topology_type { ++ NUMA_DIRECT, ++ NUMA_GLUELESS_MESH, ++ NUMA_BACKPLANE, ++}; ++extern enum numa_topology_type sched_numa_topology_type; ++extern int sched_max_numa_distance; ++extern bool find_numa_distance(int distance); ++ ++extern void sched_init_numa(void); ++extern void sched_domains_numa_masks_set(unsigned int cpu); ++extern void sched_domains_numa_masks_clear(unsigned int cpu); ++#else ++static inline void sched_init_numa(void) { } ++static inline void sched_domains_numa_masks_set(unsigned int cpu) { } ++static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } ++#endif ++ ++extern struct mutex sched_domains_mutex; ++extern struct static_key_false sched_schedstats; ++ ++#define rcu_dereference_check_sched_domain(p) \ ++ rcu_dereference_check((p), \ ++ lockdep_is_held(&sched_domains_mutex)) ++ ++#ifdef CONFIG_SMP ++ ++/* ++ * The domain tree (rq->sd) is protected by RCU's quiescent state transition. ++ * See destroy_sched_domains: call_rcu for details. ++ * ++ * The domain tree of any CPU may only be accessed from within ++ * preempt-disabled sections. ++ */ ++#define for_each_domain(cpu, __sd) \ ++ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ ++ __sd; __sd = __sd->parent) ++ ++#define for_each_lower_domain(sd) for (; sd; sd = sd->child) ++ ++/** ++ * highest_flag_domain - Return highest sched_domain containing flag. ++ * @cpu: The cpu whose highest level of sched domain is to ++ * be returned. ++ * @flag: The flag to check for the highest sched_domain ++ * for the given cpu. ++ * ++ * Returns the highest sched_domain of a cpu which contains the given flag. 
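++ *
++ * Editorial example (sketch): the stock scheduler derives its LLC
++ * domain pointer this way, e.g.
++ *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);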
++ */ ++static inline struct sched_domain *highest_flag_domain(int cpu, int flag) ++{ ++ struct sched_domain *sd, *hsd = NULL; ++ ++ for_each_domain(cpu, sd) { ++ if (!(sd->flags & flag)) ++ break; ++ hsd = sd; ++ } ++ ++ return hsd; ++} ++ ++static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) ++{ ++ struct sched_domain *sd; ++ ++ for_each_domain(cpu, sd) { ++ if (sd->flags & flag) ++ break; ++ } ++ ++ return sd; ++} ++ ++DECLARE_PER_CPU(struct sched_domain *, sd_llc); ++DECLARE_PER_CPU(int, sd_llc_size); ++DECLARE_PER_CPU(int, sd_llc_id); ++DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); ++DECLARE_PER_CPU(struct sched_domain *, sd_numa); ++DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing); ++DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity); ++ ++struct sched_group_capacity { ++ atomic_t ref; ++ /* ++ * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity ++ * for a single CPU. ++ */ ++ unsigned long capacity; ++ unsigned long min_capacity; /* Min per-CPU capacity in group */ ++ unsigned long max_capacity; /* Max per-CPU capacity in group */ ++ unsigned long next_update; ++ int imbalance; /* XXX unrelated to capacity but shared group state */ ++ ++#ifdef CONFIG_SCHED_DEBUG ++ int id; ++#endif ++ ++ unsigned long cpumask[0]; /* balance mask */ ++}; ++ ++struct sched_group { ++ struct sched_group *next; /* Must be a circular list */ ++ atomic_t ref; ++ ++ unsigned int group_weight; ++ struct sched_group_capacity *sgc; ++ int asym_prefer_cpu; /* cpu of highest priority in group */ ++ ++ /* ++ * The CPUs this group covers. ++ * ++ * NOTE: this field is variable length. (Allocated dynamically ++ * by attaching extra space to the end of the structure, ++ * depending on how many CPUs the kernel has booted up with) ++ */ ++ unsigned long cpumask[0]; ++}; ++ ++static inline struct cpumask *sched_group_span(struct sched_group *sg) ++{ ++ return to_cpumask(sg->cpumask); ++} ++ ++/* ++ * See build_balance_mask(). ++ */ ++static inline struct cpumask *group_balance_mask(struct sched_group *sg) ++{ ++ return to_cpumask(sg->sgc->cpumask); ++} ++ ++/** ++ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. ++ * @group: The group whose first cpu is to be returned. 
++ */ ++static inline unsigned int group_first_cpu(struct sched_group *group) ++{ ++ return cpumask_first(sched_group_span(group)); ++} ++ ++ ++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) ++void register_sched_domain_sysctl(void); ++void dirty_sched_domain_sysctl(int cpu); ++void unregister_sched_domain_sysctl(void); ++#else ++static inline void register_sched_domain_sysctl(void) ++{ ++} ++static inline void dirty_sched_domain_sysctl(int cpu) ++{ ++} ++static inline void unregister_sched_domain_sysctl(void) ++{ ++} ++#endif ++ ++extern void sched_ttwu_pending(void); ++extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); ++extern void set_rq_online (struct rq *rq); ++extern void set_rq_offline(struct rq *rq); ++extern bool sched_smp_initialized; ++ ++static inline void update_group_capacity(struct sched_domain *sd, int cpu) ++{ ++} ++ ++static inline void trigger_load_balance(struct rq *rq) ++{ ++} ++ ++#define sched_feat(x) 0 ++ ++#else /* CONFIG_SMP */ ++ ++static inline void sched_ttwu_pending(void) { } ++ ++#endif /* CONFIG_SMP */ ++ ++#ifdef CONFIG_CPU_IDLE ++static inline void idle_set_state(struct rq *rq, ++ struct cpuidle_state *idle_state) ++{ ++ rq->idle_state = idle_state; ++} ++ ++static inline struct cpuidle_state *idle_get_state(struct rq *rq) ++{ ++ SCHED_WARN_ON(!rcu_read_lock_held()); ++ return rq->idle_state; ++} ++#else ++static inline void idle_set_state(struct rq *rq, ++ struct cpuidle_state *idle_state) ++{ ++} ++ ++static inline struct cpuidle_state *idle_get_state(struct rq *rq) ++{ ++ return NULL; ++} ++#endif ++ ++#ifdef CONFIG_SCHED_DEBUG ++extern bool sched_debug_enabled; ++#endif ++ ++extern void schedule_idle(void); ++ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++struct irqtime { ++ u64 total; ++ u64 tick_delta; ++ u64 irq_start_time; ++ struct u64_stats_sync sync; ++}; ++ ++DECLARE_PER_CPU(struct irqtime, cpu_irqtime); ++ ++/* ++ * Returns the irqtime minus the softirq time computed by ksoftirqd. ++ * Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime ++ * and never move forward. ++ */ ++static inline u64 irq_time_read(int cpu) ++{ ++ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); ++ unsigned int seq; ++ u64 total; ++ ++ do { ++ seq = __u64_stats_fetch_begin(&irqtime->sync); ++ total = irqtime->total; ++ } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); ++ ++ return total; ++} ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++ ++#ifdef CONFIG_SMP ++static inline int cpu_of(struct rq *rq) ++{ ++ return rq->cpu; ++} ++#else /* CONFIG_SMP */ ++static inline int cpu_of(struct rq *rq) ++{ ++ return 0; ++} ++#endif ++ ++#ifdef CONFIG_CPU_FREQ ++DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); ++ ++static inline void cpufreq_trigger(struct rq *rq, unsigned int flags) ++{ ++ struct update_util_data *data; ++ ++ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, ++ cpu_of(rq))); ++ ++ if (data) ++ data->func(data, rq->niffies, flags); ++} ++#else ++static inline void cpufreq_trigger(struct rq *rq, unsigned int flag) ++{ ++} ++#endif /* CONFIG_CPU_FREQ */ ++ ++#ifdef arch_scale_freq_capacity ++#ifndef arch_scale_freq_invariant ++#define arch_scale_freq_invariant() (true) ++#endif ++#else /* arch_scale_freq_capacity */ ++#define arch_scale_freq_invariant() (false) ++#endif ++ ++/* ++ * This should only be called when current == rq->idle. Dodgy workaround for ++ * when softirqs are pending and we are in the idle loop. 
Setting current to ++ * resched will kick us out of the idle loop and the softirqs will be serviced ++ * on our next pass through schedule(). ++ */ ++static inline bool softirq_pending(int cpu) ++{ ++ if (likely(!local_softirq_pending())) ++ return false; ++ set_tsk_need_resched(current); ++ return true; ++} ++ ++#ifdef CONFIG_64BIT ++static inline u64 read_sum_exec_runtime(struct task_struct *t) ++{ ++ return tsk_seruntime(t); ++} ++#else ++static inline u64 read_sum_exec_runtime(struct task_struct *t) ++{ ++ struct rq_flags rf; ++ u64 ns; ++ struct rq *rq; ++ ++ rq = task_rq_lock(t, &rf); ++ ns = tsk_seruntime(t); ++ task_rq_unlock(rq, t, &rf); ++ ++ return ns; ++} ++#endif ++ ++#ifndef arch_scale_freq_capacity ++static __always_inline ++unsigned long arch_scale_freq_capacity(int cpu) ++{ ++ return SCHED_CAPACITY_SCALE; ++} ++#endif ++ ++#ifdef CONFIG_NO_HZ_FULL ++extern bool sched_can_stop_tick(struct rq *rq); ++extern int __init sched_tick_offload_init(void); ++ ++/* ++ * Tick may be needed by tasks in the runqueue depending on their policy and ++ * requirements. If tick is needed, lets send the target an IPI to kick it out of ++ * nohz mode if necessary. ++ */ ++static inline void sched_update_tick_dependency(struct rq *rq) ++{ ++ int cpu; ++ ++ if (!tick_nohz_full_enabled()) ++ return; ++ ++ cpu = cpu_of(rq); ++ ++ if (!tick_nohz_full_cpu(cpu)) ++ return; ++ ++ if (sched_can_stop_tick(rq)) ++ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); ++ else ++ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); ++} ++#else ++static inline int sched_tick_offload_init(void) { return 0; } ++static inline void sched_update_tick_dependency(struct rq *rq) { } ++#endif ++ ++#define SCHED_FLAG_SUGOV 0x10000000 ++ ++static inline bool rt_rq_is_runnable(struct rq *rt_rq) ++{ ++ return rt_rq->rt_nr_running; ++} ++ ++#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL ++/** ++ * enum schedutil_type - CPU utilization type ++ * @FREQUENCY_UTIL: Utilization used to select frequency ++ * @ENERGY_UTIL: Utilization used during energy calculation ++ * ++ * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time ++ * need to be aggregated differently depending on the usage made of them. This ++ * enum is used within schedutil_freq_util() to differentiate the types of ++ * utilization expected by the callers, and adjust the aggregation accordingly. 
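++ *
++ * Editorial note (sketch): in this MuQSS shim the signals fed in are
++ * approximations, e.g. cpu_util_cfs() below reduces to
++ *	min(READ_ONCE(rq->load_avg), SCHED_CAPACITY_SCALE)
++ * rather than a PELT-derived utilisation.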
++ */ ++enum schedutil_type { ++ FREQUENCY_UTIL, ++ ENERGY_UTIL, ++}; ++ ++unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs, ++ unsigned long max, enum schedutil_type type); ++ ++static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs) ++{ ++ unsigned long max = arch_scale_cpu_capacity(NULL, cpu); ++ ++ return schedutil_freq_util(cpu, cfs, max, ENERGY_UTIL); ++} ++ ++static inline unsigned long cpu_bw_dl(struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline unsigned long cpu_util_dl(struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline unsigned long cpu_util_cfs(struct rq *rq) ++{ ++ unsigned long ret = READ_ONCE(rq->load_avg); ++ ++ if (ret > SCHED_CAPACITY_SCALE) ++ ret = SCHED_CAPACITY_SCALE; ++ return ret; ++} ++ ++static inline unsigned long cpu_util_rt(struct rq *rq) ++{ ++ unsigned long ret = READ_ONCE(rq->rt_nr_running); ++ ++ if (ret > SCHED_CAPACITY_SCALE) ++ ret = SCHED_CAPACITY_SCALE; ++ return ret; ++} ++ ++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ ++static inline unsigned long cpu_util_irq(struct rq *rq) ++{ ++ unsigned long ret = READ_ONCE(rq->irq_load_avg); ++ ++ if (ret > SCHED_CAPACITY_SCALE) ++ ret = SCHED_CAPACITY_SCALE; ++ return ret; ++} ++ ++static inline ++unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) ++{ ++ util *= (max - irq); ++ util /= max; ++ ++ return util; ++ ++} ++#else ++static inline unsigned long cpu_util_irq(struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline ++unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) ++{ ++ return util; ++} ++#endif ++#endif ++ ++#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) ++#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) ++ ++DECLARE_STATIC_KEY_FALSE(sched_energy_present); ++ ++static inline bool sched_energy_enabled(void) ++{ ++ return static_branch_unlikely(&sched_energy_present); ++} ++ ++#else /* ! 
(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ ++ ++#define perf_domain_span(pd) NULL ++static inline bool sched_energy_enabled(void) { return false; } ++ ++#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ ++#endif /* MUQSS_SCHED_H */ +diff -Nur a/kernel/sched/sched.h b/kernel/sched/sched.h +--- a/kernel/sched/sched.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/sched/sched.h 2019-07-07 09:17:41.261241813 +0100 +@@ -2,6 +2,19 @@ + /* + * Scheduler internal types and methods: + */ ++#ifdef CONFIG_SCHED_MUQSS ++#include "MuQSS.h" ++ ++/* Begin compatibility wrappers for MuQSS/CFS differences */ ++#define rq_rt_nr_running(rq) ((rq)->rt_nr_running) ++#define rq_h_nr_running(rq) ((rq)->nr_running) ++ ++#else /* CONFIG_SCHED_MUQSS */ ++ ++#define rq_rt_nr_running(rq) ((rq)->rt.rt_nr_running) ++#define rq_h_nr_running(rq) ((rq)->cfs.h_nr_running) ++ ++ + #include <linux/sched.h> + + #include <linux/sched/autogroup.h> +@@ -2341,3 +2354,30 @@ + static inline bool sched_energy_enabled(void) { return false; } + + #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ ++ ++/* MuQSS compatibility functions */ ++static inline bool softirq_pending(int cpu) ++{ ++ return false; ++} ++ ++#ifdef CONFIG_64BIT ++static inline u64 read_sum_exec_runtime(struct task_struct *t) ++{ ++ return t->se.sum_exec_runtime; ++} ++#else ++static inline u64 read_sum_exec_runtime(struct task_struct *t) ++{ ++ u64 ns; ++ struct rq_flags rf; ++ struct rq *rq; ++ ++ rq = task_rq_lock(t, &rf); ++ ns = t->se.sum_exec_runtime; ++ task_rq_unlock(rq, t, &rf); ++ ++ return ns; ++} ++#endif ++#endif /* CONFIG_SCHED_MUQSS */ +diff -Nur a/kernel/sched/topology.c b/kernel/sched/topology.c +--- a/kernel/sched/topology.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/sched/topology.c 2019-07-07 09:17:41.261241813 +0100 +@@ -442,7 +442,11 @@ + struct root_domain *old_rd = NULL; + unsigned long flags; + ++#ifdef CONFIG_SCHED_MUQSS ++ raw_spin_lock_irqsave(rq->lock, flags); ++#else + raw_spin_lock_irqsave(&rq->lock, flags); ++#endif + + if (rq->rd) { + old_rd = rq->rd; +@@ -468,7 +472,11 @@ + if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) + set_rq_online(rq); + ++#ifdef CONFIG_SCHED_MUQSS ++ raw_spin_unlock_irqrestore(rq->lock, flags); ++#else + raw_spin_unlock_irqrestore(&rq->lock, flags); ++#endif + + if (old_rd) + call_rcu(&old_rd->rcu, free_rootdomain); +diff -Nur a/kernel/skip_list.c b/kernel/skip_list.c +--- a/kernel/skip_list.c 1970-01-01 01:00:00.000000000 +0100 ++++ b/kernel/skip_list.c 2019-07-07 09:17:41.261241813 +0100 +@@ -0,0 +1,148 @@ ++/* ++ Copyright (C) 2011,2016 Con Kolivas. ++ ++ Code based on example originally by William Pugh. ++ ++Skip Lists are a probabilistic alternative to balanced trees, as ++described in the June 1990 issue of CACM and were invented by ++William Pugh in 1987. ++ ++A couple of comments about this implementation: ++The routine randomLevel has been hard-coded to generate random ++levels using p=0.25. It can be easily changed. ++ ++The insertion routine has been implemented so as to use the ++dirty hack described in the CACM paper: if a random level is ++generated that is more than the current maximum level, the ++current maximum level plus one is used instead. ++ ++Levels start at zero and go up to MaxLevel (which is equal to ++MaxNumberOfLevels-1). ++ ++The routines defined in this file are: ++ ++init: defines slnode ++ ++new_skiplist: returns a new, empty list ++ ++randomLevel: Returns a random level based on a u64 random seed passed to it. 
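(A worked example of the p=0.25 mapping mentioned above: the implementation below computes find_first_bit(&randseed, MaxLevel) / 2 with MaxLevel = 7, so it can only return levels 0 through 3. A node reaches level k only when the low 2k bits of the seed are clear, which for a uniform seed happens with probability 2^-2k = 4^-k -- exactly the p = 0.25 geometric distribution, with roughly three quarters of nodes staying at level 0, 3/16 at level 1, and so on.)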
++In MuQSS, the "niffy" time is used for this purpose. ++ ++insert(l,key, value): inserts the binding (key, value) into l. This operation ++occurs in O(log n) time. ++ ++delnode(slnode, l, node): deletes any binding of key from the l based on the ++actual node value. This operation occurs in O(k) time where k is the ++number of levels of the node in question (max 8). The original delete ++function occurred in O(log n) time and involved a search. ++ ++MuQSS Notes: In this implementation of skiplists, there are bidirectional ++next/prev pointers and the insert function returns a pointer to the actual ++node the value is stored. The key here is chosen by the scheduler so as to ++sort tasks according to the priority list requirements and is no longer used ++by the scheduler after insertion. The scheduler lookup, however, occurs in ++O(1) time because it is always the first item in the level 0 linked list. ++Since the task struct stores a copy of the node pointer upon skiplist_insert, ++it can also remove it much faster than the original implementation with the ++aid of prev<->next pointer manipulation and no searching. ++ ++*/ ++ ++#include <linux/slab.h> ++#include <linux/skip_list.h> ++ ++#define MaxNumberOfLevels 8 ++#define MaxLevel (MaxNumberOfLevels - 1) ++ ++void skiplist_init(skiplist_node *slnode) ++{ ++ int i; ++ ++ slnode->key = 0xFFFFFFFFFFFFFFFF; ++ slnode->level = 0; ++ slnode->value = NULL; ++ for (i = 0; i < MaxNumberOfLevels; i++) ++ slnode->next[i] = slnode->prev[i] = slnode; ++} ++ ++skiplist *new_skiplist(skiplist_node *slnode) ++{ ++ skiplist *l = kzalloc(sizeof(skiplist), GFP_ATOMIC); ++ ++ BUG_ON(!l); ++ l->header = slnode; ++ return l; ++} ++ ++void free_skiplist(skiplist *l) ++{ ++ skiplist_node *p, *q; ++ ++ p = l->header; ++ do { ++ q = p->next[0]; ++ p->next[0]->prev[0] = q->prev[0]; ++ skiplist_node_init(p); ++ p = q; ++ } while (p != l->header); ++ kfree(l); ++} ++ ++void skiplist_node_init(skiplist_node *node) ++{ ++ memset(node, 0, sizeof(skiplist_node)); ++} ++ ++static inline unsigned int randomLevel(const long unsigned int randseed) ++{ ++ return find_first_bit(&randseed, MaxLevel) / 2; ++} ++ ++void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed) ++{ ++ skiplist_node *update[MaxNumberOfLevels]; ++ skiplist_node *p, *q; ++ int k = l->level; ++ ++ p = l->header; ++ do { ++ while (q = p->next[k], q->key <= key) ++ p = q; ++ update[k] = p; ++ } while (--k >= 0); ++ ++ ++l->entries; ++ k = randomLevel(randseed); ++ if (k > l->level) { ++ k = ++l->level; ++ update[k] = l->header; ++ } ++ ++ node->level = k; ++ node->key = key; ++ node->value = value; ++ do { ++ p = update[k]; ++ node->next[k] = p->next[k]; ++ p->next[k] = node; ++ node->prev[k] = p; ++ node->next[k]->prev[k] = node; ++ } while (--k >= 0); ++} ++ ++void skiplist_delete(skiplist *l, skiplist_node *node) ++{ ++ int k, m = node->level; ++ ++ for (k = 0; k <= m; k++) { ++ node->prev[k]->next[k] = node->next[k]; ++ node->next[k]->prev[k] = node->prev[k]; ++ } ++ skiplist_node_init(node); ++ if (m == l->level) { ++ while (l->header->next[m] == l->header && l->header->prev[m] == l->header && m > 0) ++ m--; ++ l->level = m; ++ } ++ l->entries--; ++} +diff -Nur a/kernel/sysctl.c b/kernel/sysctl.c +--- a/kernel/sysctl.c 2019-07-07 09:08:19.152348621 +0100 ++++ b/kernel/sysctl.c 2019-07-07 09:23:47.863548280 +0100 +@@ -141,6 +141,12 @@ + static unsigned long long_max __read_only = LONG_MAX; + static int one_hundred __read_only = 100; + static int 
one_thousand __read_only = 1000; ++#ifdef CONFIG_SCHED_MUQSS ++extern int rr_interval; ++extern int sched_interactive; ++extern int sched_iso_cpu; ++extern int sched_yield_type; ++#endif + #ifdef CONFIG_PRINTK + static int ten_thousand __read_only = 10000; + #endif +@@ -316,7 +322,7 @@ + { } + }; + +-#ifdef CONFIG_SCHED_DEBUG ++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_MUQSS) + static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */ + static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */ + static int min_wakeup_granularity_ns __read_only; /* 0 usecs */ +@@ -333,6 +339,7 @@ + #endif + + static struct ctl_table kern_table[] = { ++#ifndef CONFIG_SCHED_MUQSS + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +@@ -498,6 +505,7 @@ + .extra2 = &one, + }, + #endif ++#endif /* !CONFIG_SCHED_MUQSS */ + #ifdef CONFIG_PROVE_LOCKING + { + .procname = "prove_locking", +@@ -1110,6 +1118,44 @@ + .proc_handler = proc_dointvec, + }, + #endif ++#ifdef CONFIG_SCHED_MUQSS ++ { ++ .procname = "rr_interval", ++ .data = &rr_interval, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &one, ++ .extra2 = &one_thousand, ++ }, ++ { ++ .procname = "interactive", ++ .data = &sched_interactive, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &zero, ++ .extra2 = &one, ++ }, ++ { ++ .procname = "iso_cpu", ++ .data = &sched_iso_cpu, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &zero, ++ .extra2 = &one_hundred, ++ }, ++ { ++ .procname = "yield_type", ++ .data = &sched_yield_type, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &zero, ++ .extra2 = &two, ++ }, ++#endif + #if defined(CONFIG_S390) && defined(CONFIG_SMP) + { + .procname = "spin_retry", +diff -Nur a/kernel/time/clockevents.c b/kernel/time/clockevents.c +--- a/kernel/time/clockevents.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/time/clockevents.c 2019-07-07 09:17:41.261241813 +0100 +@@ -190,8 +190,13 @@ + + #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST + ++#ifdef CONFIG_SCHED_MUQSS ++/* Limit min_delta to 100us */ ++#define MIN_DELTA_LIMIT (NSEC_PER_SEC / 10000) ++#else + /* Limit min_delta to a jiffie */ + #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ) ++#endif + + /** + * clockevents_increase_min_delta - raise minimum delta of a clock event device +diff -Nur a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c +--- a/kernel/time/posix-cpu-timers.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/time/posix-cpu-timers.c 2019-07-07 09:17:41.261241813 +0100 +@@ -829,7 +829,7 @@ + tsk_expires->virt_exp = expires; + + tsk_expires->sched_exp = check_timers_list(++timers, firing, +- tsk->se.sum_exec_runtime); ++ tsk_seruntime(tsk)); + + /* + * Check for the special case thread timers. +@@ -839,7 +839,7 @@ + unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME); + + if (hard != RLIM_INFINITY && +- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { ++ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { + /* + * At the hard limit, we just die. + * No need to calculate anything else now. 
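(These posix-cpu-timers hunks route the accounting reads through the tsk_seruntime()/tsk_rttimeout() wrappers. Their definitions live in the include/linux/sched.h part of the patch, which this excerpt does not reproduce; a sketch of what they presumably expand to -- the non-MuQSS expansions are taken from the lines being replaced here, while the MuQSS field names are an assumption of this sketch:

    #ifdef CONFIG_SCHED_MUQSS
    #define tsk_seruntime(t)    ((t)->sched_time)            /* assumed: MuQSS's own runtime accounting */
    #define tsk_rttimeout(t)    ((t)->rt_timeout)            /* assumed: RT timeout kept directly on the task */
    #else
    #define tsk_seruntime(t)    ((t)->se.sum_exec_runtime)   /* what the old code read here */
    #define tsk_rttimeout(t)    ((t)->rt.timeout)
    #endif

Either way, the callers in these hunks stay scheduler-agnostic.)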
+@@ -851,7 +851,7 @@ + __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); + return; + } +- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) { ++ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) { + /* + * At the soft limit, send a SIGXCPU every second. + */ +@@ -1091,7 +1091,7 @@ + struct task_cputime task_sample; + + task_cputime(tsk, &task_sample.utime, &task_sample.stime); +- task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime; ++ task_sample.sum_exec_runtime = tsk_seruntime(tsk); + if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) + return 1; + } +diff -Nur a/kernel/time/timer.c b/kernel/time/timer.c +--- a/kernel/time/timer.c 2019-07-07 09:08:19.152348621 +0100 ++++ b/kernel/time/timer.c 2019-07-07 09:17:41.271242152 +0100 +@@ -1478,7 +1478,7 @@ + * Check, if the next hrtimer event is before the next timer wheel + * event: + */ +-static u64 cmp_next_hrtimer_event(u64 basem, u64 expires) ++static u64 cmp_next_hrtimer_event(struct timer_base *base, u64 basem, u64 expires) + { + u64 nextevt = hrtimer_get_next_event(); + +@@ -1496,6 +1496,9 @@ + if (nextevt <= basem) + return basem; + ++ if (nextevt < expires && nextevt - basem <= TICK_NSEC) ++ base->is_idle = false; ++ + /* + * Round up to the next jiffie. High resolution timers are + * off, so the hrtimers are expired in the tick and we need to +@@ -1565,7 +1568,7 @@ + } + raw_spin_unlock(&base->lock); + +- return cmp_next_hrtimer_event(basem, expires); ++ return cmp_next_hrtimer_event(base, basem, expires); + } + + /** +diff -Nur a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c +--- a/kernel/trace/trace_selftest.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/kernel/trace/trace_selftest.c 2019-07-07 09:17:41.271242152 +0100 +@@ -1045,10 +1045,15 @@ + { + /* Make this a -deadline thread */ + static const struct sched_attr attr = { ++#ifdef CONFIG_SCHED_MUQSS ++ /* No deadline on MuQSS, use RR */ ++ .sched_policy = SCHED_RR, ++#else + .sched_policy = SCHED_DEADLINE, + .sched_runtime = 100000ULL, + .sched_deadline = 10000000ULL, + .sched_period = 10000000ULL ++#endif + }; + struct wakeup_test_data *x = data; + diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0002-Fix-Werror-build-failure-in-tools.patch b/sys-kernel/linux-sources-redcore/files/5.1-0002-Fix-Werror-build-failure-in-tools.patch new file mode 100644 index 00000000..c8c39888 --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-0002-Fix-Werror-build-failure-in-tools.patch @@ -0,0 +1,25 @@ +From 89b8d55e743d382f463526832cf8b8a4f8cf32ff Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Sun, 18 Feb 2018 12:36:22 +1100 +Subject: [PATCH 02/16] Fix Werror build failure in tools. 
+ +--- + tools/objtool/Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile +index 53f8be0f4a1f..ad2c11a881db 100644 +--- a/tools/objtool/Makefile ++++ b/tools/objtool/Makefile +@@ -34,7 +34,7 @@ INCLUDES := -I$(srctree)/tools/include \ + -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ + -I$(srctree)/tools/objtool/arch/$(ARCH)/include + WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed +-CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS) ++CFLAGS += $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS) + LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS) + + # Allow old libelf to be used: +-- +2.17.1 + diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0003-Make-preemptible-kernel-default.patch b/sys-kernel/linux-sources-redcore/files/5.1-0003-Make-preemptible-kernel-default.patch new file mode 100644 index 00000000..ec621e09 --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-0003-Make-preemptible-kernel-default.patch @@ -0,0 +1,4653 @@ +From 4caf76327e0d7e1c25b40dbbf7294cc80a2d167c Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Sat, 29 Oct 2016 11:20:37 +1100 +Subject: [PATCH 03/16] Make preemptible kernel default. + +Make full preempt default on all arches. +--- + arch/arc/configs/tb10x_defconfig | 2 +- + arch/arm/configs/bcm2835_defconfig | 2 +- + arch/arm/configs/imx_v6_v7_defconfig | 1 + + arch/arm/configs/mps2_defconfig | 2 +- + arch/arm/configs/mxs_defconfig | 7 +- + arch/blackfin/configs/BF518F-EZBRD_defconfig | 121 ++++ + arch/blackfin/configs/BF526-EZBRD_defconfig | 158 ++++++ + .../blackfin/configs/BF527-EZKIT-V2_defconfig | 188 +++++++ + arch/blackfin/configs/BF527-EZKIT_defconfig | 181 ++++++ + .../blackfin/configs/BF527-TLL6527M_defconfig | 178 ++++++ + arch/blackfin/configs/BF533-EZKIT_defconfig | 114 ++++ + arch/blackfin/configs/BF533-STAMP_defconfig | 124 +++++ + arch/blackfin/configs/BF537-STAMP_defconfig | 136 +++++ + arch/blackfin/configs/BF538-EZKIT_defconfig | 133 +++++ + arch/blackfin/configs/BF548-EZKIT_defconfig | 207 +++++++ + arch/blackfin/configs/BF561-ACVILON_defconfig | 149 +++++ + .../configs/BF561-EZKIT-SMP_defconfig | 112 ++++ + arch/blackfin/configs/BF561-EZKIT_defconfig | 114 ++++ + arch/blackfin/configs/BF609-EZKIT_defconfig | 154 +++++ + arch/blackfin/configs/BlackStamp_defconfig | 108 ++++ + arch/blackfin/configs/CM-BF527_defconfig | 129 +++++ + arch/blackfin/configs/PNAV-10_defconfig | 111 ++++ + arch/blackfin/configs/SRV1_defconfig | 88 +++ + arch/blackfin/configs/TCM-BF518_defconfig | 131 +++++ + arch/mips/configs/fuloong2e_defconfig | 2 +- + arch/mips/configs/gpr_defconfig | 2 +- + arch/mips/configs/ip22_defconfig | 2 +- + arch/mips/configs/ip28_defconfig | 2 +- + arch/mips/configs/jazz_defconfig | 2 +- + arch/mips/configs/mtx1_defconfig | 2 +- + arch/mips/configs/nlm_xlr_defconfig | 2 +- + arch/mips/configs/pic32mzda_defconfig | 2 +- + arch/mips/configs/pistachio_defconfig | 2 +- + arch/mips/configs/pnx8335_stb225_defconfig | 2 +- + arch/mips/configs/rm200_defconfig | 2 +- + arch/parisc/configs/712_defconfig | 2 +- + arch/parisc/configs/c3000_defconfig | 2 +- + arch/parisc/configs/default_defconfig | 2 +- + arch/powerpc/configs/c2k_defconfig | 389 +++++++++++++ + arch/powerpc/configs/ppc6xx_defconfig | 2 +- + arch/score/configs/spct6600_defconfig | 84 +++ + arch/sh/configs/se7712_defconfig | 2 +- + arch/sh/configs/se7721_defconfig | 2 +- + 
arch/sh/configs/titan_defconfig | 2 +- + arch/sparc/configs/sparc64_defconfig | 2 +- + arch/tile/configs/tilegx_defconfig | 411 ++++++++++++++ + arch/tile/configs/tilepro_defconfig | 524 ++++++++++++++++++ + arch/x86/configs/i386_defconfig | 2 +- + arch/x86/configs/x86_64_defconfig | 2 +- + kernel/Kconfig.preempt | 7 +- + 50 files changed, 4079 insertions(+), 28 deletions(-) + create mode 100644 arch/blackfin/configs/BF518F-EZBRD_defconfig + create mode 100644 arch/blackfin/configs/BF526-EZBRD_defconfig + create mode 100644 arch/blackfin/configs/BF527-EZKIT-V2_defconfig + create mode 100644 arch/blackfin/configs/BF527-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BF527-TLL6527M_defconfig + create mode 100644 arch/blackfin/configs/BF533-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BF533-STAMP_defconfig + create mode 100644 arch/blackfin/configs/BF537-STAMP_defconfig + create mode 100644 arch/blackfin/configs/BF538-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BF548-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BF561-ACVILON_defconfig + create mode 100644 arch/blackfin/configs/BF561-EZKIT-SMP_defconfig + create mode 100644 arch/blackfin/configs/BF561-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BF609-EZKIT_defconfig + create mode 100644 arch/blackfin/configs/BlackStamp_defconfig + create mode 100644 arch/blackfin/configs/CM-BF527_defconfig + create mode 100644 arch/blackfin/configs/PNAV-10_defconfig + create mode 100644 arch/blackfin/configs/SRV1_defconfig + create mode 100644 arch/blackfin/configs/TCM-BF518_defconfig + create mode 100644 arch/powerpc/configs/c2k_defconfig + create mode 100644 arch/score/configs/spct6600_defconfig + create mode 100644 arch/tile/configs/tilegx_defconfig + create mode 100644 arch/tile/configs/tilepro_defconfig + +diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig +index 5b5119d2b5d5..7425d2ec3a82 100644 +--- a/arch/arc/configs/tb10x_defconfig ++++ b/arch/arc/configs/tb10x_defconfig +@@ -29,7 +29,7 @@ CONFIG_ARC_PLAT_TB10X=y + CONFIG_ARC_CACHE_LINE_SHIFT=5 + CONFIG_HZ=250 + CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk" +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + # CONFIG_COMPACTION is not set + CONFIG_NET=y + CONFIG_PACKET=y +diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig +index dcf7610cfe55..d15cd13aa944 100644 +--- a/arch/arm/configs/bcm2835_defconfig ++++ b/arch/arm/configs/bcm2835_defconfig +@@ -29,7 +29,7 @@ CONFIG_MODULE_UNLOAD=y + CONFIG_ARCH_MULTI_V6=y + CONFIG_ARCH_BCM=y + CONFIG_ARCH_BCM2835=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_AEABI=y + CONFIG_KSM=y + CONFIG_CLEANCACHE=y +diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig +index 50fb01d70b10..5f2960f1853f 100644 +--- a/arch/arm/configs/imx_v6_v7_defconfig ++++ b/arch/arm/configs/imx_v6_v7_defconfig +@@ -45,6 +45,7 @@ CONFIG_PCI_MSI=y + CONFIG_PCI_IMX6=y + CONFIG_SMP=y + CONFIG_ARM_PSCI=y ++CONFIG_PREEMPT=y + CONFIG_HIGHMEM=y + CONFIG_FORCE_MAX_ZONEORDER=14 + CONFIG_CMDLINE="noinitrd console=ttymxc0,115200" +diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig +index 1d923dbb9928..9c1931f1fafd 100644 +--- a/arch/arm/configs/mps2_defconfig ++++ b/arch/arm/configs/mps2_defconfig +@@ -18,7 +18,7 @@ CONFIG_ARCH_MPS2=y + CONFIG_SET_MEM_PARAM=y + CONFIG_DRAM_BASE=0x21000000 + CONFIG_DRAM_SIZE=0x1000000 +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + # CONFIG_ATAGS is not set + 
CONFIG_ZBOOT_ROM_TEXT=0x0 + CONFIG_ZBOOT_ROM_BSS=0x0 +diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig +index 38480596c449..d509ff66f73a 100644 +--- a/arch/arm/configs/mxs_defconfig ++++ b/arch/arm/configs/mxs_defconfig +@@ -1,7 +1,7 @@ + CONFIG_SYSVIPC=y + CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT_VOLUNTARY=n + CONFIG_TASKSTATS=y + CONFIG_TASK_DELAY_ACCT=y + CONFIG_TASK_XACCT=y +@@ -27,6 +27,11 @@ CONFIG_MODVERSIONS=y + CONFIG_BLK_DEV_INTEGRITY=y + # CONFIG_IOSCHED_DEADLINE is not set + # CONFIG_IOSCHED_CFQ is not set ++# CONFIG_ARCH_MULTI_V7 is not set ++CONFIG_ARCH_MXS=y ++# CONFIG_ARM_THUMB is not set ++CONFIG_PREEMPT=y ++CONFIG_AEABI=y + CONFIG_NET=y + CONFIG_PACKET=y + CONFIG_UNIX=y +diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig +new file mode 100644 +index 000000000000..39b91dfa55b5 +--- /dev/null ++++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig +@@ -0,0 +1,121 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF518=y ++CONFIG_IRQ_TIMER0=12 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_JEDECPROBE=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++CONFIG_NET_BFIN=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set 
++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_USB_SUPPORT is not set ++CONFIG_MMC=y ++CONFIG_SDH_BFIN=y ++CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=m ++# CONFIG_DNOTIFY is not set ++CONFIG_VFAT_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRC_CCITT=m +diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig +new file mode 100644 +index 000000000000..675cadb3a0c4 +--- /dev/null ++++ b/arch/blackfin/configs/BF526-EZBRD_defconfig +@@ -0,0 +1,158 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF526=y ++CONFIG_IRQ_TIMER0=12 ++CONFIG_BFIN526_EZBRD=y ++CONFIG_IRQ_USB_INT0=11 ++CONFIG_IRQ_USB_INT1=11 ++CONFIG_IRQ_USB_INT2=11 ++CONFIG_IRQ_USB_DMA=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_NAND=m ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_SCSI=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++CONFIG_BLK_DEV_SR=m ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_BFIN=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT_FF_MEMLESS=m ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_KEYBOARD is not set ++# 
CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_HID_A4TECH=y ++CONFIG_HID_APPLE=y ++CONFIG_HID_BELKIN=y ++CONFIG_HID_CHERRY=y ++CONFIG_HID_CHICONY=y ++CONFIG_HID_CYPRESS=y ++CONFIG_HID_EZKEY=y ++CONFIG_HID_GYRATION=y ++CONFIG_HID_LOGITECH=y ++CONFIG_HID_MICROSOFT=y ++CONFIG_HID_MONTEREY=y ++CONFIG_HID_PANTHERLORD=y ++CONFIG_HID_PETALYNX=y ++CONFIG_HID_SAMSUNG=y ++CONFIG_HID_SONY=y ++CONFIG_HID_SUNPLUS=y ++CONFIG_USB=y ++# CONFIG_USB_DEVICE_CLASS is not set ++CONFIG_USB_OTG_BLACKLIST_HUB=y ++CONFIG_USB_MON=y ++CONFIG_USB_STORAGE=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=m ++# CONFIG_DNOTIFY is not set ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_VFAT_FS=m ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRC_CCITT=m +diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig +new file mode 100644 +index 000000000000..4c517c443af5 +--- /dev/null ++++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig +@@ -0,0 +1,188 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF527=y ++CONFIG_BF_REV_0_2=y ++CONFIG_BFIN527_EZKIT_V2=y ++CONFIG_IRQ_USB_INT0=11 ++CONFIG_IRQ_USB_INT1=11 ++CONFIG_IRQ_USB_INT2=11 ++CONFIG_IRQ_USB_DMA=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++CONFIG_BFIN_SIR0=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set 
++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_JEDECPROBE=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_NAND=m ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_SCSI=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++CONFIG_BLK_DEV_SR=m ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_BFIN=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT_FF_MEMLESS=m ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=y ++CONFIG_KEYBOARD_ADP5520=y ++# CONFIG_KEYBOARD_ATKBD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_AD7879=y ++CONFIG_TOUCHSCREEN_AD7879_I2C=y ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_PMIC_ADP5520=y ++CONFIG_FB=y ++CONFIG_FB_BFIN_LQ035Q1=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_LOGO=y ++# CONFIG_LOGO_LINUX_MONO is not set ++# CONFIG_LOGO_LINUX_VGA16 is not set ++# CONFIG_LOGO_LINUX_CLUT224 is not set ++# CONFIG_LOGO_BLACKFIN_VGA16 is not set ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_SOC=y ++CONFIG_SND_BF5XX_I2S=y ++CONFIG_SND_BF5XX_SOC_SSM2602=y ++CONFIG_HID_A4TECH=y ++CONFIG_HID_APPLE=y ++CONFIG_HID_BELKIN=y ++CONFIG_HID_CHERRY=y ++CONFIG_HID_CHICONY=y ++CONFIG_HID_CYPRESS=y ++CONFIG_HID_EZKEY=y ++CONFIG_HID_GYRATION=y ++CONFIG_HID_LOGITECH=y ++CONFIG_HID_MICROSOFT=y ++CONFIG_HID_MONTEREY=y ++CONFIG_HID_PANTHERLORD=y ++CONFIG_HID_PETALYNX=y ++CONFIG_HID_SAMSUNG=y ++CONFIG_HID_SONY=y ++CONFIG_HID_SUNPLUS=y ++CONFIG_USB=y ++# CONFIG_USB_DEVICE_CLASS is not set ++CONFIG_USB_OTG_BLACKLIST_HUB=y ++CONFIG_USB_MON=y ++CONFIG_USB_MUSB_HDRC=y ++CONFIG_USB_MUSB_BLACKFIN=y ++CONFIG_USB_STORAGE=y ++CONFIG_USB_GADGET=y ++CONFIG_NEW_LEDS=y ++CONFIG_LEDS_CLASS=y ++CONFIG_LEDS_ADP5520=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=m ++# CONFIG_DNOTIFY is not set ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_UDF_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig +new file mode 100644 +index 000000000000..bf8df3e6cf02 
+--- /dev/null ++++ b/arch/blackfin/configs/BF527-EZKIT_defconfig +@@ -0,0 +1,181 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF527=y ++CONFIG_BF_REV_0_1=y ++CONFIG_IRQ_USB_INT0=11 ++CONFIG_IRQ_USB_INT1=11 ++CONFIG_IRQ_USB_INT2=11 ++CONFIG_IRQ_USB_DMA=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++CONFIG_BFIN_SIR0=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_JEDECPROBE=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_NAND=m ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_SCSI=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++CONFIG_BLK_DEV_SR=m ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_BFIN=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT_FF_MEMLESS=m ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_FB=y ++CONFIG_FB_BFIN_T350MCQB=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_LCD_LTV350QV=m ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_LOGO=y ++# CONFIG_LOGO_LINUX_MONO is not set ++# CONFIG_LOGO_LINUX_VGA16 is not set ++# CONFIG_LOGO_LINUX_CLUT224 is not set ++# CONFIG_LOGO_BLACKFIN_VGA16 is not set ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_SOC=y ++CONFIG_SND_BF5XX_I2S=y ++CONFIG_SND_BF5XX_SOC_SSM2602=y 
++CONFIG_HID_A4TECH=y ++CONFIG_HID_APPLE=y ++CONFIG_HID_BELKIN=y ++CONFIG_HID_CHERRY=y ++CONFIG_HID_CHICONY=y ++CONFIG_HID_CYPRESS=y ++CONFIG_HID_EZKEY=y ++CONFIG_HID_GYRATION=y ++CONFIG_HID_LOGITECH=y ++CONFIG_HID_MICROSOFT=y ++CONFIG_HID_MONTEREY=y ++CONFIG_HID_PANTHERLORD=y ++CONFIG_HID_PETALYNX=y ++CONFIG_HID_SAMSUNG=y ++CONFIG_HID_SONY=y ++CONFIG_HID_SUNPLUS=y ++CONFIG_USB=y ++# CONFIG_USB_DEVICE_CLASS is not set ++CONFIG_USB_OTG_BLACKLIST_HUB=y ++CONFIG_USB_MON=y ++CONFIG_USB_MUSB_HDRC=y ++CONFIG_MUSB_PIO_ONLY=y ++CONFIG_USB_MUSB_BLACKFIN=y ++CONFIG_MUSB_PIO_ONLY=y ++CONFIG_USB_STORAGE=y ++CONFIG_USB_GADGET=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=m ++# CONFIG_DNOTIFY is not set ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_UDF_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig +new file mode 100644 +index 000000000000..0220b3b15c53 +--- /dev/null ++++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig +@@ -0,0 +1,178 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_LOCALVERSION="DEV_0-1_pre2010" ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_PREEMPT=y ++CONFIG_BF527=y ++CONFIG_BF_REV_0_2=y ++CONFIG_BFIN527_TLL6527M=y ++CONFIG_BF527_UART1_PORTG=y ++CONFIG_IRQ_USB_INT0=11 ++CONFIG_IRQ_USB_INT1=11 ++CONFIG_IRQ_USB_INT2=11 ++CONFIG_IRQ_USB_DMA=11 ++CONFIG_BOOT_LOAD=0x400000 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=y ++CONFIG_DMA_UNCACHED_2M=y ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_0=0xFFC2 ++CONFIG_BANK_1=0xFFC2 ++CONFIG_BANK_2=0xFFC2 ++CONFIG_BANK_3=0xFFC2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++CONFIG_BFIN_SIR0=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=y 
++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_GPIO_ADDR=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_SCSI=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++CONFIG_BLK_DEV_SR=m ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_NETDEVICES=y ++CONFIG_NET_ETHERNET=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_AD7879=m ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_AD714X=y ++CONFIG_INPUT_ADXL34X=y ++# CONFIG_SERIO is not set ++CONFIG_BFIN_PPI=m ++CONFIG_BFIN_SIMPLE_TIMER=m ++CONFIG_BFIN_SPORT=m ++# CONFIG_CONSOLE_TRANSLATIONS is not set ++# CONFIG_DEVKMEM is not set ++CONFIG_BFIN_JTAG_COMM=m ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C_CHARDEV=y ++# CONFIG_I2C_HELPER_AUTO is not set ++CONFIG_I2C_SMBUS=y ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_MEDIA_SUPPORT=y ++CONFIG_VIDEO_DEV=y ++# CONFIG_MEDIA_TUNER_CUSTOMISE is not set ++CONFIG_VIDEO_HELPER_CHIPS_AUTO=y ++CONFIG_VIDEO_BLACKFIN_CAM=m ++CONFIG_OV9655=y ++CONFIG_FB=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_FONTS=y ++CONFIG_FONT_6x11=y ++CONFIG_LOGO=y ++# CONFIG_LOGO_LINUX_MONO is not set ++# CONFIG_LOGO_LINUX_VGA16 is not set ++# CONFIG_LOGO_LINUX_CLUT224 is not set ++# CONFIG_LOGO_BLACKFIN_VGA16 is not set ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_MIXER_OSS=y ++CONFIG_SND_PCM_OSS=y ++CONFIG_SND_SOC=y ++CONFIG_SND_BF5XX_I2S=y ++CONFIG_SND_BF5XX_SOC_SSM2602=y ++# CONFIG_HID_SUPPORT is not set ++# CONFIG_USB_SUPPORT is not set ++CONFIG_MMC=m ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++CONFIG_EXT2_FS=y ++# CONFIG_DNOTIFY is not set ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_UDF_FS=m ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_JFFS2_FS=y ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++# CONFIG_RPCSEC_GSS_KRB5 is not set ++CONFIG_NLS_CODEPAGE_437=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_UTF8=m ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRC7=m +diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig +new file mode 100644 +index 000000000000..6023e3fd2c48 +--- /dev/null ++++ b/arch/blackfin/configs/BF533-EZKIT_defconfig +@@ -0,0 +1,114 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# 
CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BFIN533_EZKIT=y ++CONFIG_TIMER0=11 ++CONFIG_CLKIN_HZ=27000000 ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0xAAC2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y ++CONFIG_IRTTY_SIR=m ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_JEDECPROBE=y ++CONFIG_MTD_CFI_AMDSTD=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=y ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_PLATRAM=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_SMC91X=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT=m ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_USB_SUPPORT is not set ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig +new file mode 100644 +index 000000000000..f5cd0f18b711 +--- /dev/null ++++ b/arch/blackfin/configs/BF533-STAMP_defconfig +@@ -0,0 +1,124 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_TIMER0=11 ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0xAAC2 ++CONFIG_BINFMT_FLAT=y 
++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=m ++CONFIG_MTD_CFI_AMDSTD=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_SMC91X=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=m ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_GPIO=m ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_FB=m ++CONFIG_FIRMWARE_EDID=y ++CONFIG_SOUND=m ++CONFIG_SND=m ++CONFIG_SND_MIXER_OSS=m ++CONFIG_SND_PCM_OSS=m ++CONFIG_SND_SOC=m ++CONFIG_SND_BF5XX_I2S=m ++CONFIG_SND_BF5XX_SOC_AD73311=m ++# CONFIG_USB_SUPPORT is not set ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig +new file mode 100644 +index 000000000000..48085fde7f9e +--- /dev/null ++++ b/arch/blackfin/configs/BF537-STAMP_defconfig +@@ -0,0 +1,136 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF537=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y 
++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_CAN=m ++CONFIG_CAN_RAW=m ++CONFIG_CAN_BCM=m ++CONFIG_CAN_BFIN=m ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++CONFIG_BFIN_SIR1=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=m ++CONFIG_MTD_CFI_AMDSTD=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_PHYSMAP=m ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++CONFIG_NET_BFIN=y ++CONFIG_BFIN_MAC=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=m ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_BLACKFIN_TWI=m ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_FB=m ++CONFIG_FIRMWARE_EDID=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_SOUND=m ++CONFIG_SND=m ++CONFIG_SND_MIXER_OSS=m ++CONFIG_SND_PCM_OSS=m ++CONFIG_SND_SOC=m ++CONFIG_SND_BF5XX_I2S=m ++CONFIG_SND_BF5XX_SOC_AD73311=m ++# CONFIG_USB_SUPPORT is not set ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig +new file mode 100644 +index 000000000000..12deeaaef3cb +--- /dev/null ++++ b/arch/blackfin/configs/BF538-EZKIT_defconfig +@@ -0,0 +1,133 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y 
++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF538=y ++CONFIG_IRQ_TIMER0=12 ++CONFIG_IRQ_TIMER1=12 ++CONFIG_IRQ_TIMER2=12 ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_PM=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_CAN=m ++CONFIG_CAN_RAW=m ++CONFIG_CAN_BCM=m ++CONFIG_CAN_DEV=m ++CONFIG_CAN_BFIN=m ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=m ++CONFIG_MTD_CFI_AMDSTD=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_PHYSMAP=m ++CONFIG_MTD_NAND=m ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++CONFIG_PHYLIB=y ++CONFIG_SMSC_PHY=y ++CONFIG_NET_ETHERNET=y ++CONFIG_SMC91X=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_AD7879=y ++CONFIG_TOUCHSCREEN_AD7879_SPI=y ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_DEVKMEM is not set ++CONFIG_BFIN_JTAG_COMM=m ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++CONFIG_SERIAL_BFIN_UART1=y ++CONFIG_SERIAL_BFIN_UART2=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=m ++CONFIG_I2C_BLACKFIN_TWI=m ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_FB=m ++CONFIG_FB_BFIN_LQ035Q1=m ++# CONFIG_USB_SUPPORT is not set ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_SMB_FS=m ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_RCU_CPU_STALL_DETECTOR is not set ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig +new file mode 100644 +index 000000000000..6a68ffc55b5a +--- /dev/null ++++ b/arch/blackfin/configs/BF548-EZKIT_defconfig +@@ -0,0 +1,207 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y 
++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF548_std=y ++CONFIG_IRQ_TIMER0=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_CACHELINE_ALIGNED_L1=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_DMA_UNCACHED_2M=y ++CONFIG_BFIN_EXTMEM_WRITETHROUGH=y ++CONFIG_BANK_3=0x99B2 ++CONFIG_EBIU_MBSCTLVAL=0x0 ++CONFIG_EBIU_MODEVAL=0x1 ++CONFIG_EBIU_FCTLVAL=0x6 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_CAN=m ++CONFIG_CAN_RAW=m ++CONFIG_CAN_BCM=m ++CONFIG_CAN_BFIN=m ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRTTY_SIR=m ++CONFIG_BFIN_SIR=m ++CONFIG_BFIN_SIR3=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_FW_LOADER=m ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_NAND=y ++CONFIG_MTD_NAND_BF5XX=y ++# CONFIG_MTD_NAND_BF5XX_HWECC is not set ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_RAM=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++CONFIG_BLK_DEV_SR=m ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_ATA=y ++# CONFIG_SATA_PMP is not set ++CONFIG_PATA_BF54X=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_SMSC911X=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT_FF_MEMLESS=m ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++CONFIG_INPUT_EVBUG=m ++# CONFIG_KEYBOARD_ATKBD is not set ++CONFIG_KEYBOARD_BFIN=y ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_AD7877=m ++CONFIG_INPUT_MISC=y ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART1=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_FB=y ++CONFIG_FIRMWARE_EDID=y ++CONFIG_FB_BF54X_LQ043=y ++CONFIG_FRAMEBUFFER_CONSOLE=y ++CONFIG_FONTS=y ++CONFIG_FONT_6x11=y ++CONFIG_LOGO=y ++# CONFIG_LOGO_LINUX_MONO is not set ++# CONFIG_LOGO_LINUX_VGA16 is not set ++# CONFIG_LOGO_LINUX_CLUT224 is not set ++# CONFIG_LOGO_BLACKFIN_VGA16 is not set ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_MIXER_OSS=y ++CONFIG_SND_PCM_OSS=y ++CONFIG_SND_SOC=y ++CONFIG_SND_BF5XX_AC97=y ++CONFIG_SND_BF5XX_SOC_AD1980=y ++CONFIG_HID_A4TECH=y 
++CONFIG_HID_APPLE=y
++CONFIG_HID_BELKIN=y
++CONFIG_HID_CHERRY=y
++CONFIG_HID_CHICONY=y
++CONFIG_HID_CYPRESS=y
++CONFIG_HID_EZKEY=y
++CONFIG_HID_GYRATION=y
++CONFIG_HID_LOGITECH=y
++CONFIG_HID_MICROSOFT=y
++CONFIG_HID_MONTEREY=y
++CONFIG_HID_PANTHERLORD=y
++CONFIG_HID_PETALYNX=y
++CONFIG_HID_SAMSUNG=y
++CONFIG_HID_SONY=y
++CONFIG_HID_SUNPLUS=y
++CONFIG_USB=y
++# CONFIG_USB_DEVICE_CLASS is not set
++CONFIG_USB_OTG_BLACKLIST_HUB=y
++CONFIG_USB_MON=y
++CONFIG_USB_MUSB_HDRC=y
++CONFIG_USB_MUSB_BLACKFIN=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_GADGET=y
++CONFIG_MMC=y
++CONFIG_MMC_BLOCK=m
++CONFIG_SDH_BFIN=y
++CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_BFIN=y
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++# CONFIG_DNOTIFY is not set
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_NTFS_FS=m
++CONFIG_NTFS_RW=y
++CONFIG_JFFS2_FS=m
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++CONFIG_CIFS=y
++CONFIG_NLS_CODEPAGE_437=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_UTF8=m
++CONFIG_DEBUG_SHIRQ=y
++CONFIG_DETECT_HUNG_TASK=y
++CONFIG_DEBUG_INFO=y
++# CONFIG_FTRACE is not set
++CONFIG_DEBUG_MMRS=y
++CONFIG_DEBUG_HWERR=y
++CONFIG_EXACT_HWERR=y
++CONFIG_DEBUG_DOUBLEFAULT=y
++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_CPLB_INFO=y
++CONFIG_BFIN_PSEUDODBG_INSNS=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
+diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
+new file mode 100644
+index 000000000000..e9f3ba783a4e
+--- /dev/null
++++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
+@@ -0,0 +1,149 @@
++CONFIG_EXPERIMENTAL=y
++CONFIG_SYSVIPC=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_SYSFS_DEPRECATED_V2=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_EXPERT=y
++# CONFIG_SYSCTL_SYSCALL is not set
++# CONFIG_ELF_CORE is not set
++# CONFIG_FUTEX is not set
++# CONFIG_SIGNALFD is not set
++# CONFIG_TIMERFD is not set
++# CONFIG_EVENTFD is not set
++# CONFIG_AIO is not set
++CONFIG_SLAB=y
++CONFIG_MMAP_ALLOW_UNINITIALIZED=y
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_LBDAF is not set
++# CONFIG_BLK_DEV_BSG is not set
++# CONFIG_IOSCHED_DEADLINE is not set
++CONFIG_PREEMPT=y
++CONFIG_BF561=y
++CONFIG_BF_REV_0_5=y
++CONFIG_IRQ_TIMER0=10
++CONFIG_BFIN561_ACVILON=y
++# CONFIG_BF561_COREB is not set
++CONFIG_CLKIN_HZ=12000000
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
++CONFIG_BFIN_GPTIMERS=y
++CONFIG_DMA_UNCACHED_4M=y
++CONFIG_C_CDPRIO=y
++CONFIG_BANK_0=0x99b2
++CONFIG_BANK_1=0x3350
++CONFIG_BANK_3=0xAAC2
++CONFIG_BINFMT_FLAT=y
++CONFIG_BINFMT_ZFLAT=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_LRO is not set
++# CONFIG_IPV6 is not set
++# CONFIG_WIRELESS is not set
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++# CONFIG_FW_LOADER is not set
++CONFIG_MTD=y
++CONFIG_MTD_CMDLINE_PARTS=y
++CONFIG_MTD_BLOCK=y
++CONFIG_MTD_PLATRAM=y
++CONFIG_MTD_PHRAM=y
++CONFIG_MTD_BLOCK2MTD=y
++CONFIG_MTD_NAND=y
++CONFIG_MTD_NAND_PLATFORM=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=2
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_SCSI=y
++# CONFIG_SCSI_PROC_FS is not set
++CONFIG_BLK_DEV_SD=y
++# CONFIG_SCSI_LOWLEVEL is not set
++CONFIG_NETDEVICES=y
++CONFIG_NET_ETHERNET=y
++CONFIG_SMSC911X=y
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++# CONFIG_WLAN is not set
++# CONFIG_INPUT is not set
++# CONFIG_SERIO is not set
++# CONFIG_VT is not set
++# CONFIG_DEVKMEM is not set
++CONFIG_SERIAL_BFIN=y
++CONFIG_SERIAL_BFIN_CONSOLE=y
++CONFIG_SERIAL_BFIN_PIO=y
++# CONFIG_HW_RANDOM is not set
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++CONFIG_I2C_PCA_PLATFORM=y
++CONFIG_SPI=y
++CONFIG_SPI_BFIN5XX=y
++CONFIG_SPI_SPIDEV=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_GPIO_PCF857X=y
++CONFIG_SENSORS_LM75=y
++CONFIG_WATCHDOG=y
++CONFIG_BFIN_WDT=y
++CONFIG_SOUND=y
++CONFIG_SND=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++# CONFIG_SND_DRIVERS is not set
++# CONFIG_SND_USB is not set
++CONFIG_SND_SOC=y
++CONFIG_SND_BF5XX_I2S=y
++CONFIG_SND_BF5XX_SPORT_NUM=1
++CONFIG_USB=y
++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
++# CONFIG_USB_DEVICE_CLASS is not set
++CONFIG_USB_MON=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_SERIAL=y
++CONFIG_USB_SERIAL_FTDI_SIO=y
++CONFIG_USB_SERIAL_PL2303=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_DS1307=y
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++# CONFIG_DNOTIFY is not set
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=866
++CONFIG_FAT_DEFAULT_IOCHARSET="cp1251"
++CONFIG_NTFS_FS=y
++CONFIG_CONFIGFS_FS=y
++CONFIG_JFFS2_FS=y
++CONFIG_JFFS2_COMPRESSION_OPTIONS=y
++# CONFIG_JFFS2_ZLIB is not set
++CONFIG_JFFS2_LZO=y
++# CONFIG_JFFS2_RTIME is not set
++CONFIG_JFFS2_CMODE_FAVOURLZO=y
++CONFIG_CRAMFS=y
++CONFIG_MINIX_FS=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_DEFAULT="cp1251"
++CONFIG_NLS_CODEPAGE_866=y
++CONFIG_NLS_CODEPAGE_1251=y
++CONFIG_NLS_KOI8_R=y
++CONFIG_NLS_UTF8=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_DEBUG_SHIRQ=y
++CONFIG_DETECT_HUNG_TASK=y
++# CONFIG_DEBUG_BUGVERBOSE is not set
++CONFIG_DEBUG_INFO=y
++# CONFIG_RCU_CPU_STALL_DETECTOR is not set
++# CONFIG_FTRACE is not set
++CONFIG_DEBUG_MMRS=y
++# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
++CONFIG_CPLB_INFO=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
+diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+new file mode 100644
+index 000000000000..89b75a6c3fab
+--- /dev/null
++++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+@@ -0,0 +1,112 @@
++CONFIG_EXPERIMENTAL=y
++CONFIG_SYSVIPC=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EXPERT=y
++# CONFIG_ELF_CORE is not set
++# CONFIG_FUTEX is not set
++# CONFIG_SIGNALFD is not set
++# CONFIG_TIMERFD is not set
++# CONFIG_EVENTFD is not set
++# CONFIG_AIO is not set
++CONFIG_SLAB=y
++CONFIG_MMAP_ALLOW_UNINITIALIZED=y
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_LBDAF is not set
++# CONFIG_BLK_DEV_BSG is not set
++# CONFIG_IOSCHED_DEADLINE is not set
++# CONFIG_IOSCHED_CFQ is not set
++CONFIG_PREEMPT=y
++CONFIG_BF561=y
++CONFIG_SMP=y
++CONFIG_IRQ_TIMER0=10
++CONFIG_CLKIN_HZ=30000000
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
++CONFIG_BFIN_GPTIMERS=m
++CONFIG_C_CDPRIO=y
++CONFIG_BANK_3=0xAAC2
++CONFIG_BINFMT_FLAT=y
++CONFIG_BINFMT_ZFLAT=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++# CONFIG_INET_DIAG is not set
++# CONFIG_IPV6 is not set
++CONFIG_IRDA=m
++CONFIG_IRLAN=m
++CONFIG_IRCOMM=m
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRTTY_SIR=m ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_AMDSTD=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_PHYSMAP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_SMC91X=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT=m ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_USB_SUPPORT is not set ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig +new file mode 100644 +index 000000000000..67b3d2f419ba +--- /dev/null ++++ b/arch/blackfin/configs/BF561-EZKIT_defconfig +@@ -0,0 +1,114 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF561=y ++CONFIG_IRQ_TIMER0=10 ++CONFIG_CLKIN_HZ=30000000 ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=m ++CONFIG_BFIN_EXTMEM_WRITETHROUGH=y ++CONFIG_BFIN_L2_DCACHEABLE=y ++CONFIG_BFIN_L2_WRITETHROUGH=y ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0xAAC2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++CONFIG_IRDA=m ++CONFIG_IRLAN=m ++CONFIG_IRCOMM=m ++CONFIG_IRDA_CACHE_LAST_LSAP=y ++CONFIG_IRTTY_SIR=m ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y 
++CONFIG_MTD_CFI_AMDSTD=y ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_PHYSMAP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++CONFIG_SMC91X=y ++# CONFIG_NET_VENDOR_STMICRO is not set ++# CONFIG_WLAN is not set ++CONFIG_INPUT=m ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_JTAG_COMM=m ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_USB_SUPPORT is not set ++# CONFIG_DNOTIFY is not set ++CONFIG_JFFS2_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_MMRS=y ++CONFIG_DEBUG_HWERR=y ++CONFIG_EXACT_HWERR=y ++CONFIG_DEBUG_DOUBLEFAULT=y ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO=y ++# CONFIG_CRYPTO_ANSI_CPRNG is not set +diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig +new file mode 100644 +index 000000000000..8cc75d4218fb +--- /dev/null ++++ b/arch/blackfin/configs/BF609-EZKIT_defconfig +@@ -0,0 +1,154 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_EXPERT=y ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_SIGNALFD is not set ++# CONFIG_TIMERFD is not set ++# CONFIG_EVENTFD is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_LBDAF is not set ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_PREEMPT=y ++CONFIG_BF609=y ++CONFIG_PINT1_ASSIGN=0x01010000 ++CONFIG_PINT2_ASSIGN=0x07000101 ++CONFIG_PINT3_ASSIGN=0x02020303 ++CONFIG_IP_CHECKSUM_L1=y ++CONFIG_SYSCALL_TAB_L1=y ++CONFIG_CPLB_SWITCH_TAB_L1=y ++# CONFIG_APP_STACK_L1 is not set ++# CONFIG_BFIN_INS_LOWOVERHEAD is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_PM_BFIN_WAKE_PE12=y ++CONFIG_PM_BFIN_WAKE_PE12_POL=1 ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_GOV_POWERSAVE=y ++CONFIG_CPU_FREQ_GOV_ONDEMAND=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_IP_PNP_BOOTP=y ++CONFIG_IP_PNP_RARP=y ++# CONFIG_IPV6 is not set ++CONFIG_NETFILTER=y ++CONFIG_CAN=y ++CONFIG_CAN_BFIN=y ++CONFIG_IRDA=y ++CONFIG_IRTTY_SIR=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_FW_LOADER=m ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_INTELEXT=y ++CONFIG_MTD_CFI_STAA=y ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_SPI_NOR=y 
++CONFIG_MTD_UBI=m ++CONFIG_SCSI=y ++CONFIG_BLK_DEV_SD=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_VENDOR_BROADCOM is not set ++# CONFIG_NET_VENDOR_CHELSIO is not set ++# CONFIG_NET_VENDOR_INTEL is not set ++# CONFIG_NET_VENDOR_MARVELL is not set ++# CONFIG_NET_VENDOR_MICREL is not set ++# CONFIG_NET_VENDOR_MICROCHIP is not set ++# CONFIG_NET_VENDOR_NATSEMI is not set ++# CONFIG_NET_VENDOR_SEEQ is not set ++# CONFIG_NET_VENDOR_SMSC is not set ++CONFIG_STMMAC_ETH=y ++CONFIG_STMMAC_IEEE1588=y ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_BFIN_ROTARY=y ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_BFIN_SIMPLE_TIMER=m ++# CONFIG_BFIN_CRC is not set ++CONFIG_BFIN_LINKPORT=y ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++CONFIG_SERIAL_BFIN_UART0=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_BLACKFIN_TWI=y ++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 ++CONFIG_SPI=y ++CONFIG_SPI_ADI_V3=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_PINCTRL_MCP23S08=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++CONFIG_SOUND=m ++CONFIG_SND=m ++CONFIG_SND_MIXER_OSS=m ++CONFIG_SND_PCM_OSS=m ++# CONFIG_SND_DRIVERS is not set ++# CONFIG_SND_SPI is not set ++# CONFIG_SND_USB is not set ++CONFIG_SND_SOC=m ++CONFIG_USB=y ++CONFIG_USB_MUSB_HDRC=y ++CONFIG_USB_MUSB_BLACKFIN=m ++CONFIG_USB_STORAGE=y ++CONFIG_USB_GADGET=y ++CONFIG_USB_GADGET_MUSB_HDRC=y ++CONFIG_USB_ZERO=y ++CONFIG_MMC=y ++CONFIG_SDH_BFIN=y ++# CONFIG_IOMMU_SUPPORT is not set ++CONFIG_EXT2_FS=y ++# CONFIG_DNOTIFY is not set ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_JFFS2_FS=m ++CONFIG_UBIFS_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_DEBUG_FS=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_INFO=y ++CONFIG_FRAME_POINTER=y ++# CONFIG_FTRACE is not set ++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_BFIN_PSEUDODBG_INSNS=y ++CONFIG_CRYPTO_HMAC=m ++CONFIG_CRYPTO_MD4=m ++CONFIG_CRYPTO_MD5=m ++CONFIG_CRYPTO_ARC4=m ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++CONFIG_CRYPTO_DEV_BFIN_CRC=m +diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig +new file mode 100644 +index 000000000000..9faf0ec7007f +--- /dev/null ++++ b/arch/blackfin/configs/BlackStamp_defconfig +@@ -0,0 +1,108 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_SYSFS_DEPRECATED_V2=y ++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_PREEMPT=y ++CONFIG_BF532=y ++CONFIG_BF_REV_0_5=y ++CONFIG_BLACKSTAMP=y ++CONFIG_TIMER0=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_ROMKERNEL=y ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=y ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0xAAC2 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_BINFMT_SHARED_FLAT=y ++CONFIG_PM=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y 
++CONFIG_IP_PNP=y ++# CONFIG_INET_LRO is not set ++# CONFIG_IPV6 is not set ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=m ++CONFIG_MTD_CFI_AMDSTD=m ++CONFIG_MTD_RAM=y ++CONFIG_MTD_ROM=m ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_M25P80=y ++CONFIG_MTD_SPI_NOR=y ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_NBD=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_MISC_DEVICES=y ++CONFIG_EEPROM_AT25=y ++CONFIG_NETDEVICES=y ++CONFIG_NET_ETHERNET=y ++CONFIG_SMC91X=y ++# CONFIG_NETDEV_1000 is not set ++# CONFIG_NETDEV_10000 is not set ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++CONFIG_INPUT_EVDEV=m ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++CONFIG_SERIAL_BFIN=y ++CONFIG_SERIAL_BFIN_CONSOLE=y ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_HW_RANDOM=y ++CONFIG_I2C=m ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_GPIO=m ++CONFIG_SPI=y ++CONFIG_SPI_BFIN5XX=y ++CONFIG_SPI_SPIDEV=m ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_BFIN_WDT=y ++# CONFIG_USB_SUPPORT is not set ++CONFIG_MMC=y ++CONFIG_MMC_SPI=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_BFIN=y ++# CONFIG_DNOTIFY is not set ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_JFFS2_FS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3=y ++CONFIG_NFS_V4=y ++CONFIG_SMB_FS=y ++CONFIG_CIFS=y ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_UTF8=y ++CONFIG_SYSCTL_SYSCALL_CHECK=y ++CONFIG_DEBUG_MMRS=y ++# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set ++CONFIG_EARLY_PRINTK=y ++CONFIG_CPLB_INFO=y ++CONFIG_CRC_CCITT=m +diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig +new file mode 100644 +index 000000000000..4a1ad4fd7bb2 +--- /dev/null ++++ b/arch/blackfin/configs/CM-BF527_defconfig +@@ -0,0 +1,129 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_KERNEL_LZMA=y ++CONFIG_SYSVIPC=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_RD_GZIP is not set ++CONFIG_RD_LZMA=y ++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ++CONFIG_EXPERT=y ++# CONFIG_SYSCTL_SYSCALL is not set ++# CONFIG_ELF_CORE is not set ++# CONFIG_FUTEX is not set ++# CONFIG_AIO is not set ++CONFIG_SLAB=y ++CONFIG_MMAP_ALLOW_UNINITIALIZED=y ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_PREEMPT=y ++CONFIG_BF527=y ++CONFIG_BF_REV_0_1=y ++CONFIG_IRQ_TIMER0=12 ++CONFIG_BFIN527_BLUETECHNIX_CM=y ++CONFIG_IRQ_USB_INT0=11 ++CONFIG_IRQ_USB_INT1=11 ++CONFIG_IRQ_USB_INT2=11 ++CONFIG_IRQ_USB_DMA=11 ++# CONFIG_CYCLES_CLOCKSOURCE is not set ++# CONFIG_SCHEDULE_L1 is not set ++# CONFIG_MEMSET_L1 is not set ++# CONFIG_MEMCPY_L1 is not set ++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set ++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 ++CONFIG_BFIN_GPTIMERS=y ++CONFIG_C_CDPRIO=y ++CONFIG_BANK_3=0xFFC0 ++CONFIG_BINFMT_FLAT=y ++CONFIG_BINFMT_ZFLAT=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_PNP=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++# CONFIG_IPV6 is not set ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FW_LOADER is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y 
++CONFIG_MTD_CFI_INTELEXT=y
++CONFIG_MTD_RAM=y
++CONFIG_MTD_ROM=m
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_GPIO_ADDR=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_SCSI=y
++CONFIG_BLK_DEV_SD=y
++# CONFIG_SCSI_LOWLEVEL is not set
++CONFIG_NETDEVICES=y
++CONFIG_NET_ETHERNET=y
++CONFIG_BFIN_MAC=y
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++# CONFIG_WLAN is not set
++# CONFIG_INPUT is not set
++# CONFIG_SERIO is not set
++# CONFIG_VT is not set
++# CONFIG_DEVKMEM is not set
++CONFIG_SERIAL_BFIN=y
++CONFIG_SERIAL_BFIN_CONSOLE=y
++CONFIG_SERIAL_BFIN_UART0=y
++CONFIG_SERIAL_BFIN_UART1=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_HW_RANDOM is not set
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=m
++CONFIG_I2C_BLACKFIN_TWI=m
++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
++CONFIG_SPI=y
++CONFIG_SPI_BFIN5XX=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_WATCHDOG=y
++CONFIG_BFIN_WDT=y
++CONFIG_USB=m
++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
++# CONFIG_USB_DEVICE_CLASS is not set
++CONFIG_USB_OTG_BLACKLIST_HUB=y
++CONFIG_USB_MON=m
++CONFIG_USB_MUSB_HDRC=m
++CONFIG_USB_MUSB_PERIPHERAL=y
++CONFIG_USB_GADGET_MUSB_HDRC=y
++CONFIG_MUSB_PIO_ONLY=y
++CONFIG_USB_STORAGE=m
++CONFIG_USB_GADGET=m
++CONFIG_USB_ETH=m
++CONFIG_USB_MASS_STORAGE=m
++CONFIG_USB_G_SERIAL=m
++CONFIG_USB_G_PRINTER=m
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_BFIN=y
++# CONFIG_DNOTIFY is not set
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_JFFS2_FS=y
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_SMB_FS=m
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ISO8859_1=y
++CONFIG_DEBUG_FS=y
++# CONFIG_RCU_CPU_STALL_DETECTOR is not set
++# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
++CONFIG_EARLY_PRINTK=y
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRC_CCITT=m
++CONFIG_CRC_ITU_T=y
++CONFIG_CRC7=y
+diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
+new file mode 100644
+index 000000000000..9d787e28bbe8
+--- /dev/null
++++ b/arch/blackfin/configs/PNAV-10_defconfig
+@@ -0,0 +1,111 @@
++CONFIG_EXPERIMENTAL=y
++CONFIG_SYSVIPC=y
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_EXPERT=y
++# CONFIG_SYSCTL_SYSCALL is not set
++# CONFIG_ELF_CORE is not set
++# CONFIG_FUTEX is not set
++# CONFIG_AIO is not set
++CONFIG_SLAB=y
++CONFIG_MMAP_ALLOW_UNINITIALIZED=y
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_LBDAF is not set
++# CONFIG_BLK_DEV_BSG is not set
++# CONFIG_IOSCHED_DEADLINE is not set
++# CONFIG_IOSCHED_CFQ is not set
++CONFIG_PREEMPT=y
++CONFIG_BF537=y
++CONFIG_IRQ_TIMER0=12
++CONFIG_PNAV10=y
++# CONFIG_CYCLES_CLOCKSOURCE is not set
++CONFIG_IP_CHECKSUM_L1=y
++CONFIG_SYSCALL_TAB_L1=y
++CONFIG_CPLB_SWITCH_TAB_L1=y
++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
++CONFIG_BFIN_GPTIMERS=y
++CONFIG_C_CDPRIO=y
++CONFIG_BANK_1=0x33B0
++CONFIG_BANK_2=0x33B0
++CONFIG_BANK_3=0x99B2
++CONFIG_BINFMT_FLAT=y
++CONFIG_BINFMT_ZFLAT=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++# CONFIG_INET_DIAG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_WIRELESS is not set
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++# CONFIG_FW_LOADER is not set
++CONFIG_MTD=y
++CONFIG_MTD_BLOCK=y
++CONFIG_MTD_RAM=y
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_UCLINUX=y
++CONFIG_MTD_NAND=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_NETDEVICES=y
++CONFIG_NET_ETHERNET=y
++CONFIG_BFIN_MAC=y
++# CONFIG_BFIN_MAC_USE_L1 is not set
++CONFIG_BFIN_TX_DESC_NUM=100
++CONFIG_BFIN_RX_DESC_NUM=100
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++# CONFIG_WLAN is not set
++# CONFIG_INPUT_MOUSEDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_AD7877=y
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_UINPUT=y
++# CONFIG_SERIO is not set
++# CONFIG_VT is not set
++CONFIG_SERIAL_BFIN=y
++CONFIG_SERIAL_BFIN_CONSOLE=y
++CONFIG_SERIAL_BFIN_UART0=y
++CONFIG_SERIAL_BFIN_UART1=y
++# CONFIG_LEGACY_PTYS is not set
++CONFIG_HW_RANDOM=y
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++CONFIG_I2C_BLACKFIN_TWI=y
++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
++CONFIG_SPI=y
++CONFIG_SPI_BFIN5XX=y
++CONFIG_FB=y
++CONFIG_FIRMWARE_EDID=y
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_LCD_CLASS_DEVICE=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++CONFIG_SOUND=y
++CONFIG_SND=m
++# CONFIG_SND_SUPPORT_OLD_API is not set
++# CONFIG_SND_VERBOSE_PROCFS is not set
++CONFIG_SOUND_PRIME=y
++# CONFIG_HID is not set
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_BFIN=y
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++# CONFIG_DNOTIFY is not set
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_SMB_FS=m
++# CONFIG_RCU_CPU_STALL_DETECTOR is not set
++# CONFIG_DEBUG_HUNT_FOR_ZERO is not set
++# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
++# CONFIG_ACCESS_CHECK is not set
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRC_CCITT=m
+diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
+new file mode 100644
+index 000000000000..225df32dc9a8
+--- /dev/null
++++ b/arch/blackfin/configs/SRV1_defconfig
+@@ -0,0 +1,88 @@
++CONFIG_EXPERIMENTAL=y
++CONFIG_SYSVIPC=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_EXPERT=y
++# CONFIG_SYSCTL_SYSCALL is not set
++CONFIG_KALLSYMS_ALL=y
++# CONFIG_ELF_CORE is not set
++# CONFIG_FUTEX is not set
++CONFIG_SLAB=y
++CONFIG_MMAP_ALLOW_UNINITIALIZED=y
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_IOSCHED_DEADLINE is not set
++CONFIG_PREEMPT=y
++CONFIG_BF537=y
++CONFIG_IRQ_TIMER0=12
++CONFIG_BOOT_LOAD=0x400000
++CONFIG_CLKIN_HZ=22118400
++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
++CONFIG_DMA_UNCACHED_2M=y
++CONFIG_C_CDPRIO=y
++CONFIG_BINFMT_FLAT=y
++CONFIG_BINFMT_ZFLAT=y
++CONFIG_PM=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++# CONFIG_IPV6 is not set
++CONFIG_IRDA=m
++CONFIG_IRLAN=m
++CONFIG_IRCOMM=m
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRTTY_SIR=m
++# CONFIG_WIRELESS is not set
++# CONFIG_FW_LOADER is not set
++CONFIG_MTD=y
++CONFIG_MTD_BLOCK=y
++CONFIG_MTD_JEDECPROBE=m
++CONFIG_MTD_RAM=y
++CONFIG_MTD_ROM=m
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_UCLINUX=y
++CONFIG_MTD_NAND=m
++CONFIG_BLK_DEV_RAM=y
++CONFIG_MISC_DEVICES=y
++CONFIG_EEPROM_AT25=m
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++# CONFIG_WLAN is not set
++# CONFIG_INPUT_MOUSEDEV is not set
++CONFIG_INPUT_EVDEV=m
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_UINPUT=y
++# CONFIG_SERIO is not set
++# CONFIG_VT is not set
++CONFIG_SERIAL_BFIN=y
++CONFIG_SERIAL_BFIN_CONSOLE=y
++CONFIG_SERIAL_BFIN_UART0=y
++# CONFIG_LEGACY_PTYS is not set
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++CONFIG_I2C_BLACKFIN_TWI=y
++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
++CONFIG_SPI=y
++CONFIG_SPI_BFIN5XX=y
++CONFIG_HWMON=m
++CONFIG_WATCHDOG=y
++CONFIG_BFIN_WDT=y
++# CONFIG_HID is not set
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++# CONFIG_DNOTIFY is not set
++CONFIG_JFFS2_FS=m
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_SMB_FS=m
++CONFIG_DEBUG_KERNEL=y
++# CONFIG_DEBUG_BUGVERBOSE is not set
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
++CONFIG_CPLB_INFO=y
+diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
+new file mode 100644
+index 000000000000..425c24e43c34
+--- /dev/null
++++ b/arch/blackfin/configs/TCM-BF518_defconfig
+@@ -0,0 +1,131 @@
++CONFIG_EXPERIMENTAL=y
++CONFIG_KERNEL_LZMA=y
++CONFIG_SYSVIPC=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_RD_GZIP is not set
++CONFIG_RD_LZMA=y
++CONFIG_EXPERT=y
++# CONFIG_SYSCTL_SYSCALL is not set
++# CONFIG_ELF_CORE is not set
++# CONFIG_FUTEX is not set
++# CONFIG_SIGNALFD is not set
++# CONFIG_TIMERFD is not set
++# CONFIG_EVENTFD is not set
++# CONFIG_AIO is not set
++CONFIG_SLAB=y
++CONFIG_MMAP_ALLOW_UNINITIALIZED=y
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_LBDAF is not set
++# CONFIG_BLK_DEV_BSG is not set
++# CONFIG_IOSCHED_DEADLINE is not set
++# CONFIG_IOSCHED_CFQ is not set
++CONFIG_PREEMPT=y
++CONFIG_BF518=y
++CONFIG_BF_REV_0_1=y
++CONFIG_BFIN518F_TCM=y
++CONFIG_IRQ_TIMER0=12
++# CONFIG_CYCLES_CLOCKSOURCE is not set
++# CONFIG_SCHEDULE_L1 is not set
++# CONFIG_MEMSET_L1 is not set
++# CONFIG_MEMCPY_L1 is not set
++# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set
++CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
++CONFIG_BFIN_GPTIMERS=m
++CONFIG_C_CDPRIO=y
++CONFIG_BANK_3=0x99B2
++CONFIG_BINFMT_FLAT=y
++CONFIG_BINFMT_ZFLAT=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_PNP=y
++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
++# CONFIG_INET_XFRM_MODE_TUNNEL is not set
++# CONFIG_INET_XFRM_MODE_BEET is not set
++# CONFIG_INET_LRO is not set
++# CONFIG_INET_DIAG is not set
++# CONFIG_IPV6 is not set
++# CONFIG_WIRELESS is not set
++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
++# CONFIG_FW_LOADER is not set
++CONFIG_MTD=y
++CONFIG_MTD_CMDLINE_PARTS=y
++CONFIG_MTD_BLOCK=y
++CONFIG_MTD_CFI=y
++CONFIG_MTD_CFI_ADV_OPTIONS=y
++CONFIG_MTD_CFI_GEOMETRY=y
++# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
++# CONFIG_MTD_CFI_I2 is not set
++CONFIG_MTD_CFI_INTELEXT=y
++CONFIG_MTD_RAM=y
++CONFIG_MTD_ROM=m
++CONFIG_MTD_PHYSMAP=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_NETDEVICES=y
++CONFIG_NET_ETHERNET=y
++CONFIG_BFIN_MAC=y
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++# CONFIG_WLAN is not set
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++CONFIG_INPUT_MISC=y
++# CONFIG_SERIO is not set
++# CONFIG_DEVKMEM is not set
++CONFIG_BFIN_JTAG_COMM=m
++CONFIG_SERIAL_BFIN=y
++CONFIG_SERIAL_BFIN_CONSOLE=y
++CONFIG_SERIAL_BFIN_UART0=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_HW_RANDOM is not set
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++CONFIG_I2C_BLACKFIN_TWI=y
++CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
++CONFIG_SPI=y
++CONFIG_SPI_BFIN5XX=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_SYSFS=y
++# CONFIG_HWMON is not set
++CONFIG_WATCHDOG=y
++CONFIG_BFIN_WDT=y
++# CONFIG_HID_SUPPORT is not set
++# CONFIG_USB_SUPPORT is not set
++CONFIG_MMC=y
++CONFIG_MMC_DEBUG=y
++CONFIG_MMC_SPI=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_BFIN=y
++CONFIG_EXT2_FS=y
++# CONFIG_DNOTIFY is not set
++CONFIG_VFAT_FS=m
++# CONFIG_MISC_FILESYSTEMS is not set
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_ROOT_NFS=y
++CONFIG_NLS_CODEPAGE_437=m
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_UTF8=m
++CONFIG_DEBUG_KERNEL=y
++CONFIG_DEBUG_SHIRQ=y
++CONFIG_DETECT_HUNG_TASK=y
++CONFIG_DEBUG_INFO=y
++# CONFIG_RCU_CPU_STALL_DETECTOR is not set
++# CONFIG_FTRACE is not set
++CONFIG_DEBUG_MMRS=y
++CONFIG_DEBUG_HWERR=y
++CONFIG_EXACT_HWERR=y
++CONFIG_DEBUG_DOUBLEFAULT=y
++CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_CPLB_INFO=y
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++CONFIG_CRC_CCITT=m
+diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
+index 8bcb61a6ec15..6b0912ba544a 100644
+--- a/arch/mips/configs/fuloong2e_defconfig
++++ b/arch/mips/configs/fuloong2e_defconfig
+@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_BSD_PROCESS_ACCT=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
+index 9d9af5f923c3..acd27816d300 100644
+--- a/arch/mips/configs/gpr_defconfig
++++ b/arch/mips/configs/gpr_defconfig
+@@ -1,8 +1,8 @@
++CONFIG_PREEMPT=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
+ CONFIG_BSD_PROCESS_ACCT=y
+ CONFIG_BSD_PROCESS_ACCT_V3=y
+ CONFIG_RELAY=y
+diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
+index ff40fbc2f439..2bca2257a8bb 100644
+--- a/arch/mips/configs/ip22_defconfig
++++ b/arch/mips/configs/ip22_defconfig
+@@ -1,7 +1,7 @@
+ CONFIG_SYSVIPC=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+ CONFIG_LOG_BUF_SHIFT=14
+diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
+index 0921ef38e9fb..6da05cef46f8 100644
+--- a/arch/mips/configs/ip28_defconfig
++++ b/arch/mips/configs/ip28_defconfig
+@@ -1,5 +1,5 @@
+ CONFIG_SYSVIPC=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+ CONFIG_LOG_BUF_SHIFT=14
+diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
+index 328d4dfeb4cb..e17cb23173ea 100644
+--- a/arch/mips/configs/jazz_defconfig
++++ b/arch/mips/configs/jazz_defconfig
+@@ -1,6 +1,6 @@
++CONFIG_PREEMPT=y
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+-CONFIG_PREEMPT_VOLUNTARY=y
+ CONFIG_BSD_PROCESS_ACCT=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
+index 16bef819fe98..a426aeb3a603 100644
+--- a/arch/mips/configs/mtx1_defconfig
++++ b/arch/mips/configs/mtx1_defconfig
+@@ -1,8 +1,8 @@
++CONFIG_PREEMPT=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_AUDIT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
+ CONFIG_BSD_PROCESS_ACCT=y
+ CONFIG_BSD_PROCESS_ACCT_V3=y
+ CONFIG_RELAY=y
+diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
+index 4ecb157e56d4..ea7309283b01 100644
+--- a/arch/mips/configs/nlm_xlr_defconfig
++++ b/arch/mips/configs/nlm_xlr_defconfig
+@@ -1,10 +1,10 @@
++CONFIG_PREEMPT=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_AUDIT=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
+ CONFIG_BSD_PROCESS_ACCT=y
+ CONFIG_BSD_PROCESS_ACCT_V3=y
+ CONFIG_TASKSTATS=y
+diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
+index 63fe2da1b37f..7f08ee237345 100644
+--- a/arch/mips/configs/pic32mzda_defconfig
++++ b/arch/mips/configs/pic32mzda_defconfig
+@@ -1,7 +1,7 @@
++CONFIG_PREEMPT=y
+ CONFIG_SYSVIPC=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+ CONFIG_LOG_BUF_SHIFT=14
+diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
+index 2f08d071ada6..11118f5f507a 100644
+--- a/arch/mips/configs/pistachio_defconfig
++++ b/arch/mips/configs/pistachio_defconfig
+@@ -1,9 +1,9 @@
++CONFIG_PREEMPT=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ CONFIG_DEFAULT_HOSTNAME="localhost"
+ CONFIG_SYSVIPC=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
+ CONFIG_IKCONFIG=m
+ CONFIG_IKCONFIG_PROC=y
+ CONFIG_LOG_BUF_SHIFT=18
+diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig
+index aa0b169800e0..d2177f598a1d 100644
+--- a/arch/mips/configs/pnx8335_stb225_defconfig
++++ b/arch/mips/configs/pnx8335_stb225_defconfig
+@@ -1,9 +1,9 @@
++CONFIG_PREEMPT=y
+ # CONFIG_LOCALVERSION_AUTO is not set
+ # CONFIG_SWAP is not set
+ CONFIG_SYSVIPC=y
+ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+-CONFIG_PREEMPT_VOLUNTARY=y
+ CONFIG_LOG_BUF_SHIFT=14
+ CONFIG_EXPERT=y
+ CONFIG_SLAB=y
+diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
+index 0f4b09f8a0ee..6ba5086f6dff 100644
+--- a/arch/mips/configs/rm200_defconfig
++++ b/arch/mips/configs/rm200_defconfig
+@@ -1,6 +1,6 @@
++CONFIG_PREEMPT=y
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+-CONFIG_PREEMPT_VOLUNTARY=y
+ CONFIG_BSD_PROCESS_ACCT=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
+diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
+index ccc109761f44..a6a5b0b7a9c9 100644
+--- a/arch/parisc/configs/712_defconfig
++++ b/arch/parisc/configs/712_defconfig
+@@ -13,7 +13,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ CONFIG_PA7100LC=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_GSC_LASI=y
+ # CONFIG_PDC_CHASSIS is not set
+ CONFIG_BINFMT_MISC=m
+diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
+index 8d41a73bd71b..b8e0a6662ff9 100644
+--- a/arch/parisc/configs/c3000_defconfig
++++ b/arch/parisc/configs/c3000_defconfig
+@@ -13,7 +13,7 @@ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ CONFIG_PA8X00=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ # CONFIG_GSC is not set
+ CONFIG_PCI=y
+ CONFIG_PCI_LBA=y
+diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
+index 52c9050a7c5c..8d86d2e989f4 100644
+--- a/arch/parisc/configs/default_defconfig
++++ b/arch/parisc/configs/default_defconfig
+@@ -14,7 +14,7 @@ CONFIG_MODULE_UNLOAD=y
+ CONFIG_MODULE_FORCE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+ CONFIG_PA7100LC=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_IOMMU_CCIO=y
+ CONFIG_GSC_LASI=y
+ CONFIG_GSC_WAX=y
+diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
+new file mode 100644
+index 000000000000..04fee07ea6c5
+--- /dev/null
++++ b/arch/powerpc/configs/c2k_defconfig
+@@ -0,0 +1,389 @@
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_AUDIT=y
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_PROFILING=y
++CONFIG_OPROFILE=m
++CONFIG_KPROBES=y
++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODVERSIONS=y ++CONFIG_PARTITION_ADVANCED=y ++CONFIG_OSF_PARTITION=y ++CONFIG_MAC_PARTITION=y ++CONFIG_BSD_DISKLABEL=y ++CONFIG_MINIX_SUBPARTITION=y ++CONFIG_SOLARIS_X86_PARTITION=y ++CONFIG_UNIXWARE_DISKLABEL=y ++CONFIG_SGI_PARTITION=y ++CONFIG_SUN_PARTITION=y ++# CONFIG_PPC_CHRP is not set ++# CONFIG_PPC_PMAC is not set ++CONFIG_EMBEDDED6xx=y ++CONFIG_PPC_C2K=y ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y ++CONFIG_CPU_FREQ_GOV_PERFORMANCE=y ++CONFIG_CPU_FREQ_GOV_POWERSAVE=m ++CONFIG_CPU_FREQ_GOV_ONDEMAND=m ++CONFIG_GEN_RTC=y ++CONFIG_HIGHMEM=y ++CONFIG_PREEMPT=y ++CONFIG_BINFMT_MISC=y ++CONFIG_PM=y ++CONFIG_PCI_MSI=y ++CONFIG_HOTPLUG_PCI=y ++CONFIG_HOTPLUG_PCI_SHPC=m ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_XFRM_USER=y ++CONFIG_NET_KEY=m ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_IP_ADVANCED_ROUTER=y ++CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IP_ROUTE_MULTIPATH=y ++CONFIG_IP_ROUTE_VERBOSE=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_NET_IPIP=m ++CONFIG_IP_MROUTE=y ++CONFIG_IP_PIMSM_V1=y ++CONFIG_IP_PIMSM_V2=y ++CONFIG_SYN_COOKIES=y ++CONFIG_INET_AH=m ++CONFIG_INET_ESP=m ++CONFIG_INET_IPCOMP=m ++CONFIG_INET6_AH=m ++CONFIG_INET6_ESP=m ++CONFIG_INET6_IPCOMP=m ++CONFIG_IPV6_TUNNEL=m ++CONFIG_NETFILTER=y ++# CONFIG_NETFILTER_XT_MATCH_SCTP is not set ++CONFIG_IP_NF_IPTABLES=m ++CONFIG_IP_NF_MATCH_ECN=m ++CONFIG_IP_NF_MATCH_TTL=m ++CONFIG_IP_NF_FILTER=m ++CONFIG_IP_NF_TARGET_REJECT=m ++CONFIG_IP_NF_MANGLE=m ++CONFIG_IP_NF_TARGET_ECN=m ++CONFIG_IP_NF_RAW=m ++CONFIG_IP_NF_ARPTABLES=m ++CONFIG_IP_NF_ARPFILTER=m ++CONFIG_IP_NF_ARP_MANGLE=m ++CONFIG_IP6_NF_IPTABLES=m ++CONFIG_IP6_NF_MATCH_EUI64=m ++CONFIG_IP6_NF_MATCH_FRAG=m ++CONFIG_IP6_NF_MATCH_OPTS=m ++CONFIG_IP6_NF_MATCH_HL=m ++CONFIG_IP6_NF_MATCH_IPV6HEADER=m ++CONFIG_IP6_NF_MATCH_RT=m ++CONFIG_IP6_NF_FILTER=m ++CONFIG_IP6_NF_MANGLE=m ++CONFIG_IP6_NF_RAW=m ++CONFIG_BRIDGE_NF_EBTABLES=m ++CONFIG_BRIDGE_EBT_BROUTE=m ++CONFIG_BRIDGE_EBT_T_FILTER=m ++CONFIG_BRIDGE_EBT_T_NAT=m ++CONFIG_BRIDGE_EBT_802_3=m ++CONFIG_BRIDGE_EBT_AMONG=m ++CONFIG_BRIDGE_EBT_ARP=m ++CONFIG_BRIDGE_EBT_IP=m ++CONFIG_BRIDGE_EBT_LIMIT=m ++CONFIG_BRIDGE_EBT_MARK=m ++CONFIG_BRIDGE_EBT_PKTTYPE=m ++CONFIG_BRIDGE_EBT_STP=m ++CONFIG_BRIDGE_EBT_VLAN=m ++CONFIG_BRIDGE_EBT_ARPREPLY=m ++CONFIG_BRIDGE_EBT_DNAT=m ++CONFIG_BRIDGE_EBT_MARK_T=m ++CONFIG_BRIDGE_EBT_REDIRECT=m ++CONFIG_BRIDGE_EBT_SNAT=m ++CONFIG_BRIDGE_EBT_LOG=m ++CONFIG_IP_SCTP=m ++CONFIG_ATM=m ++CONFIG_ATM_CLIP=m ++CONFIG_ATM_LANE=m ++CONFIG_ATM_BR2684=m ++CONFIG_BRIDGE=m ++CONFIG_VLAN_8021Q=m ++CONFIG_NET_SCHED=y ++CONFIG_NET_SCH_CBQ=m ++CONFIG_NET_SCH_HTB=m ++CONFIG_NET_SCH_HFSC=m ++CONFIG_NET_SCH_ATM=m ++CONFIG_NET_SCH_PRIO=m ++CONFIG_NET_SCH_RED=m ++CONFIG_NET_SCH_SFQ=m ++CONFIG_NET_SCH_TEQL=m ++CONFIG_NET_SCH_TBF=m ++CONFIG_NET_SCH_GRED=m ++CONFIG_NET_SCH_DSMARK=m ++CONFIG_NET_SCH_NETEM=m ++CONFIG_NET_CLS_TCINDEX=m ++CONFIG_NET_CLS_ROUTE4=m ++CONFIG_NET_CLS_FW=m ++CONFIG_NET_CLS_U32=m ++CONFIG_CLS_U32_PERF=y ++CONFIG_NET_CLS_RSVP=m ++CONFIG_NET_CLS_RSVP6=m ++CONFIG_NET_CLS_IND=y ++CONFIG_BT=m ++CONFIG_BT_RFCOMM=m ++CONFIG_BT_RFCOMM_TTY=y ++CONFIG_BT_BNEP=m ++CONFIG_BT_BNEP_MC_FILTER=y ++CONFIG_BT_BNEP_PROTO_FILTER=y ++CONFIG_BT_HIDP=m ++CONFIG_BT_HCIUART=m ++CONFIG_BT_HCIUART_H4=y ++CONFIG_BT_HCIUART_BCSP=y ++CONFIG_BT_HCIBCM203X=m ++CONFIG_BT_HCIBFUSB=m ++CONFIG_BT_HCIVHCI=m ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_MTD=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_AMDSTD=y 
++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_PHYSMAP_OF=y ++CONFIG_BLK_DEV_LOOP=m ++CONFIG_BLK_DEV_CRYPTOLOOP=m ++CONFIG_BLK_DEV_NBD=m ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++CONFIG_SCSI=m ++CONFIG_BLK_DEV_SD=m ++CONFIG_CHR_DEV_ST=m ++CONFIG_CHR_DEV_OSST=m ++CONFIG_BLK_DEV_SR=m ++CONFIG_BLK_DEV_SR_VENDOR=y ++CONFIG_CHR_DEV_SG=m ++CONFIG_SCSI_CONSTANTS=y ++CONFIG_SCSI_LOGGING=y ++CONFIG_SCSI_ISCSI_ATTRS=m ++CONFIG_BLK_DEV_3W_XXXX_RAID=m ++CONFIG_SCSI_3W_9XXX=m ++CONFIG_SCSI_ACARD=m ++CONFIG_SCSI_AACRAID=m ++CONFIG_SCSI_AIC7XXX=m ++CONFIG_AIC7XXX_CMDS_PER_DEVICE=4 ++CONFIG_AIC7XXX_RESET_DELAY_MS=15000 ++# CONFIG_AIC7XXX_DEBUG_ENABLE is not set ++# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set ++CONFIG_SCSI_AIC79XX=m ++CONFIG_AIC79XX_CMDS_PER_DEVICE=4 ++CONFIG_AIC79XX_RESET_DELAY_MS=15000 ++# CONFIG_AIC79XX_DEBUG_ENABLE is not set ++# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set ++CONFIG_SCSI_ARCMSR=m ++CONFIG_MEGARAID_NEWGEN=y ++CONFIG_MEGARAID_MM=m ++CONFIG_MEGARAID_MAILBOX=m ++CONFIG_MEGARAID_SAS=m ++CONFIG_SCSI_GDTH=m ++CONFIG_SCSI_IPS=m ++CONFIG_SCSI_INITIO=m ++CONFIG_SCSI_SYM53C8XX_2=m ++CONFIG_SCSI_QLOGIC_1280=m ++CONFIG_NETDEVICES=y ++CONFIG_BONDING=m ++CONFIG_DUMMY=m ++CONFIG_NETCONSOLE=m ++CONFIG_TUN=m ++# CONFIG_ATM_DRIVERS is not set ++CONFIG_MV643XX_ETH=y ++CONFIG_VITESSE_PHY=y ++CONFIG_INPUT_EVDEV=y ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_UINPUT=m ++# CONFIG_SERIO is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_SERIAL_NONSTANDARD=y ++CONFIG_SERIAL_MPSC=y ++CONFIG_SERIAL_MPSC_CONSOLE=y ++CONFIG_NVRAM=m ++CONFIG_RAW_DRIVER=y ++CONFIG_MAX_RAW_DEVS=8192 ++CONFIG_I2C=m ++CONFIG_I2C_CHARDEV=m ++CONFIG_I2C_MV64XXX=m ++CONFIG_HWMON=m ++CONFIG_SENSORS_ADM1021=m ++CONFIG_SENSORS_ADM1025=m ++CONFIG_SENSORS_ADM1026=m ++CONFIG_SENSORS_ADM1031=m ++CONFIG_SENSORS_DS1621=m ++CONFIG_SENSORS_GL518SM=m ++CONFIG_SENSORS_MAX1619=m ++CONFIG_SENSORS_LM75=m ++CONFIG_SENSORS_LM77=m ++CONFIG_SENSORS_LM78=m ++CONFIG_SENSORS_LM80=m ++CONFIG_SENSORS_LM83=m ++CONFIG_SENSORS_LM85=m ++CONFIG_SENSORS_LM87=m ++CONFIG_SENSORS_LM90=m ++CONFIG_SENSORS_PCF8591=m ++CONFIG_SENSORS_VIA686A=m ++CONFIG_SENSORS_W83781D=m ++CONFIG_SENSORS_W83L785TS=m ++CONFIG_WATCHDOG=y ++CONFIG_SOFT_WATCHDOG=m ++CONFIG_PCIPCWATCHDOG=m ++CONFIG_WDTPCI=m ++CONFIG_USBPCWATCHDOG=m ++# CONFIG_VGA_CONSOLE is not set ++CONFIG_USB=m ++CONFIG_USB_MON=m ++CONFIG_USB_EHCI_HCD=m ++CONFIG_USB_EHCI_ROOT_HUB_TT=y ++CONFIG_USB_OHCI_HCD=m ++CONFIG_USB_OHCI_HCD_PPC_OF_BE=y ++CONFIG_USB_UHCI_HCD=m ++CONFIG_USB_ACM=m ++CONFIG_USB_PRINTER=m ++CONFIG_USB_STORAGE=m ++CONFIG_USB_STORAGE_DATAFAB=m ++CONFIG_USB_STORAGE_FREECOM=m ++CONFIG_USB_STORAGE_ISD200=m ++CONFIG_USB_STORAGE_SDDR09=m ++CONFIG_USB_STORAGE_SDDR55=m ++CONFIG_USB_STORAGE_JUMPSHOT=m ++CONFIG_USB_MDC800=m ++CONFIG_USB_MICROTEK=m ++CONFIG_USB_SERIAL=m ++CONFIG_USB_SERIAL_GENERIC=y ++CONFIG_USB_SERIAL_BELKIN=m ++CONFIG_USB_SERIAL_WHITEHEAT=m ++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m ++CONFIG_USB_SERIAL_EMPEG=m ++CONFIG_USB_SERIAL_FTDI_SIO=m ++CONFIG_USB_SERIAL_VISOR=m ++CONFIG_USB_SERIAL_IPAQ=m ++CONFIG_USB_SERIAL_IR=m ++CONFIG_USB_SERIAL_EDGEPORT=m ++CONFIG_USB_SERIAL_EDGEPORT_TI=m ++CONFIG_USB_SERIAL_KEYSPAN_PDA=m ++CONFIG_USB_SERIAL_KEYSPAN=m ++CONFIG_USB_SERIAL_KLSI=m ++CONFIG_USB_SERIAL_KOBIL_SCT=m ++CONFIG_USB_SERIAL_MCT_U232=m ++CONFIG_USB_SERIAL_PL2303=m ++CONFIG_USB_SERIAL_SAFE=m ++CONFIG_USB_SERIAL_SAFE_PADDED=y ++CONFIG_USB_SERIAL_CYBERJACK=m ++CONFIG_USB_SERIAL_XIRCOM=m ++CONFIG_USB_SERIAL_OMNINET=m 
++CONFIG_USB_EMI62=m ++CONFIG_USB_RIO500=m ++CONFIG_USB_LEGOTOWER=m ++CONFIG_USB_LCD=m ++CONFIG_USB_LED=m ++CONFIG_USB_TEST=m ++CONFIG_USB_ATM=m ++CONFIG_USB_SPEEDTOUCH=m ++CONFIG_INFINIBAND=m ++CONFIG_INFINIBAND_USER_MAD=m ++CONFIG_INFINIBAND_USER_ACCESS=m ++CONFIG_INFINIBAND_MTHCA=m ++CONFIG_INFINIBAND_IPOIB=m ++CONFIG_INFINIBAND_IPOIB_CM=y ++CONFIG_INFINIBAND_SRP=m ++CONFIG_DMADEVICES=y ++CONFIG_EXT4_FS=m ++CONFIG_EXT4_FS_POSIX_ACL=y ++CONFIG_EXT4_FS_SECURITY=y ++CONFIG_QUOTA=y ++CONFIG_QFMT_V2=y ++CONFIG_AUTOFS4_FS=m ++CONFIG_UDF_FS=m ++CONFIG_MSDOS_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_FAT_DEFAULT_IOCHARSET="ascii" ++CONFIG_PROC_KCORE=y ++CONFIG_TMPFS=y ++CONFIG_HFS_FS=m ++CONFIG_HFSPLUS_FS=m ++CONFIG_JFFS2_FS=y ++CONFIG_CRAMFS=m ++CONFIG_VXFS_FS=m ++CONFIG_NFS_FS=y ++CONFIG_NFS_V3_ACL=y ++CONFIG_NFS_V4=y ++CONFIG_ROOT_NFS=y ++CONFIG_CIFS=m ++CONFIG_CIFS_XATTR=y ++CONFIG_CIFS_POSIX=y ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="utf8" ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_CODEPAGE_737=m ++CONFIG_NLS_CODEPAGE_775=m ++CONFIG_NLS_CODEPAGE_850=m ++CONFIG_NLS_CODEPAGE_852=m ++CONFIG_NLS_CODEPAGE_855=m ++CONFIG_NLS_CODEPAGE_857=m ++CONFIG_NLS_CODEPAGE_860=m ++CONFIG_NLS_CODEPAGE_861=m ++CONFIG_NLS_CODEPAGE_862=m ++CONFIG_NLS_CODEPAGE_863=m ++CONFIG_NLS_CODEPAGE_864=m ++CONFIG_NLS_CODEPAGE_865=m ++CONFIG_NLS_CODEPAGE_866=m ++CONFIG_NLS_CODEPAGE_869=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_CODEPAGE_950=m ++CONFIG_NLS_CODEPAGE_932=m ++CONFIG_NLS_CODEPAGE_949=m ++CONFIG_NLS_CODEPAGE_874=m ++CONFIG_NLS_ISO8859_8=m ++CONFIG_NLS_CODEPAGE_1250=m ++CONFIG_NLS_CODEPAGE_1251=m ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_ISO8859_2=m ++CONFIG_NLS_ISO8859_3=m ++CONFIG_NLS_ISO8859_4=m ++CONFIG_NLS_ISO8859_5=m ++CONFIG_NLS_ISO8859_6=m ++CONFIG_NLS_ISO8859_7=m ++CONFIG_NLS_ISO8859_9=m ++CONFIG_NLS_ISO8859_13=m ++CONFIG_NLS_ISO8859_14=m ++CONFIG_NLS_ISO8859_15=m ++CONFIG_NLS_KOI8_R=m ++CONFIG_NLS_KOI8_U=m ++CONFIG_CRC_CCITT=m ++CONFIG_CRC_T10DIF=m ++CONFIG_DEBUG_INFO=y ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_STACK_USAGE=y ++CONFIG_DEBUG_HIGHMEM=y ++CONFIG_DEBUG_STACKOVERFLOW=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DEBUG_SPINLOCK=y ++CONFIG_BOOTX_TEXT=y ++CONFIG_PPC_EARLY_DEBUG=y ++CONFIG_SECURITY=y ++CONFIG_SECURITY_NETWORK=y ++CONFIG_SECURITY_SELINUX=y ++CONFIG_SECURITY_SELINUX_BOOTPARAM=y ++CONFIG_SECURITY_SELINUX_DISABLE=y ++CONFIG_CRYPTO_HMAC=y ++CONFIG_CRYPTO_MICHAEL_MIC=m ++CONFIG_CRYPTO_SHA1=y ++CONFIG_CRYPTO_SHA512=m ++CONFIG_CRYPTO_WP512=m ++CONFIG_CRYPTO_BLOWFISH=m ++CONFIG_CRYPTO_CAST6=m ++CONFIG_CRYPTO_KHAZAD=m ++CONFIG_CRYPTO_SERPENT=m ++CONFIG_CRYPTO_TEA=m ++CONFIG_CRYPTO_TWOFISH=m +diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig +index 7c6baf6df139..9aea58fa9dba 100644 +--- a/arch/powerpc/configs/ppc6xx_defconfig ++++ b/arch/powerpc/configs/ppc6xx_defconfig +@@ -74,7 +74,7 @@ CONFIG_QE_GPIO=y + CONFIG_MCU_MPC8349EMITX=y + CONFIG_HIGHMEM=y + CONFIG_HZ_1000=y +-CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_PREEMPT=y + CONFIG_BINFMT_MISC=y + CONFIG_HIBERNATION=y + CONFIG_PM_DEBUG=y +diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig +new file mode 100644 +index 000000000000..46434ca1fa10 +--- /dev/null ++++ b/arch/score/configs/spct6600_defconfig +@@ -0,0 +1,84 @@ ++CONFIG_HZ_100=y ++CONFIG_PREEMPT=y ++CONFIG_EXPERIMENTAL=y ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_SYSVIPC=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_LOG_BUF_SHIFT=12 ++CONFIG_SYSFS_DEPRECATED_V2=y 
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_EXPERT=y
++# CONFIG_KALLSYMS is not set
++# CONFIG_HOTPLUG is not set
++CONFIG_SLAB=y
++CONFIG_MODULES=y
++CONFIG_MODULE_FORCE_LOAD=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++# CONFIG_BLK_DEV_BSG is not set
++CONFIG_BINFMT_MISC=y
++CONFIG_NET=y
++CONFIG_UNIX=y
++CONFIG_NET_KEY=y
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_ARPD=y
++# CONFIG_INET_LRO is not set
++# CONFIG_IPV6 is not set
++# CONFIG_STANDALONE is not set
++# CONFIG_PREVENT_FIRMWARE_BUILD is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_CRYPTOLOOP=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=1
++# CONFIG_MISC_DEVICES is not set
++CONFIG_NETDEVICES=y
++# CONFIG_NETDEV_1000 is not set
++# CONFIG_NETDEV_10000 is not set
++# CONFIG_INPUT_MOUSEDEV is not set
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_SERIO is not set
++CONFIG_SERIAL_NONSTANDARD=y
++CONFIG_STALDRV=y
++# CONFIG_HW_RANDOM is not set
++CONFIG_RAW_DRIVER=y
++CONFIG_MAX_RAW_DEVS=8192
++# CONFIG_HWMON is not set
++# CONFIG_VGA_CONSOLE is not set
++# CONFIG_HID_SUPPORT is not set
++# CONFIG_USB_SUPPORT is not set
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT3_FS=y
++# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++CONFIG_PROC_KCORE=y
++# CONFIG_PROC_PAGE_MONITOR is not set
++CONFIG_TMPFS=y
++CONFIG_TMPFS_POSIX_ACL=y
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_NFSD=y
++CONFIG_NFSD_V3_ACL=y
++CONFIG_NFSD_V4=y
++# CONFIG_RCU_CPU_STALL_DETECTOR is not set
++CONFIG_SECURITY=y
++CONFIG_SECURITY_NETWORK=y
++CONFIG_CRYPTO_NULL=y
++CONFIG_CRYPTO_CRYPTD=y
++CONFIG_CRYPTO_SEQIV=y
++CONFIG_CRYPTO_MD4=y
++CONFIG_CRYPTO_MICHAEL_MIC=y
++# CONFIG_CRYPTO_ANSI_CPRNG is not set
++# CONFIG_CRYPTO_HW is not set
++CONFIG_CRC_CCITT=y
++CONFIG_CRC16=y
++CONFIG_LIBCRC32C=y
+diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
+index 5a1097641247..eb5fbf554e7f 100644
+--- a/arch/sh/configs/se7712_defconfig
++++ b/arch/sh/configs/se7712_defconfig
+@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
+ CONFIG_SH_SOLUTION_ENGINE=y
+ CONFIG_SH_PCLK_FREQ=66666666
+ CONFIG_HEARTBEAT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
+ CONFIG_NET=y
+diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
+index 9c0ef13bee10..cbaa65c8bf9e 100644
+--- a/arch/sh/configs/se7721_defconfig
++++ b/arch/sh/configs/se7721_defconfig
+@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
+ CONFIG_SH_7721_SOLUTION_ENGINE=y
+ CONFIG_SH_PCLK_FREQ=33333333
+ CONFIG_HEARTBEAT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda2"
+ CONFIG_NET=y
+diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
+index ceb48e9b70f4..1a69eda6610c 100644
+--- a/arch/sh/configs/titan_defconfig
++++ b/arch/sh/configs/titan_defconfig
+@@ -20,7 +20,7 @@ CONFIG_SH_TITAN=y
+ CONFIG_SH_PCLK_FREQ=30000000
+ CONFIG_SH_DMA=y
+ CONFIG_SH_DMA_API=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_CMDLINE_OVERWRITE=y
+ CONFIG_CMDLINE="console=ttySC1,38400N81 root=/dev/nfs ip=:::::eth1:autoconf rw"
+ CONFIG_PCI=y
+diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
+index ea547d596fcf..e1f11071da4c 100644
+--- a/arch/sparc/configs/sparc64_defconfig
++++ b/arch/sparc/configs/sparc64_defconfig
+@@ -22,7 +22,7 @@ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_NUMA=y
+ CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_SUN_LDOMS=y
+ CONFIG_PCI=y
+ CONFIG_PCI_MSI=y
+diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
+new file mode 100644
+index 000000000000..939c63ba7e6e
+--- /dev/null
++++ b/arch/tile/configs/tilegx_defconfig
+@@ -0,0 +1,411 @@
++CONFIG_TILEGX=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_FHANDLE=y
++CONFIG_AUDIT=y
++CONFIG_NO_HZ=y
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_TASK_XACCT=y
++CONFIG_TASK_IO_ACCOUNTING=y
++CONFIG_LOG_BUF_SHIFT=19
++CONFIG_CGROUPS=y
++CONFIG_CGROUP_DEBUG=y
++CONFIG_CGROUP_DEVICE=y
++CONFIG_CPUSETS=y
++CONFIG_CGROUP_CPUACCT=y
++CONFIG_CGROUP_SCHED=y
++CONFIG_RT_GROUP_SCHED=y
++CONFIG_BLK_CGROUP=y
++CONFIG_NAMESPACES=y
++CONFIG_RELAY=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_RD_XZ=y
++CONFIG_SYSCTL_SYSCALL=y
++CONFIG_EMBEDDED=y
++# CONFIG_COMPAT_BRK is not set
++CONFIG_PROFILING=y
++CONFIG_KPROBES=y
++CONFIG_MODULES=y
++CONFIG_MODULE_FORCE_LOAD=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_BLK_DEV_INTEGRITY=y
++CONFIG_PARTITION_ADVANCED=y
++CONFIG_OSF_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++CONFIG_MAC_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++CONFIG_SGI_PARTITION=y
++CONFIG_SUN_PARTITION=y
++CONFIG_KARMA_PARTITION=y
++CONFIG_CFQ_GROUP_IOSCHED=y
++CONFIG_NR_CPUS=100
++CONFIG_HZ_100=y
++# CONFIG_COMPACTION is not set
++CONFIG_PREEMPT=y
++CONFIG_TILE_PCI_IO=y
++CONFIG_PCI_DEBUG=y
++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
++CONFIG_BINFMT_MISC=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_XFRM_USER=y
++CONFIG_XFRM_SUB_POLICY=y
++CONFIG_XFRM_STATISTICS=y
++CONFIG_NET_KEY=m
++CONFIG_NET_KEY_MIGRATE=y
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++CONFIG_IP_ROUTE_VERBOSE=y
++CONFIG_NET_IPIP=m
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++CONFIG_SYN_COOKIES=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_XFRM_MODE_TRANSPORT=m
++CONFIG_INET_XFRM_MODE_TUNNEL=m
++CONFIG_INET_XFRM_MODE_BEET=m
++CONFIG_INET_DIAG=m
++CONFIG_TCP_CONG_ADVANCED=y
++CONFIG_TCP_CONG_HSTCP=m
++CONFIG_TCP_CONG_HYBLA=m
++CONFIG_TCP_CONG_SCALABLE=m
++CONFIG_TCP_CONG_LP=m
++CONFIG_TCP_CONG_VENO=m
++CONFIG_TCP_CONG_YEAH=m
++CONFIG_TCP_CONG_ILLINOIS=m
++CONFIG_TCP_MD5SIG=y
++CONFIG_IPV6=y
++CONFIG_IPV6_ROUTER_PREF=y
++CONFIG_IPV6_ROUTE_INFO=y
++CONFIG_IPV6_OPTIMISTIC_DAD=y
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_IPV6_MIP6=m
++CONFIG_INET6_XFRM_MODE_TRANSPORT=m
++CONFIG_INET6_XFRM_MODE_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_BEET=m
++CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
++CONFIG_IPV6_SIT=m
++CONFIG_IPV6_TUNNEL=m
++CONFIG_IPV6_MULTIPLE_TABLES=y
++CONFIG_IPV6_MROUTE=y
++CONFIG_IPV6_PIMSM_V2=y
++CONFIG_NETLABEL=y
++CONFIG_RDS=m
++CONFIG_RDS_TCP=m
++CONFIG_BRIDGE=m
++CONFIG_VLAN_8021Q=m
++CONFIG_VLAN_8021Q_GVRP=y
++CONFIG_PHONET=m
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_MULTIQ=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m ++CONFIG_NET_SCH_GRED=m ++CONFIG_NET_SCH_DSMARK=m ++CONFIG_NET_SCH_NETEM=m ++CONFIG_NET_SCH_DRR=m ++CONFIG_NET_SCH_INGRESS=m ++CONFIG_NET_CLS_BASIC=m ++CONFIG_NET_CLS_TCINDEX=m ++CONFIG_NET_CLS_ROUTE4=m ++CONFIG_NET_CLS_FW=m ++CONFIG_NET_CLS_U32=m ++CONFIG_CLS_U32_PERF=y ++CONFIG_CLS_U32_MARK=y ++CONFIG_NET_CLS_RSVP=m ++CONFIG_NET_CLS_RSVP6=m ++CONFIG_NET_CLS_FLOW=m ++CONFIG_NET_CLS_CGROUP=y ++CONFIG_NET_EMATCH=y ++CONFIG_NET_EMATCH_CMP=m ++CONFIG_NET_EMATCH_NBYTE=m ++CONFIG_NET_EMATCH_U32=m ++CONFIG_NET_EMATCH_META=m ++CONFIG_NET_EMATCH_TEXT=m ++CONFIG_NET_CLS_ACT=y ++CONFIG_NET_ACT_POLICE=m ++CONFIG_NET_ACT_GACT=m ++CONFIG_GACT_PROB=y ++CONFIG_NET_ACT_MIRRED=m ++CONFIG_NET_ACT_NAT=m ++CONFIG_NET_ACT_PEDIT=m ++CONFIG_NET_ACT_SIMP=m ++CONFIG_NET_ACT_SKBEDIT=m ++CONFIG_NET_CLS_IND=y ++CONFIG_DCB=y ++CONFIG_DNS_RESOLVER=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_DEVTMPFS=y ++CONFIG_DEVTMPFS_MOUNT=y ++CONFIG_CONNECTOR=y ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_CRYPTOLOOP=m ++CONFIG_BLK_DEV_SX8=m ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++CONFIG_ATA_OVER_ETH=m ++CONFIG_RAID_ATTRS=m ++CONFIG_BLK_DEV_SD=y ++CONFIG_SCSI_CONSTANTS=y ++CONFIG_SCSI_LOGGING=y ++CONFIG_SCSI_SAS_ATA=y ++CONFIG_ISCSI_TCP=m ++CONFIG_SCSI_MVSAS=y ++# CONFIG_SCSI_MVSAS_DEBUG is not set ++CONFIG_SCSI_MVSAS_TASKLET=y ++CONFIG_ATA=y ++CONFIG_SATA_AHCI=y ++CONFIG_SATA_SIL24=y ++# CONFIG_ATA_SFF is not set ++CONFIG_MD=y ++CONFIG_BLK_DEV_MD=y ++CONFIG_MD_LINEAR=m ++CONFIG_MD_RAID0=m ++CONFIG_MD_RAID1=m ++CONFIG_MD_RAID10=m ++CONFIG_MD_RAID456=m ++CONFIG_MD_FAULTY=m ++CONFIG_BLK_DEV_DM=m ++CONFIG_DM_DEBUG=y ++CONFIG_DM_CRYPT=m ++CONFIG_DM_SNAPSHOT=m ++CONFIG_DM_MIRROR=m ++CONFIG_DM_LOG_USERSPACE=m ++CONFIG_DM_ZERO=m ++CONFIG_DM_MULTIPATH=m ++CONFIG_DM_MULTIPATH_QL=m ++CONFIG_DM_MULTIPATH_ST=m ++CONFIG_DM_DELAY=m ++CONFIG_DM_UEVENT=y ++CONFIG_TARGET_CORE=m ++CONFIG_TCM_IBLOCK=m ++CONFIG_TCM_FILEIO=m ++CONFIG_TCM_PSCSI=m ++CONFIG_LOOPBACK_TARGET=m ++CONFIG_ISCSI_TARGET=m ++CONFIG_FUSION=y ++CONFIG_FUSION_SAS=y ++CONFIG_NETDEVICES=y ++CONFIG_BONDING=m ++CONFIG_DUMMY=m ++CONFIG_IFB=m ++CONFIG_MACVLAN=m ++CONFIG_MACVTAP=m ++CONFIG_NETCONSOLE=m ++CONFIG_NETCONSOLE_DYNAMIC=y ++CONFIG_TUN=y ++CONFIG_VETH=m ++CONFIG_NET_DSA_MV88E6060=y ++CONFIG_NET_DSA_MV88E6XXX=y ++CONFIG_SKY2=y ++CONFIG_PTP_1588_CLOCK_TILEGX=y ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_SERIAL_TILEGX=y ++CONFIG_HW_RANDOM=y ++CONFIG_HW_RANDOM_TIMERIOMEM=m ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_WATCHDOG_NOWAYOUT=y ++# CONFIG_VGA_ARB is not set ++CONFIG_DRM=m ++CONFIG_DRM_TDFX=m ++CONFIG_DRM_R128=m ++CONFIG_DRM_MGA=m ++CONFIG_DRM_VIA=m ++CONFIG_DRM_SAVAGE=m ++CONFIG_USB=y ++CONFIG_USB_EHCI_HCD=y ++CONFIG_USB_OHCI_HCD=y ++CONFIG_USB_STORAGE=y ++CONFIG_EDAC=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_TILE=y ++CONFIG_EXT2_FS=y ++CONFIG_EXT2_FS_XATTR=y ++CONFIG_EXT2_FS_POSIX_ACL=y ++CONFIG_EXT2_FS_SECURITY=y ++CONFIG_EXT2_FS_XIP=y ++CONFIG_EXT3_FS=y ++CONFIG_EXT3_FS_POSIX_ACL=y ++CONFIG_EXT3_FS_SECURITY=y ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_FS_POSIX_ACL=y ++CONFIG_EXT4_FS_SECURITY=y ++CONFIG_XFS_FS=y ++CONFIG_XFS_QUOTA=y ++CONFIG_XFS_POSIX_ACL=y ++CONFIG_GFS2_FS=m ++CONFIG_GFS2_FS_LOCKING_DLM=y ++CONFIG_BTRFS_FS=m ++CONFIG_BTRFS_FS_POSIX_ACL=y ++CONFIG_QUOTA=y 
++CONFIG_QUOTA_NETLINK_INTERFACE=y ++# CONFIG_PRINT_QUOTA_WARNING is not set ++CONFIG_QFMT_V2=y ++CONFIG_AUTOFS4_FS=m ++CONFIG_FUSE_FS=y ++CONFIG_CUSE=m ++CONFIG_FSCACHE=m ++CONFIG_FSCACHE_STATS=y ++CONFIG_CACHEFILES=m ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_ZISOFS=y ++CONFIG_UDF_FS=m ++CONFIG_MSDOS_FS=m ++CONFIG_VFAT_FS=m ++CONFIG_FAT_DEFAULT_IOCHARSET="ascii" ++CONFIG_PROC_KCORE=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_HUGETLBFS=y ++CONFIG_ECRYPT_FS=m ++CONFIG_CRAMFS=m ++CONFIG_SQUASHFS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3_ACL=y ++CONFIG_NFS_V4=m ++CONFIG_NFS_V4_1=y ++CONFIG_NFS_FSCACHE=y ++CONFIG_NFSD=m ++CONFIG_NFSD_V3_ACL=y ++CONFIG_NFSD_V4=y ++CONFIG_CIFS=m ++CONFIG_CIFS_STATS=y ++CONFIG_CIFS_WEAK_PW_HASH=y ++CONFIG_CIFS_UPCALL=y ++CONFIG_CIFS_XATTR=y ++CONFIG_CIFS_POSIX=y ++CONFIG_CIFS_DFS_UPCALL=y ++CONFIG_CIFS_FSCACHE=y ++CONFIG_NLS_DEFAULT="utf8" ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_CODEPAGE_737=m ++CONFIG_NLS_CODEPAGE_775=m ++CONFIG_NLS_CODEPAGE_850=m ++CONFIG_NLS_CODEPAGE_852=m ++CONFIG_NLS_CODEPAGE_855=m ++CONFIG_NLS_CODEPAGE_857=m ++CONFIG_NLS_CODEPAGE_860=m ++CONFIG_NLS_CODEPAGE_861=m ++CONFIG_NLS_CODEPAGE_862=m ++CONFIG_NLS_CODEPAGE_863=m ++CONFIG_NLS_CODEPAGE_864=m ++CONFIG_NLS_CODEPAGE_865=m ++CONFIG_NLS_CODEPAGE_866=m ++CONFIG_NLS_CODEPAGE_869=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_CODEPAGE_950=m ++CONFIG_NLS_CODEPAGE_932=m ++CONFIG_NLS_CODEPAGE_949=m ++CONFIG_NLS_CODEPAGE_874=m ++CONFIG_NLS_ISO8859_8=m ++CONFIG_NLS_CODEPAGE_1250=m ++CONFIG_NLS_CODEPAGE_1251=m ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_ISO8859_2=m ++CONFIG_NLS_ISO8859_3=m ++CONFIG_NLS_ISO8859_4=m ++CONFIG_NLS_ISO8859_5=m ++CONFIG_NLS_ISO8859_6=m ++CONFIG_NLS_ISO8859_7=m ++CONFIG_NLS_ISO8859_9=m ++CONFIG_NLS_ISO8859_13=m ++CONFIG_NLS_ISO8859_14=m ++CONFIG_NLS_ISO8859_15=m ++CONFIG_NLS_KOI8_R=m ++CONFIG_NLS_KOI8_U=m ++CONFIG_NLS_UTF8=m ++CONFIG_DLM=m ++CONFIG_DLM_DEBUG=y ++CONFIG_DYNAMIC_DEBUG=y ++CONFIG_DEBUG_INFO=y ++CONFIG_DEBUG_INFO_REDUCED=y ++# CONFIG_ENABLE_WARN_DEPRECATED is not set ++CONFIG_STRIP_ASM_SYMS=y ++CONFIG_DEBUG_FS=y ++CONFIG_HEADERS_CHECK=y ++# CONFIG_FRAME_POINTER is not set ++CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y ++CONFIG_DEBUG_VM=y ++CONFIG_DEBUG_MEMORY_INIT=y ++CONFIG_DEBUG_STACKOVERFLOW=y ++CONFIG_LOCKUP_DETECTOR=y ++CONFIG_SCHEDSTATS=y ++CONFIG_TIMER_STATS=y ++CONFIG_DEBUG_LIST=y ++CONFIG_DEBUG_CREDENTIALS=y ++CONFIG_RCU_CPU_STALL_TIMEOUT=60 ++CONFIG_ASYNC_RAID6_TEST=m ++CONFIG_KGDB=y ++CONFIG_SECURITY=y ++CONFIG_SECURITYFS=y ++CONFIG_SECURITY_NETWORK=y ++CONFIG_SECURITY_NETWORK_XFRM=y ++CONFIG_SECURITY_SELINUX=y ++CONFIG_SECURITY_SELINUX_BOOTPARAM=y ++CONFIG_SECURITY_SELINUX_DISABLE=y ++CONFIG_CRYPTO_PCRYPT=m ++CONFIG_CRYPTO_CRYPTD=m ++CONFIG_CRYPTO_TEST=m ++CONFIG_CRYPTO_CCM=m ++CONFIG_CRYPTO_GCM=m ++CONFIG_CRYPTO_CTS=m ++CONFIG_CRYPTO_LRW=m ++CONFIG_CRYPTO_PCBC=m ++CONFIG_CRYPTO_XTS=m ++CONFIG_CRYPTO_HMAC=y ++CONFIG_CRYPTO_XCBC=m ++CONFIG_CRYPTO_VMAC=m ++CONFIG_CRYPTO_MICHAEL_MIC=m ++CONFIG_CRYPTO_RMD128=m ++CONFIG_CRYPTO_RMD160=m ++CONFIG_CRYPTO_RMD256=m ++CONFIG_CRYPTO_RMD320=m ++CONFIG_CRYPTO_SHA1=y ++CONFIG_CRYPTO_SHA512=m ++CONFIG_CRYPTO_TGR192=m ++CONFIG_CRYPTO_WP512=m ++CONFIG_CRYPTO_ANUBIS=m ++CONFIG_CRYPTO_BLOWFISH=m ++CONFIG_CRYPTO_CAMELLIA=m ++CONFIG_CRYPTO_CAST5=m ++CONFIG_CRYPTO_CAST6=m ++CONFIG_CRYPTO_FCRYPT=m ++CONFIG_CRYPTO_KHAZAD=m ++CONFIG_CRYPTO_SEED=m ++CONFIG_CRYPTO_SERPENT=m ++CONFIG_CRYPTO_TEA=m ++CONFIG_CRYPTO_TWOFISH=m ++CONFIG_CRYPTO_LZO=m +diff --git a/arch/tile/configs/tilepro_defconfig 
b/arch/tile/configs/tilepro_defconfig +new file mode 100644 +index 000000000000..e8c4003cbd81 +--- /dev/null ++++ b/arch/tile/configs/tilepro_defconfig +@@ -0,0 +1,524 @@ ++CONFIG_SYSVIPC=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_AUDIT=y ++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++CONFIG_TASKSTATS=y ++CONFIG_TASK_DELAY_ACCT=y ++CONFIG_TASK_XACCT=y ++CONFIG_TASK_IO_ACCOUNTING=y ++CONFIG_LOG_BUF_SHIFT=19 ++CONFIG_CGROUPS=y ++CONFIG_CGROUP_DEBUG=y ++CONFIG_CGROUP_DEVICE=y ++CONFIG_CPUSETS=y ++CONFIG_CGROUP_CPUACCT=y ++CONFIG_CGROUP_SCHED=y ++CONFIG_RT_GROUP_SCHED=y ++CONFIG_BLK_CGROUP=y ++CONFIG_NAMESPACES=y ++CONFIG_RELAY=y ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_RD_XZ=y ++CONFIG_SYSCTL_SYSCALL=y ++CONFIG_EMBEDDED=y ++# CONFIG_COMPAT_BRK is not set ++CONFIG_PROFILING=y ++CONFIG_MODULES=y ++CONFIG_MODULE_FORCE_LOAD=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_BLK_DEV_INTEGRITY=y ++CONFIG_PARTITION_ADVANCED=y ++CONFIG_OSF_PARTITION=y ++CONFIG_AMIGA_PARTITION=y ++CONFIG_MAC_PARTITION=y ++CONFIG_BSD_DISKLABEL=y ++CONFIG_MINIX_SUBPARTITION=y ++CONFIG_SOLARIS_X86_PARTITION=y ++CONFIG_UNIXWARE_DISKLABEL=y ++CONFIG_SGI_PARTITION=y ++CONFIG_SUN_PARTITION=y ++CONFIG_KARMA_PARTITION=y ++CONFIG_CFQ_GROUP_IOSCHED=y ++CONFIG_HZ_100=y ++# CONFIG_COMPACTION is not set ++CONFIG_PREEMPT=y ++CONFIG_PCI_DEBUG=y ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_BINFMT_MISC=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_XFRM_USER=y ++CONFIG_XFRM_SUB_POLICY=y ++CONFIG_XFRM_STATISTICS=y ++CONFIG_NET_KEY=m ++CONFIG_NET_KEY_MIGRATE=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_IP_ADVANCED_ROUTER=y ++CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IP_ROUTE_MULTIPATH=y ++CONFIG_IP_ROUTE_VERBOSE=y ++CONFIG_NET_IPIP=m ++CONFIG_IP_MROUTE=y ++CONFIG_IP_PIMSM_V1=y ++CONFIG_IP_PIMSM_V2=y ++CONFIG_SYN_COOKIES=y ++CONFIG_INET_AH=m ++CONFIG_INET_ESP=m ++CONFIG_INET_IPCOMP=m ++CONFIG_INET_XFRM_MODE_TRANSPORT=m ++CONFIG_INET_XFRM_MODE_TUNNEL=m ++CONFIG_INET_XFRM_MODE_BEET=m ++CONFIG_INET_DIAG=m ++CONFIG_TCP_CONG_ADVANCED=y ++CONFIG_TCP_CONG_HSTCP=m ++CONFIG_TCP_CONG_HYBLA=m ++CONFIG_TCP_CONG_SCALABLE=m ++CONFIG_TCP_CONG_LP=m ++CONFIG_TCP_CONG_VENO=m ++CONFIG_TCP_CONG_YEAH=m ++CONFIG_TCP_CONG_ILLINOIS=m ++CONFIG_TCP_MD5SIG=y ++CONFIG_IPV6=y ++CONFIG_IPV6_ROUTER_PREF=y ++CONFIG_IPV6_ROUTE_INFO=y ++CONFIG_IPV6_OPTIMISTIC_DAD=y ++CONFIG_INET6_AH=m ++CONFIG_INET6_ESP=m ++CONFIG_INET6_IPCOMP=m ++CONFIG_IPV6_MIP6=m ++CONFIG_INET6_XFRM_MODE_TRANSPORT=m ++CONFIG_INET6_XFRM_MODE_TUNNEL=m ++CONFIG_INET6_XFRM_MODE_BEET=m ++CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m ++CONFIG_IPV6_SIT=m ++CONFIG_IPV6_TUNNEL=m ++CONFIG_IPV6_MULTIPLE_TABLES=y ++CONFIG_IPV6_MROUTE=y ++CONFIG_IPV6_PIMSM_V2=y ++CONFIG_NETLABEL=y ++CONFIG_NETFILTER=y ++CONFIG_NF_CONNTRACK=m ++CONFIG_NF_CONNTRACK_SECMARK=y ++CONFIG_NF_CONNTRACK_ZONES=y ++CONFIG_NF_CONNTRACK_EVENTS=y ++CONFIG_NF_CT_PROTO_DCCP=m ++CONFIG_NF_CT_PROTO_UDPLITE=m ++CONFIG_NF_CONNTRACK_AMANDA=m ++CONFIG_NF_CONNTRACK_FTP=m ++CONFIG_NF_CONNTRACK_H323=m ++CONFIG_NF_CONNTRACK_IRC=m ++CONFIG_NF_CONNTRACK_NETBIOS_NS=m ++CONFIG_NF_CONNTRACK_PPTP=m ++CONFIG_NF_CONNTRACK_SANE=m ++CONFIG_NF_CONNTRACK_SIP=m ++CONFIG_NF_CONNTRACK_TFTP=m ++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m ++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m ++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m ++CONFIG_NETFILTER_XT_TARGET_DSCP=m ++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m ++CONFIG_NETFILTER_XT_TARGET_MARK=m ++CONFIG_NETFILTER_XT_TARGET_NFLOG=m ++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 
++CONFIG_NETFILTER_XT_TARGET_NOTRACK=m ++CONFIG_NETFILTER_XT_TARGET_TEE=m ++CONFIG_NETFILTER_XT_TARGET_TPROXY=m ++CONFIG_NETFILTER_XT_TARGET_TRACE=m ++CONFIG_NETFILTER_XT_TARGET_SECMARK=m ++CONFIG_NETFILTER_XT_TARGET_TCPMSS=m ++CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m ++CONFIG_NETFILTER_XT_MATCH_CLUSTER=m ++CONFIG_NETFILTER_XT_MATCH_COMMENT=m ++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m ++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m ++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m ++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m ++CONFIG_NETFILTER_XT_MATCH_DCCP=m ++CONFIG_NETFILTER_XT_MATCH_DSCP=m ++CONFIG_NETFILTER_XT_MATCH_ESP=m ++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m ++CONFIG_NETFILTER_XT_MATCH_HELPER=m ++CONFIG_NETFILTER_XT_MATCH_IPRANGE=m ++CONFIG_NETFILTER_XT_MATCH_IPVS=m ++CONFIG_NETFILTER_XT_MATCH_LENGTH=m ++CONFIG_NETFILTER_XT_MATCH_LIMIT=m ++CONFIG_NETFILTER_XT_MATCH_MAC=m ++CONFIG_NETFILTER_XT_MATCH_MARK=m ++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m ++CONFIG_NETFILTER_XT_MATCH_OSF=m ++CONFIG_NETFILTER_XT_MATCH_OWNER=m ++CONFIG_NETFILTER_XT_MATCH_POLICY=m ++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m ++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m ++CONFIG_NETFILTER_XT_MATCH_QUOTA=m ++CONFIG_NETFILTER_XT_MATCH_RATEEST=m ++CONFIG_NETFILTER_XT_MATCH_REALM=m ++CONFIG_NETFILTER_XT_MATCH_RECENT=m ++CONFIG_NETFILTER_XT_MATCH_SOCKET=m ++CONFIG_NETFILTER_XT_MATCH_STATE=m ++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m ++CONFIG_NETFILTER_XT_MATCH_STRING=m ++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m ++CONFIG_NETFILTER_XT_MATCH_TIME=m ++CONFIG_NETFILTER_XT_MATCH_U32=m ++CONFIG_IP_VS=m ++CONFIG_IP_VS_IPV6=y ++CONFIG_IP_VS_PROTO_TCP=y ++CONFIG_IP_VS_PROTO_UDP=y ++CONFIG_IP_VS_PROTO_ESP=y ++CONFIG_IP_VS_PROTO_AH=y ++CONFIG_IP_VS_PROTO_SCTP=y ++CONFIG_IP_VS_RR=m ++CONFIG_IP_VS_WRR=m ++CONFIG_IP_VS_LC=m ++CONFIG_IP_VS_WLC=m ++CONFIG_IP_VS_LBLC=m ++CONFIG_IP_VS_LBLCR=m ++CONFIG_IP_VS_SED=m ++CONFIG_IP_VS_NQ=m ++CONFIG_NF_CONNTRACK_IPV4=m ++# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set ++CONFIG_IP_NF_IPTABLES=y ++CONFIG_IP_NF_MATCH_AH=m ++CONFIG_IP_NF_MATCH_ECN=m ++CONFIG_IP_NF_MATCH_TTL=m ++CONFIG_IP_NF_FILTER=y ++CONFIG_IP_NF_TARGET_REJECT=y ++CONFIG_IP_NF_MANGLE=m ++CONFIG_IP_NF_TARGET_ECN=m ++CONFIG_IP_NF_TARGET_TTL=m ++CONFIG_IP_NF_RAW=m ++CONFIG_IP_NF_SECURITY=m ++CONFIG_IP_NF_ARPTABLES=m ++CONFIG_IP_NF_ARPFILTER=m ++CONFIG_IP_NF_ARP_MANGLE=m ++CONFIG_NF_CONNTRACK_IPV6=m ++CONFIG_IP6_NF_MATCH_AH=m ++CONFIG_IP6_NF_MATCH_EUI64=m ++CONFIG_IP6_NF_MATCH_FRAG=m ++CONFIG_IP6_NF_MATCH_OPTS=m ++CONFIG_IP6_NF_MATCH_HL=m ++CONFIG_IP6_NF_MATCH_IPV6HEADER=m ++CONFIG_IP6_NF_MATCH_MH=m ++CONFIG_IP6_NF_MATCH_RT=m ++CONFIG_IP6_NF_TARGET_HL=m ++CONFIG_IP6_NF_FILTER=m ++CONFIG_IP6_NF_TARGET_REJECT=m ++CONFIG_IP6_NF_MANGLE=m ++CONFIG_IP6_NF_RAW=m ++CONFIG_IP6_NF_SECURITY=m ++CONFIG_BRIDGE_NF_EBTABLES=m ++CONFIG_BRIDGE_EBT_BROUTE=m ++CONFIG_BRIDGE_EBT_T_FILTER=m ++CONFIG_BRIDGE_EBT_T_NAT=m ++CONFIG_BRIDGE_EBT_802_3=m ++CONFIG_BRIDGE_EBT_AMONG=m ++CONFIG_BRIDGE_EBT_ARP=m ++CONFIG_BRIDGE_EBT_IP=m ++CONFIG_BRIDGE_EBT_IP6=m ++CONFIG_BRIDGE_EBT_LIMIT=m ++CONFIG_BRIDGE_EBT_MARK=m ++CONFIG_BRIDGE_EBT_PKTTYPE=m ++CONFIG_BRIDGE_EBT_STP=m ++CONFIG_BRIDGE_EBT_VLAN=m ++CONFIG_BRIDGE_EBT_ARPREPLY=m ++CONFIG_BRIDGE_EBT_DNAT=m ++CONFIG_BRIDGE_EBT_MARK_T=m ++CONFIG_BRIDGE_EBT_REDIRECT=m ++CONFIG_BRIDGE_EBT_SNAT=m ++CONFIG_BRIDGE_EBT_LOG=m ++CONFIG_BRIDGE_EBT_ULOG=m ++CONFIG_BRIDGE_EBT_NFLOG=m ++CONFIG_RDS=m ++CONFIG_RDS_TCP=m ++CONFIG_BRIDGE=m ++CONFIG_VLAN_8021Q=m ++CONFIG_VLAN_8021Q_GVRP=y ++CONFIG_PHONET=m ++CONFIG_NET_SCHED=y ++CONFIG_NET_SCH_CBQ=m ++CONFIG_NET_SCH_HTB=m 
++CONFIG_NET_SCH_HFSC=m ++CONFIG_NET_SCH_PRIO=m ++CONFIG_NET_SCH_MULTIQ=m ++CONFIG_NET_SCH_RED=m ++CONFIG_NET_SCH_SFQ=m ++CONFIG_NET_SCH_TEQL=m ++CONFIG_NET_SCH_TBF=m ++CONFIG_NET_SCH_GRED=m ++CONFIG_NET_SCH_DSMARK=m ++CONFIG_NET_SCH_NETEM=m ++CONFIG_NET_SCH_DRR=m ++CONFIG_NET_SCH_INGRESS=m ++CONFIG_NET_CLS_BASIC=m ++CONFIG_NET_CLS_TCINDEX=m ++CONFIG_NET_CLS_ROUTE4=m ++CONFIG_NET_CLS_FW=m ++CONFIG_NET_CLS_U32=m ++CONFIG_CLS_U32_PERF=y ++CONFIG_CLS_U32_MARK=y ++CONFIG_NET_CLS_RSVP=m ++CONFIG_NET_CLS_RSVP6=m ++CONFIG_NET_CLS_FLOW=m ++CONFIG_NET_CLS_CGROUP=y ++CONFIG_NET_EMATCH=y ++CONFIG_NET_EMATCH_CMP=m ++CONFIG_NET_EMATCH_NBYTE=m ++CONFIG_NET_EMATCH_U32=m ++CONFIG_NET_EMATCH_META=m ++CONFIG_NET_EMATCH_TEXT=m ++CONFIG_NET_CLS_ACT=y ++CONFIG_NET_ACT_POLICE=m ++CONFIG_NET_ACT_GACT=m ++CONFIG_GACT_PROB=y ++CONFIG_NET_ACT_MIRRED=m ++CONFIG_NET_ACT_IPT=m ++CONFIG_NET_ACT_NAT=m ++CONFIG_NET_ACT_PEDIT=m ++CONFIG_NET_ACT_SIMP=m ++CONFIG_NET_ACT_SKBEDIT=m ++CONFIG_NET_CLS_IND=y ++CONFIG_DCB=y ++CONFIG_DNS_RESOLVER=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_DEVTMPFS=y ++CONFIG_DEVTMPFS_MOUNT=y ++CONFIG_CONNECTOR=y ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_CRYPTOLOOP=m ++CONFIG_BLK_DEV_SX8=m ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=16384 ++CONFIG_ATA_OVER_ETH=m ++CONFIG_RAID_ATTRS=m ++CONFIG_BLK_DEV_SD=y ++CONFIG_SCSI_CONSTANTS=y ++CONFIG_SCSI_LOGGING=y ++CONFIG_ATA=y ++CONFIG_SATA_SIL24=y ++# CONFIG_ATA_SFF is not set ++CONFIG_MD=y ++CONFIG_BLK_DEV_MD=y ++CONFIG_MD_LINEAR=m ++CONFIG_MD_RAID0=m ++CONFIG_MD_RAID1=m ++CONFIG_MD_RAID10=m ++CONFIG_MD_RAID456=m ++CONFIG_MD_FAULTY=m ++CONFIG_BLK_DEV_DM=m ++CONFIG_DM_DEBUG=y ++CONFIG_DM_CRYPT=m ++CONFIG_DM_SNAPSHOT=m ++CONFIG_DM_MIRROR=m ++CONFIG_DM_LOG_USERSPACE=m ++CONFIG_DM_ZERO=m ++CONFIG_DM_MULTIPATH=m ++CONFIG_DM_MULTIPATH_QL=m ++CONFIG_DM_MULTIPATH_ST=m ++CONFIG_DM_DELAY=m ++CONFIG_DM_UEVENT=y ++CONFIG_FUSION=y ++CONFIG_FUSION_SAS=y ++CONFIG_NETDEVICES=y ++CONFIG_BONDING=m ++CONFIG_DUMMY=m ++CONFIG_IFB=m ++CONFIG_MACVLAN=m ++CONFIG_MACVTAP=m ++CONFIG_NETCONSOLE=m ++CONFIG_NETCONSOLE_DYNAMIC=y ++CONFIG_TUN=y ++CONFIG_VETH=m ++CONFIG_NET_DSA_MV88E6060=y ++CONFIG_NET_DSA_MV88E6XXX=y ++# CONFIG_NET_VENDOR_3COM is not set ++CONFIG_E1000E=y ++# CONFIG_WLAN is not set ++# CONFIG_INPUT_MOUSEDEV is not set ++# CONFIG_INPUT_KEYBOARD is not set ++# CONFIG_INPUT_MOUSE is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++CONFIG_HW_RANDOM=y ++CONFIG_HW_RANDOM_TIMERIOMEM=m ++CONFIG_I2C=y ++CONFIG_I2C_CHARDEV=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_WATCHDOG_NOWAYOUT=y ++# CONFIG_VGA_ARB is not set ++# CONFIG_USB_SUPPORT is not set ++CONFIG_EDAC=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_TILE=y ++CONFIG_EXT2_FS=y ++CONFIG_EXT2_FS_XATTR=y ++CONFIG_EXT2_FS_POSIX_ACL=y ++CONFIG_EXT2_FS_SECURITY=y ++CONFIG_EXT2_FS_XIP=y ++CONFIG_EXT3_FS=y ++CONFIG_EXT3_FS_POSIX_ACL=y ++CONFIG_EXT3_FS_SECURITY=y ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_FS_POSIX_ACL=y ++CONFIG_EXT4_FS_SECURITY=y ++CONFIG_XFS_FS=y ++CONFIG_XFS_QUOTA=y ++CONFIG_XFS_POSIX_ACL=y ++CONFIG_GFS2_FS=m ++CONFIG_GFS2_FS_LOCKING_DLM=y ++CONFIG_BTRFS_FS=m ++CONFIG_BTRFS_FS_POSIX_ACL=y ++CONFIG_QUOTA=y ++CONFIG_QUOTA_NETLINK_INTERFACE=y ++# CONFIG_PRINT_QUOTA_WARNING is not set ++CONFIG_QFMT_V2=y ++CONFIG_AUTOFS4_FS=m ++CONFIG_FUSE_FS=y ++CONFIG_CUSE=m ++CONFIG_FSCACHE=m ++CONFIG_FSCACHE_STATS=y ++CONFIG_CACHEFILES=m ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_ZISOFS=y ++CONFIG_UDF_FS=m ++CONFIG_MSDOS_FS=m 
++CONFIG_VFAT_FS=m ++CONFIG_FAT_DEFAULT_IOCHARSET="ascii" ++CONFIG_PROC_KCORE=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_HUGETLBFS=y ++CONFIG_CONFIGFS_FS=m ++CONFIG_ECRYPT_FS=m ++CONFIG_CRAMFS=m ++CONFIG_SQUASHFS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3_ACL=y ++CONFIG_NFS_V4=m ++CONFIG_NFS_V4_1=y ++CONFIG_NFS_FSCACHE=y ++CONFIG_NFSD=m ++CONFIG_NFSD_V3_ACL=y ++CONFIG_NFSD_V4=y ++CONFIG_CIFS=m ++CONFIG_CIFS_STATS=y ++CONFIG_CIFS_WEAK_PW_HASH=y ++CONFIG_CIFS_UPCALL=y ++CONFIG_CIFS_XATTR=y ++CONFIG_CIFS_POSIX=y ++CONFIG_CIFS_DFS_UPCALL=y ++CONFIG_CIFS_FSCACHE=y ++CONFIG_NLS=y ++CONFIG_NLS_DEFAULT="utf8" ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_CODEPAGE_737=m ++CONFIG_NLS_CODEPAGE_775=m ++CONFIG_NLS_CODEPAGE_850=m ++CONFIG_NLS_CODEPAGE_852=m ++CONFIG_NLS_CODEPAGE_855=m ++CONFIG_NLS_CODEPAGE_857=m ++CONFIG_NLS_CODEPAGE_860=m ++CONFIG_NLS_CODEPAGE_861=m ++CONFIG_NLS_CODEPAGE_862=m ++CONFIG_NLS_CODEPAGE_863=m ++CONFIG_NLS_CODEPAGE_864=m ++CONFIG_NLS_CODEPAGE_865=m ++CONFIG_NLS_CODEPAGE_866=m ++CONFIG_NLS_CODEPAGE_869=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_CODEPAGE_950=m ++CONFIG_NLS_CODEPAGE_932=m ++CONFIG_NLS_CODEPAGE_949=m ++CONFIG_NLS_CODEPAGE_874=m ++CONFIG_NLS_ISO8859_8=m ++CONFIG_NLS_CODEPAGE_1250=m ++CONFIG_NLS_CODEPAGE_1251=m ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_ISO8859_2=m ++CONFIG_NLS_ISO8859_3=m ++CONFIG_NLS_ISO8859_4=m ++CONFIG_NLS_ISO8859_5=m ++CONFIG_NLS_ISO8859_6=m ++CONFIG_NLS_ISO8859_7=m ++CONFIG_NLS_ISO8859_9=m ++CONFIG_NLS_ISO8859_13=m ++CONFIG_NLS_ISO8859_14=m ++CONFIG_NLS_ISO8859_15=m ++CONFIG_NLS_KOI8_R=m ++CONFIG_NLS_KOI8_U=m ++CONFIG_NLS_UTF8=m ++CONFIG_DLM=m ++CONFIG_DLM_DEBUG=y ++CONFIG_DYNAMIC_DEBUG=y ++CONFIG_DEBUG_INFO=y ++CONFIG_DEBUG_INFO_REDUCED=y ++# CONFIG_ENABLE_WARN_DEPRECATED is not set ++CONFIG_FRAME_WARN=2048 ++CONFIG_STRIP_ASM_SYMS=y ++CONFIG_DEBUG_FS=y ++CONFIG_HEADERS_CHECK=y ++# CONFIG_FRAME_POINTER is not set ++CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_DEBUG_VM=y ++CONFIG_DEBUG_MEMORY_INIT=y ++CONFIG_DEBUG_STACKOVERFLOW=y ++CONFIG_LOCKUP_DETECTOR=y ++CONFIG_SCHEDSTATS=y ++CONFIG_TIMER_STATS=y ++CONFIG_DEBUG_LIST=y ++CONFIG_DEBUG_CREDENTIALS=y ++CONFIG_RCU_CPU_STALL_TIMEOUT=60 ++CONFIG_ASYNC_RAID6_TEST=m ++CONFIG_SECURITY=y ++CONFIG_SECURITYFS=y ++CONFIG_SECURITY_NETWORK=y ++CONFIG_SECURITY_NETWORK_XFRM=y ++CONFIG_SECURITY_SELINUX=y ++CONFIG_SECURITY_SELINUX_BOOTPARAM=y ++CONFIG_SECURITY_SELINUX_DISABLE=y ++CONFIG_CRYPTO_PCRYPT=m ++CONFIG_CRYPTO_CRYPTD=m ++CONFIG_CRYPTO_TEST=m ++CONFIG_CRYPTO_CCM=m ++CONFIG_CRYPTO_GCM=m ++CONFIG_CRYPTO_CTS=m ++CONFIG_CRYPTO_LRW=m ++CONFIG_CRYPTO_PCBC=m ++CONFIG_CRYPTO_XTS=m ++CONFIG_CRYPTO_HMAC=y ++CONFIG_CRYPTO_XCBC=m ++CONFIG_CRYPTO_VMAC=m ++CONFIG_CRYPTO_MICHAEL_MIC=m ++CONFIG_CRYPTO_RMD128=m ++CONFIG_CRYPTO_RMD160=m ++CONFIG_CRYPTO_RMD256=m ++CONFIG_CRYPTO_RMD320=m ++CONFIG_CRYPTO_SHA1=y ++CONFIG_CRYPTO_SHA512=m ++CONFIG_CRYPTO_TGR192=m ++CONFIG_CRYPTO_WP512=m ++CONFIG_CRYPTO_ANUBIS=m ++CONFIG_CRYPTO_BLOWFISH=m ++CONFIG_CRYPTO_CAMELLIA=m ++CONFIG_CRYPTO_CAST5=m ++CONFIG_CRYPTO_CAST6=m ++CONFIG_CRYPTO_FCRYPT=m ++CONFIG_CRYPTO_KHAZAD=m ++CONFIG_CRYPTO_SEED=m ++CONFIG_CRYPTO_SERPENT=m ++CONFIG_CRYPTO_TEA=m ++CONFIG_CRYPTO_TWOFISH=m ++CONFIG_CRYPTO_LZO=m ++CONFIG_CRC_CCITT=m ++CONFIG_CRC7=m +diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig +index 9f908112bbb9..25452ca053b8 100644 +--- a/arch/x86/configs/i386_defconfig ++++ b/arch/x86/configs/i386_defconfig +@@ -41,7 +41,7 @@ CONFIG_SMP=y + CONFIG_X86_GENERIC=y + CONFIG_HPET_TIMER=y + 
CONFIG_SCHED_SMT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+ CONFIG_X86_MCE=y
+ CONFIG_X86_REBOOTFIXUPS=y
+diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
+index 1d3badfda09e..235b63cb08fa 100644
+--- a/arch/x86/configs/x86_64_defconfig
++++ b/arch/x86/configs/x86_64_defconfig
+@@ -40,7 +40,7 @@ CONFIG_SMP=y
+ CONFIG_CALGARY_IOMMU=y
+ CONFIG_NR_CPUS=64
+ CONFIG_SCHED_SMT=y
+-CONFIG_PREEMPT_VOLUNTARY=y
++CONFIG_PREEMPT=y
+ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+ CONFIG_X86_MCE=y
+ CONFIG_MICROCODE=y
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index 0fee5fe6c899..9cf10230d5fb 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -1,7 +1,7 @@
+
+ choice
+ 	prompt "Preemption Model"
+-	default PREEMPT_NONE
++	default PREEMPT
+
+ config PREEMPT_NONE
+ 	bool "No Forced Preemption (Server)"
+@@ -17,7 +17,7 @@ config PREEMPT_NONE
+ 	  latencies.
+
+ config PREEMPT_VOLUNTARY
+-	bool "Voluntary Kernel Preemption (Desktop)"
++	bool "Voluntary Kernel Preemption (Nothing)"
+ 	depends on !ARCH_NO_PREEMPT
+ 	help
+ 	  This option reduces the latency of the kernel by adding more
+@@ -32,7 +32,8 @@ config PREEMPT_VOLUNTARY
+ 	  applications to run more 'smoothly' even when the system is
+ 	  under load.
+
+-	  Select this if you are building a kernel for a desktop system.
++	  Select this for no system in particular (choose Preemptible
++	  instead on a desktop if you know what's good for you).
+
+ config PREEMPT
+ 	bool "Preemptible Kernel (Low-Latency Desktop)"
+--
+2.17.1
+
diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0004-Expose-vmsplit-for-our-poor-32-bit-users.patch b/sys-kernel/linux-sources-redcore/files/5.1-0004-Expose-vmsplit-for-our-poor-32-bit-users.patch
new file mode 100644
index 00000000..fd180743
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore/files/5.1-0004-Expose-vmsplit-for-our-poor-32-bit-users.patch
@@ -0,0 +1,48 @@
+From 2dd86e6f35c475b4b42b0b96a0b47a39a630736e Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Fri, 12 May 2017 13:07:37 +1000
+Subject: [PATCH 04/16] Expose vmsplit for our poor 32 bit users.
+
+---
+ arch/x86/Kconfig | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index fe5df269452d..29ecd7d99cfc 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1426,7 +1426,7 @@ config HIGHMEM64G
+ endchoice
+
+ choice
+-	prompt "Memory split" if EXPERT
++	prompt "Memory split"
+ 	default VMSPLIT_3G
+ 	depends on X86_32
+ 	---help---
+@@ -1446,17 +1446,17 @@ choice
+ 	   option alone!
+
+ config VMSPLIT_3G
+-	bool "3G/1G user/kernel split"
++	bool "Default 896MB lowmem (3G/1G user/kernel split)"
+ config VMSPLIT_3G_OPT
+ 	depends on !X86_PAE
+-	bool "3G/1G user/kernel split (for full 1G low memory)"
++	bool "1GB lowmem (3G/1G user/kernel split)"
+ config VMSPLIT_2G
+-	bool "2G/2G user/kernel split"
++	bool "2GB lowmem (2G/2G user/kernel split)"
+ config VMSPLIT_2G_OPT
+ 	depends on !X86_PAE
+-	bool "2G/2G user/kernel split (for full 2G low memory)"
++	bool "2GB lowmem (2G/2G user/kernel split)"
+ config VMSPLIT_1G
+-	bool "1G/3G user/kernel split"
++	bool "3GB lowmem (1G/3G user/kernel split)"
+ endchoice
+
+ config PAGE_OFFSET
+--
+2.17.1
+
diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0005-Create-highres-timeout-variants-of-schedule_timeout-.patch b/sys-kernel/linux-sources-redcore/files/5.1-0005-Create-highres-timeout-variants-of-schedule_timeout-.patch
new file mode 100644
index 00000000..5c7831cb
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore/files/5.1-0005-Create-highres-timeout-variants-of-schedule_timeout-.patch
@@ -0,0 +1,153 @@
+From 35c6a7df0bc36fdf3cb8e14c0ba8c73b6b17dded Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Sat, 12 Aug 2017 11:53:39 +1000
+Subject: [PATCH 05/16] Create highres timeout variants of schedule_timeout
+ functions.
+
+---
+ include/linux/freezer.h | 1 +
+ include/linux/sched.h | 31 ++++++++++++++++--
+ kernel/time/hrtimer.c | 71 +++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 101 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index 21f5aa0b217f..ee9b46394fdf 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -297,6 +297,7 @@ static inline void set_freezable(void) {}
+ #define wait_event_freezekillable_unsafe(wq, condition)	\
+ 	wait_event_killable(wq, condition)
+
++#define pm_freezing (false)
+ #endif /* !CONFIG_FREEZER */
+
+ #endif /* FREEZER_H_INCLUDED */
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 71d849ef5ee2..14ab8a8f8b1c 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -214,13 +214,40 @@ struct task_group;
+
+ extern void scheduler_tick(void);
+
+-#define MAX_SCHEDULE_TIMEOUT	LONG_MAX
+-
++#define MAX_SCHEDULE_TIMEOUT	LONG_MAX
+ extern long schedule_timeout(long timeout);
+ extern long schedule_timeout_interruptible(long timeout);
+ extern long schedule_timeout_killable(long timeout);
+ extern long schedule_timeout_uninterruptible(long timeout);
+ extern long schedule_timeout_idle(long timeout);
++
++#ifdef CONFIG_HIGH_RES_TIMERS
++extern long schedule_msec_hrtimeout(long timeout);
++extern long schedule_min_hrtimeout(void);
++extern long schedule_msec_hrtimeout_interruptible(long timeout);
++extern long schedule_msec_hrtimeout_uninterruptible(long timeout);
++#else
++static inline long schedule_msec_hrtimeout(long timeout)
++{
++	return schedule_timeout(msecs_to_jiffies(timeout));
++}
++
++static inline long schedule_min_hrtimeout(void)
++{
++	return schedule_timeout(1);
++}
++
++static inline long schedule_msec_hrtimeout_interruptible(long timeout)
++{
++	return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
++}
++
++static inline long schedule_msec_hrtimeout_uninterruptible(long timeout)
++{
++	return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
++}
++#endif
++
+ asmlinkage void schedule(void);
+ extern void schedule_preempt_disabled(void);
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 41dfff23c1f9..c6ea49693bca 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2017,3 +2017,74 @@ int __sched schedule_hrtimeout(ktime_t *expires,
+ 	return schedule_hrtimeout_range(expires, 0, mode);
+ }
+ EXPORT_SYMBOL_GPL(schedule_hrtimeout);
++
++/*
++ * As per schedule_hrtimeout but taskes a millisecond value and returns how
++ * many milliseconds are left.
++ */
++long __sched schedule_msec_hrtimeout(long timeout)
++{
++	struct hrtimer_sleeper t;
++	int delta, secs, jiffs;
++	ktime_t expires;
++
++	if (!timeout) {
++		__set_current_state(TASK_RUNNING);
++		return 0;
++	}
++
++	jiffs = msecs_to_jiffies(timeout);
++	/*
++	 * If regular timer resolution is adequate or hrtimer resolution is not
++	 * (yet) better than Hz, as would occur during startup, use regular
++	 * timers.
++	 */
++	if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
++		return schedule_timeout(jiffs);
++
++	secs = timeout / 1000;
++	delta = (timeout % 1000) * NSEC_PER_MSEC;
++	expires = ktime_set(secs, delta);
++
++	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++	hrtimer_set_expires_range_ns(&t.timer, expires, delta);
++
++	hrtimer_init_sleeper(&t, current);
++
++	hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
++
++	if (likely(t.task))
++		schedule();
++
++	hrtimer_cancel(&t.timer);
++	destroy_hrtimer_on_stack(&t.timer);
++
++	__set_current_state(TASK_RUNNING);
++
++	expires = hrtimer_expires_remaining(&t.timer);
++	timeout = ktime_to_ms(expires);
++	return timeout < 0 ? 0 : timeout;
++}
++
++EXPORT_SYMBOL(schedule_msec_hrtimeout);
++
++long __sched schedule_min_hrtimeout(void)
++{
++	return schedule_msec_hrtimeout(1);
++}
++
++EXPORT_SYMBOL(schedule_min_hrtimeout);
++
++long __sched schedule_msec_hrtimeout_interruptible(long timeout)
++{
++	__set_current_state(TASK_INTERRUPTIBLE);
++	return schedule_msec_hrtimeout(timeout);
++}
++EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
++
++long __sched schedule_msec_hrtimeout_uninterruptible(long timeout)
++{
++	__set_current_state(TASK_UNINTERRUPTIBLE);
++	return schedule_msec_hrtimeout(timeout);
++}
++EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
+--
+2.17.1
+
diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0006-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch b/sys-kernel/linux-sources-redcore/files/5.1-0006-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
new file mode 100644
index 00000000..4c5c24e7
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore/files/5.1-0006-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
@@ -0,0 +1,49 @@
+From cda5868e93585d3751bcb991e00735502cba2566 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Sat, 5 Nov 2016 09:27:36 +1100
+Subject: [PATCH 06/16] Special case calls of schedule_timeout(1) to use the
+ min hrtimeout of 1ms, working around low Hz resolutions.
+
+---
+ kernel/time/timer.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 926ab73595a2..98803a47491c 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1800,6 +1800,18 @@ signed long __sched schedule_timeout(signed long timeout)
+
+ 	expire = timeout + jiffies;
+
++#ifdef CONFIG_HIGH_RES_TIMERS
++	if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++		/*
++		 * Special case 1 as being a request for the minimum timeout
++		 * and use highres timers to timeout after 1ms to workaround
++		 * the granularity of low Hz tick timers.
++		 */
++		if (!schedule_min_hrtimeout())
++			return 0;
++		goto out_timeout;
++	}
++#endif
+ 	timer.task = current;
+ 	timer_setup_on_stack(&timer.timer, process_timeout, 0);
+ 	__mod_timer(&timer.timer, expire, 0);
+@@ -1808,10 +1820,10 @@ signed long __sched schedule_timeout(signed long timeout)
+
+ 	/* Remove the timer from the object tracker */
+ 	destroy_timer_on_stack(&timer.timer);
+-
++out_timeout:
+ 	timeout = expire - jiffies;
+
+- out:
++out:
+ 	return timeout < 0 ? 0 : timeout;
+ }
+ EXPORT_SYMBOL(schedule_timeout);
+--
+2.17.1
+
diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0007-Convert-msleep-to-use-hrtimers-when-active.patch b/sys-kernel/linux-sources-redcore/files/5.1-0007-Convert-msleep-to-use-hrtimers-when-active.patch
new file mode 100644
index 00000000..9dead86e
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore/files/5.1-0007-Convert-msleep-to-use-hrtimers-when-active.patch
@@ -0,0 +1,54 @@
+From b713b0c571fa869fec376742be0e9c217ab38dab Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Fri, 4 Nov 2016 09:25:54 +1100
+Subject: [PATCH 07/16] Convert msleep to use hrtimers when active.
+
+---
+ kernel/time/timer.c | 24 ++++++++++++++++++++++--
+ 1 file changed, 22 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 98803a47491c..3ab277ba0f44 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1964,7 +1964,19 @@ void __init init_timers(void)
+  */
+ void msleep(unsigned int msecs)
+ {
+-	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
++	int jiffs = msecs_to_jiffies(msecs);
++	unsigned long timeout;
++
++	/*
++	 * Use high resolution timers where the resolution of tick based
++	 * timers is inadequate.
++	 */
++	if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++		while (msecs)
++			msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
++		return;
++	}
++	timeout = msecs_to_jiffies(msecs) + 1;
+
+ 	while (timeout)
+ 		timeout = schedule_timeout_uninterruptible(timeout);
+@@ -1978,7 +1990,15 @@ EXPORT_SYMBOL(msleep);
+  */
+ unsigned long msleep_interruptible(unsigned int msecs)
+ {
+-	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
++	int jiffs = msecs_to_jiffies(msecs);
++	unsigned long timeout;
++
++	if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
++		while (msecs && !signal_pending(current))
++			msecs = schedule_msec_hrtimeout_interruptible(msecs);
++		return msecs;
++	}
++	timeout = msecs_to_jiffies(msecs) + 1;
+
+ 	while (timeout && !signal_pending(current))
+ 		timeout = schedule_timeout_interruptible(timeout);
+--
+2.17.1
+
diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch b/sys-kernel/linux-sources-redcore/files/5.1-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
new file mode 100644
index 00000000..b21caaeb
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore/files/5.1-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
@@ -0,0 +1,995 @@
+From 97bf976c1a61af48431ce97bc129a0f448e8c735 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Mon, 20 Feb 2017 13:28:30 +1100
+Subject: [PATCH 08/16] Replace all schedule timeout(1) with
+ schedule_min_hrtimeout()
+
+---
+ drivers/block/swim.c | 6 +-
+ drivers/char/ipmi/ipmi_msghandler.c | 2 +-
+ drivers/char/ipmi/ipmi_ssif.c | 2 +-
+ drivers/char/snsc.c | 4 +-
+ drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2 +-
+ drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 2 +-
+ drivers/media/pci/ivtv/ivtv-ioctl.c | 2 +-
+
drivers/media/pci/ivtv/ivtv-streams.c | 2 +-
+ drivers/mfd/ucb1x00-core.c | 2 +-
+ drivers/misc/sgi-xp/xpc_channel.c | 2 +-
+ drivers/net/caif/caif_hsi.c | 2 +-
+ drivers/net/can/usb/peak_usb/pcan_usb.c | 2 +-
+ drivers/net/usb/lan78xx.c | 2 +-
+ drivers/net/usb/usbnet.c | 2 +-
+ drivers/scsi/fnic/fnic_scsi.c | 4 +-
+ drivers/scsi/snic/snic_scsi.c | 2 +-
+ .../staging/comedi/drivers/ni_mio_common.c | 2 +-
+ drivers/staging/lustre/lnet/lnet/lib-eq.c | 426 ++++++++++++++++++
+ drivers/staging/rts5208/rtsx.c | 2 +-
+ drivers/staging/speakup/speakup_acntpc.c | 4 +-
+ drivers/staging/speakup/speakup_apollo.c | 2 +-
+ drivers/staging/speakup/speakup_decext.c | 2 +-
+ drivers/staging/speakup/speakup_decpc.c | 2 +-
+ drivers/staging/speakup/speakup_dectlk.c | 2 +-
+ drivers/staging/speakup/speakup_dtlk.c | 4 +-
+ drivers/staging/speakup/speakup_keypc.c | 4 +-
+ drivers/staging/speakup/synth.c | 14 +-
+ .../staging/unisys/visornic/visornic_main.c | 6 +-
+ drivers/video/fbdev/omap/hwa742.c | 2 +-
+ drivers/video/fbdev/pxafb.c | 2 +-
+ fs/btrfs/extent-tree.c | 2 +-
+ fs/btrfs/inode-map.c | 2 +-
+ sound/usb/line6/pcm.c | 2 +-
+ 33 files changed, 470 insertions(+), 50 deletions(-)
+ create mode 100644 drivers/staging/lustre/lnet/lnet/lib-eq.c
+
+diff --git a/drivers/block/swim.c b/drivers/block/swim.c
+index 3fa6fcc34790..278486c8266d 100644
+--- a/drivers/block/swim.c
++++ b/drivers/block/swim.c
+@@ -332,7 +332,7 @@ static inline void swim_motor(struct swim __iomem *base,
+ 			if (swim_readbit(base, MOTOR_ON))
+ 				break;
+ 			current->state = TASK_INTERRUPTIBLE;
+-			schedule_timeout(1);
++			schedule_min_hrtimeout();
+ 		}
+ 	} else if (action == OFF) {
+ 		swim_action(base, MOTOR_OFF);
+@@ -351,7 +351,7 @@ static inline void swim_eject(struct swim __iomem *base)
+ 		if (!swim_readbit(base, DISK_IN))
+ 			break;
+ 		current->state = TASK_INTERRUPTIBLE;
+-		schedule_timeout(1);
++		schedule_min_hrtimeout();
+ 	}
+ 	swim_select(base, RELAX);
+ }
+@@ -375,7 +375,7 @@ static inline int swim_step(struct swim __iomem *base)
+ 	for (wait = 0; wait < HZ; wait++) {
+
+ 		current->state = TASK_INTERRUPTIBLE;
+-		schedule_timeout(1);
++		schedule_min_hrtimeout();
+
+ 		swim_select(base, RELAX);
+ 		if (!swim_readbit(base, STEP))
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 00bf4b17edbf..71c49540cb25 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -3541,7 +3541,7 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
+ 	/* Current message first, to preserve order */
+ 	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+ 		/* Wait for the message to clear out. */
+-		schedule_timeout(1);
++		schedule_min_hrtimeout();
+ 	}
+
+ 	/* No need for locks, the interface is down. */
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 8b5aec5430f1..a5737d29cd93 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -1285,7 +1285,7 @@ static void shutdown_ssif(void *send_info)
+
+ 	/* make sure the driver is not looking for flags any more.
*/ + while (ssif_info->ssif_state != SSIF_NORMAL) +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + + ssif_info->stopping = true; + del_timer_sync(&ssif_info->watch_timer); +diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c +index 5918ea7499bb..5228e78df804 100644 +--- a/drivers/char/snsc.c ++++ b/drivers/char/snsc.c +@@ -198,7 +198,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos) + add_wait_queue(&sd->sd_rq, &wait); + spin_unlock_irqrestore(&sd->sd_rlock, flags); + +- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT)); ++ schedule_msec_hrtimeout((SCDRV_TIMEOUT)); + + remove_wait_queue(&sd->sd_rq, &wait); + if (signal_pending(current)) { +@@ -294,7 +294,7 @@ scdrv_write(struct file *file, const char __user *buf, + add_wait_queue(&sd->sd_wq, &wait); + spin_unlock_irqrestore(&sd->sd_wlock, flags); + +- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT)); ++ schedule_msec_hrtimeout((SCDRV_TIMEOUT)); + + remove_wait_queue(&sd->sd_wq, &wait); + if (signal_pending(current)) { +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +index d0fd147ef75f..730ae4fe6b85 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +@@ -235,7 +235,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv, + DRM_ERROR("SVGA device lockup.\n"); + break; + } +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + if (interruptible && signal_pending(current)) { + ret = -ERESTARTSYS; + break; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +index c3ad4478266b..7e2a29d56459 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +@@ -202,7 +202,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, + break; + } + if (lazy) +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + else if ((++count & 0x0F) == 0) { + /** + * FIXME: Use schedule_hr_timeout here for +diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c +index 6c269ecd8d05..69becedee614 100644 +--- a/drivers/media/pci/ivtv/ivtv-ioctl.c ++++ b/drivers/media/pci/ivtv/ivtv-ioctl.c +@@ -1156,7 +1156,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std) + TASK_UNINTERRUPTIBLE); + if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100) + break; +- schedule_timeout(msecs_to_jiffies(25)); ++ schedule_msec_hrtimeout((25)); + } + finish_wait(&itv->vsync_waitq, &wait); + mutex_lock(&itv->serialize_lock); +diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c +index a641f20e3f86..e1b40d2b4bed 100644 +--- a/drivers/media/pci/ivtv/ivtv-streams.c ++++ b/drivers/media/pci/ivtv/ivtv-streams.c +@@ -843,7 +843,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end) + while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) && + time_before(jiffies, + then + msecs_to_jiffies(2000))) { +- schedule_timeout(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout((10)); + } + + /* To convert jiffies to ms, we must multiply by 1000 +diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c +index d6fb2e1a759a..7ac951b84beb 100644 +--- a/drivers/mfd/ucb1x00-core.c ++++ b/drivers/mfd/ucb1x00-core.c +@@ -253,7 +253,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync) + break; + /* yield to other processes */ + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + + return UCB_ADC_DAT(val); +diff --git 
a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c +index 8e6607fc8a67..b9ab770bbdb5 100644 +--- a/drivers/misc/sgi-xp/xpc_channel.c ++++ b/drivers/misc/sgi-xp/xpc_channel.c +@@ -834,7 +834,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) + + atomic_inc(&ch->n_on_msg_allocate_wq); + prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE); +- ret = schedule_timeout(1); ++ ret = schedule_min_hrtimeout(); + finish_wait(&ch->msg_allocate_wq, &wait); + atomic_dec(&ch->n_on_msg_allocate_wq); + +diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c +index 433a14b9f731..4d197a99472b 100644 +--- a/drivers/net/caif/caif_hsi.c ++++ b/drivers/net/caif/caif_hsi.c +@@ -939,7 +939,7 @@ static void cfhsi_wake_down(struct work_struct *work) + break; + + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + retry--; + } + +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c +index 13238a72a338..fc51ae55c63f 100644 +--- a/drivers/net/can/usb/peak_usb/pcan_usb.c ++++ b/drivers/net/can/usb/peak_usb/pcan_usb.c +@@ -250,7 +250,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff) + } else { + /* the PCAN-USB needs time to init */ + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT)); ++ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT)); + } + + return err; +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index 3d92ea6fcc02..de564401fd4d 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -2674,7 +2674,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev) + while (!skb_queue_empty(&dev->rxq) && + !skb_queue_empty(&dev->txq) && + !skb_queue_empty(&dev->done)) { +- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); ++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS)); + set_current_state(TASK_UNINTERRUPTIBLE); + netif_dbg(dev, ifdown, dev->net, + "waited for %d urb completions\n", temp); +diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c +index 504282af27e5..da60ab9b56c7 100644 +--- a/drivers/net/usb/usbnet.c ++++ b/drivers/net/usb/usbnet.c +@@ -770,7 +770,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q) + spin_lock_irqsave(&q->lock, flags); + while (!skb_queue_empty(q)) { + spin_unlock_irqrestore(&q->lock, flags); +- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); ++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS)); + set_current_state(TASK_UNINTERRUPTIBLE); + spin_lock_irqsave(&q->lock, flags); + } +diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c +index 80608b53897b..84051b538fa8 100644 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@ -216,7 +216,7 @@ int fnic_fw_reset_handler(struct fnic *fnic) + + /* wait for io cmpl */ + while (atomic_read(&fnic->in_flight)) +- schedule_timeout(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout((1)); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + +@@ -2273,7 +2273,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, + } + } + +- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); ++ schedule_msec_hrtimeout((2 * fnic->config.ed_tov)); + + /* walk again to check, if IOs are still pending in fw */ + if (fnic_is_abts_pending(fnic, lr_sc)) +diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c +index b3650c989ed4..7ed1fb285754 100644 +--- a/drivers/scsi/snic/snic_scsi.c ++++ 
b/drivers/scsi/snic/snic_scsi.c +@@ -2353,7 +2353,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc) + + /* Wait for all the IOs that are entered in Qcmd */ + while (atomic_read(&snic->ios_inflight)) +- schedule_timeout(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout((1)); + + ret = snic_issue_hba_reset(snic, sc); + if (ret) { +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c +index b04dad8c7092..27c824bdebf0 100644 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c +@@ -4727,7 +4727,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev) + if ((status & NI67XX_CAL_STATUS_BUSY) == 0) + break; + set_current_state(TASK_INTERRUPTIBLE); +- if (schedule_timeout(1)) ++ if (schedule_min_hrtimeout()) + return -EIO; + } + if (i == timeout) { +diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c +new file mode 100644 +index 000000000000..8cca151741b2 +--- /dev/null ++++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c +@@ -0,0 +1,426 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * GPL HEADER START ++ * ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 only, ++ * as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License version 2 for more details (a copy is included ++ * in the LICENSE file that accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License ++ * version 2 along with this program; If not, see ++ * http://www.gnu.org/licenses/gpl-2.0.html ++ * ++ * GPL HEADER END ++ */ ++/* ++ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. ++ * Use is subject to license terms. ++ * ++ * Copyright (c) 2012, Intel Corporation. ++ */ ++/* ++ * This file is part of Lustre, http://www.lustre.org/ ++ * Lustre is a trademark of Sun Microsystems, Inc. ++ * ++ * lnet/lnet/lib-eq.c ++ * ++ * Library level Event queue management routines ++ */ ++ ++#define DEBUG_SUBSYSTEM S_LNET ++ ++#include <linux/lnet/lib-lnet.h> ++ ++/** ++ * Create an event queue that has room for \a count number of events. ++ * ++ * The event queue is circular and older events will be overwritten by new ++ * ones if they are not removed in time by the user using the functions ++ * LNetEQGet(), LNetEQWait(), or LNetEQPoll(). It is up to the user to ++ * determine the appropriate size of the event queue to prevent this loss ++ * of events. Note that when EQ handler is specified in \a callback, no ++ * event loss can happen, since the handler is run for each event deposited ++ * into the EQ. ++ * ++ * \param count The number of events to be stored in the event queue. It ++ * will be rounded up to the next power of two. ++ * \param callback A handler function that runs when an event is deposited ++ * into the EQ. The constant value LNET_EQ_HANDLER_NONE can be used to ++ * indicate that no event handler is desired. ++ * \param handle On successful return, this location will hold a handle for ++ * the newly created EQ. ++ * ++ * \retval 0 On success. ++ * \retval -EINVAL If an parameter is not valid. 
++ * \retval -ENOMEM If memory for the EQ can't be allocated. ++ * ++ * \see lnet_eq_handler_t for the discussion on EQ handler semantics. ++ */ ++int ++LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback, ++ struct lnet_handle_eq *handle) ++{ ++ struct lnet_eq *eq; ++ ++ LASSERT(the_lnet.ln_refcount > 0); ++ ++ /* ++ * We need count to be a power of 2 so that when eq_{enq,deq}_seq ++ * overflow, they don't skip entries, so the queue has the same ++ * apparent capacity at all times ++ */ ++ if (count) ++ count = roundup_pow_of_two(count); ++ ++ if (callback != LNET_EQ_HANDLER_NONE && count) ++ CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count); ++ ++ /* ++ * count can be 0 if only need callback, we can eliminate ++ * overhead of enqueue event ++ */ ++ if (!count && callback == LNET_EQ_HANDLER_NONE) ++ return -EINVAL; ++ ++ eq = kzalloc(sizeof(*eq), GFP_NOFS); ++ if (!eq) ++ return -ENOMEM; ++ ++ if (count) { ++ eq->eq_events = kvmalloc_array(count, sizeof(struct lnet_event), ++ GFP_KERNEL | __GFP_ZERO); ++ if (!eq->eq_events) ++ goto failed; ++ /* ++ * NB allocator has set all event sequence numbers to 0, ++ * so all them should be earlier than eq_deq_seq ++ */ ++ } ++ ++ eq->eq_deq_seq = 1; ++ eq->eq_enq_seq = 1; ++ eq->eq_size = count; ++ eq->eq_callback = callback; ++ ++ eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(), ++ sizeof(*eq->eq_refs[0])); ++ if (!eq->eq_refs) ++ goto failed; ++ ++ /* MUST hold both exclusive lnet_res_lock */ ++ lnet_res_lock(LNET_LOCK_EX); ++ /* ++ * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do ++ * both EQ lookup and poll event with only lnet_eq_wait_lock ++ */ ++ lnet_eq_wait_lock(); ++ ++ lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh); ++ list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active); ++ ++ lnet_eq_wait_unlock(); ++ lnet_res_unlock(LNET_LOCK_EX); ++ ++ lnet_eq2handle(handle, eq); ++ return 0; ++ ++failed: ++ kvfree(eq->eq_events); ++ ++ if (eq->eq_refs) ++ cfs_percpt_free(eq->eq_refs); ++ ++ kfree(eq); ++ return -ENOMEM; ++} ++EXPORT_SYMBOL(LNetEQAlloc); ++ ++/** ++ * Release the resources associated with an event queue if it's idle; ++ * otherwise do nothing and it's up to the user to try again. ++ * ++ * \param eqh A handle for the event queue to be released. ++ * ++ * \retval 0 If the EQ is not in use and freed. ++ * \retval -ENOENT If \a eqh does not point to a valid EQ. ++ * \retval -EBUSY If the EQ is still in use by some MDs. 
++ */ ++int ++LNetEQFree(struct lnet_handle_eq eqh) ++{ ++ struct lnet_eq *eq; ++ struct lnet_event *events = NULL; ++ int **refs = NULL; ++ int *ref; ++ int rc = 0; ++ int size = 0; ++ int i; ++ ++ LASSERT(the_lnet.ln_refcount > 0); ++ ++ lnet_res_lock(LNET_LOCK_EX); ++ /* ++ * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do ++ * both EQ lookup and poll event with only lnet_eq_wait_lock ++ */ ++ lnet_eq_wait_lock(); ++ ++ eq = lnet_handle2eq(&eqh); ++ if (!eq) { ++ rc = -ENOENT; ++ goto out; ++ } ++ ++ cfs_percpt_for_each(ref, i, eq->eq_refs) { ++ LASSERT(*ref >= 0); ++ if (!*ref) ++ continue; ++ ++ CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n", ++ i, *ref); ++ rc = -EBUSY; ++ goto out; ++ } ++ ++ /* stash for free after lock dropped */ ++ events = eq->eq_events; ++ size = eq->eq_size; ++ refs = eq->eq_refs; ++ ++ lnet_res_lh_invalidate(&eq->eq_lh); ++ list_del(&eq->eq_list); ++ kfree(eq); ++ out: ++ lnet_eq_wait_unlock(); ++ lnet_res_unlock(LNET_LOCK_EX); ++ ++ kvfree(events); ++ if (refs) ++ cfs_percpt_free(refs); ++ ++ return rc; ++} ++EXPORT_SYMBOL(LNetEQFree); ++ ++void ++lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev) ++{ ++ /* MUST called with resource lock hold but w/o lnet_eq_wait_lock */ ++ int index; ++ ++ if (!eq->eq_size) { ++ LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE); ++ eq->eq_callback(ev); ++ return; ++ } ++ ++ lnet_eq_wait_lock(); ++ ev->sequence = eq->eq_enq_seq++; ++ ++ LASSERT(eq->eq_size == LOWEST_BIT_SET(eq->eq_size)); ++ index = ev->sequence & (eq->eq_size - 1); ++ ++ eq->eq_events[index] = *ev; ++ ++ if (eq->eq_callback != LNET_EQ_HANDLER_NONE) ++ eq->eq_callback(ev); ++ ++ /* Wake anyone waiting in LNetEQPoll() */ ++ if (waitqueue_active(&the_lnet.ln_eq_waitq)) ++ wake_up_all(&the_lnet.ln_eq_waitq); ++ lnet_eq_wait_unlock(); ++} ++ ++static int ++lnet_eq_dequeue_event(struct lnet_eq *eq, struct lnet_event *ev) ++{ ++ int new_index = eq->eq_deq_seq & (eq->eq_size - 1); ++ struct lnet_event *new_event = &eq->eq_events[new_index]; ++ int rc; ++ ++ /* must called with lnet_eq_wait_lock hold */ ++ if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence)) ++ return 0; ++ ++ /* We've got a new event... */ ++ *ev = *new_event; ++ ++ CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n", ++ new_event, eq->eq_deq_seq, eq->eq_size); ++ ++ /* ...but did it overwrite an event we've not seen yet? */ ++ if (eq->eq_deq_seq == new_event->sequence) { ++ rc = 1; ++ } else { ++ /* ++ * don't complain with CERROR: some EQs are sized small ++ * anyway; if it's important, the caller should complain ++ */ ++ CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n", ++ eq->eq_deq_seq, new_event->sequence); ++ rc = -EOVERFLOW; ++ } ++ ++ eq->eq_deq_seq = new_event->sequence + 1; ++ return rc; ++} ++ ++/** ++ * A nonblocking function that can be used to get the next event in an EQ. ++ * If an event handler is associated with the EQ, the handler will run before ++ * this function returns successfully. The event is removed from the queue. ++ * ++ * \param eventq A handle for the event queue. ++ * \param event On successful return (1 or -EOVERFLOW), this location will ++ * hold the next event in the EQ. ++ * ++ * \retval 0 No pending event in the EQ. ++ * \retval 1 Indicates success. ++ * \retval -ENOENT If \a eventq does not point to a valid EQ. 
++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that ++ * at least one event between this event and the last event obtained from the ++ * EQ has been dropped due to limited space in the EQ. ++ */ ++ ++/** ++ * Block the calling process until there is an event in the EQ. ++ * If an event handler is associated with the EQ, the handler will run before ++ * this function returns successfully. This function returns the next event ++ * in the EQ and removes it from the EQ. ++ * ++ * \param eventq A handle for the event queue. ++ * \param event On successful return (1 or -EOVERFLOW), this location will ++ * hold the next event in the EQ. ++ * ++ * \retval 1 Indicates success. ++ * \retval -ENOENT If \a eventq does not point to a valid EQ. ++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that ++ * at least one event between this event and the last event obtained from the ++ * EQ has been dropped due to limited space in the EQ. ++ */ ++ ++static int ++lnet_eq_wait_locked(int *timeout_ms, long state) ++__must_hold(&the_lnet.ln_eq_wait_lock) ++{ ++ int tms = *timeout_ms; ++ int wait; ++ wait_queue_entry_t wl; ++ unsigned long now; ++ ++ if (!tms) ++ return -ENXIO; /* don't want to wait and no new event */ ++ ++ init_waitqueue_entry(&wl, current); ++ set_current_state(state); ++ add_wait_queue(&the_lnet.ln_eq_waitq, &wl); ++ ++ lnet_eq_wait_unlock(); ++ ++ if (tms < 0) { ++ schedule(); ++ } else { ++ now = jiffies; ++ schedule_msec_hrtimeout((tms)); ++ tms -= jiffies_to_msecs(jiffies - now); ++ if (tms < 0) /* no more wait but may have new event */ ++ tms = 0; ++ } ++ ++ wait = tms; /* might need to call here again */ ++ *timeout_ms = tms; ++ ++ lnet_eq_wait_lock(); ++ remove_wait_queue(&the_lnet.ln_eq_waitq, &wl); ++ ++ return wait; ++} ++ ++/** ++ * Block the calling process until there's an event from a set of EQs or ++ * timeout happens. ++ * ++ * If an event handler is associated with the EQ, the handler will run before ++ * this function returns successfully, in which case the corresponding event ++ * is consumed. ++ * ++ * LNetEQPoll() provides a timeout to allow applications to poll, block for a ++ * fixed period, or block indefinitely. ++ * ++ * \param eventqs,neq An array of EQ handles, and size of the array. ++ * \param timeout_ms Time in milliseconds to wait for an event to occur on ++ * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an ++ * infinite timeout. ++ * \param interruptible, if true, use TASK_INTERRUPTIBLE, else TASK_NOLOAD ++ * \param event,which On successful return (1 or -EOVERFLOW), \a event will ++ * hold the next event in the EQs, and \a which will contain the index of the ++ * EQ from which the event was taken. ++ * ++ * \retval 0 No pending event in the EQs after timeout. ++ * \retval 1 Indicates success. ++ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that ++ * at least one event between this event and the last event obtained from the ++ * EQ indicated by \a which has been dropped due to limited space in the EQ. ++ * \retval -ENOENT If there's an invalid handle in \a eventqs. 
++ */ ++int ++LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, int timeout_ms, ++ int interruptible, ++ struct lnet_event *event, int *which) ++{ ++ int wait = 1; ++ int rc; ++ int i; ++ ++ LASSERT(the_lnet.ln_refcount > 0); ++ ++ if (neq < 1) ++ return -ENOENT; ++ ++ lnet_eq_wait_lock(); ++ ++ for (;;) { ++ for (i = 0; i < neq; i++) { ++ struct lnet_eq *eq = lnet_handle2eq(&eventqs[i]); ++ ++ if (!eq) { ++ lnet_eq_wait_unlock(); ++ return -ENOENT; ++ } ++ ++ rc = lnet_eq_dequeue_event(eq, event); ++ if (rc) { ++ lnet_eq_wait_unlock(); ++ *which = i; ++ return rc; ++ } ++ } ++ ++ if (!wait) ++ break; ++ ++ /* ++ * return value of lnet_eq_wait_locked: ++ * -1 : did nothing and it's sure no new event ++ * 1 : sleep inside and wait until new event ++ * 0 : don't want to wait anymore, but might have new event ++ * so need to call dequeue again ++ */ ++ wait = lnet_eq_wait_locked(&timeout_ms, ++ interruptible ? TASK_INTERRUPTIBLE ++ : TASK_NOLOAD); ++ if (wait < 0) /* no new event */ ++ break; ++ } ++ ++ lnet_eq_wait_unlock(); ++ return 0; ++} +diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c +index fa597953e9a0..685cf842badc 100644 +--- a/drivers/staging/rts5208/rtsx.c ++++ b/drivers/staging/rts5208/rtsx.c +@@ -490,7 +490,7 @@ static int rtsx_polling_thread(void *__dev) + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL)); ++ schedule_msec_hrtimeout((POLLING_INTERVAL)); + + /* lock the device pointers */ + mutex_lock(&dev->dev_mutex); +diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c +index c94328a5bd4a..6e7d4671aa69 100644 +--- a/drivers/staging/speakup/speakup_acntpc.c ++++ b/drivers/staging/speakup/speakup_acntpc.c +@@ -198,7 +198,7 @@ static void do_catch_up(struct spk_synth *synth) + full_time_val = full_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (synth_full()) { +- schedule_timeout(msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout((full_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +@@ -226,7 +226,7 @@ static void do_catch_up(struct spk_synth *synth) + jiffy_delta_val = jiffy_delta->u.n.value; + delay_time_val = delay_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout(delay_time_val); + jiff_max = jiffies + jiffy_delta_val; + } + } +diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c +index 0877b4044c28..627102d048c1 100644 +--- a/drivers/staging/speakup/speakup_apollo.c ++++ b/drivers/staging/speakup/speakup_apollo.c +@@ -165,7 +165,7 @@ static void do_catch_up(struct spk_synth *synth) + if (!synth->io_ops->synth_out(synth, ch)) { + synth->io_ops->tiocmset(0, UART_MCR_RTS); + synth->io_ops->tiocmset(UART_MCR_RTS, 0); +- schedule_timeout(msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout(full_time_val); + continue; + } + if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) { +diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c +index ddbb7e97d118..f9502addc765 100644 +--- a/drivers/staging/speakup/speakup_decext.c ++++ b/drivers/staging/speakup/speakup_decext.c +@@ -176,7 +176,7 @@ static void do_catch_up(struct spk_synth *synth) + if (ch == '\n') + ch = 0x0D; + if (synth_full() || !synth->io_ops->synth_out(synth, ch)) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ 
schedule_msec_hrtimeout(delay_time_val); + continue; + } + set_current_state(TASK_RUNNING); +diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c +index 459ee0c0bd57..52b539544c98 100644 +--- a/drivers/staging/speakup/speakup_decpc.c ++++ b/drivers/staging/speakup/speakup_decpc.c +@@ -394,7 +394,7 @@ static void do_catch_up(struct spk_synth *synth) + if (ch == '\n') + ch = 0x0D; + if (dt_sendchar(ch)) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c +index dccb4ea29d37..8ecead307d04 100644 +--- a/drivers/staging/speakup/speakup_dectlk.c ++++ b/drivers/staging/speakup/speakup_dectlk.c +@@ -244,7 +244,7 @@ static void do_catch_up(struct spk_synth *synth) + if (ch == '\n') + ch = 0x0D; + if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout(delay_time_val); + continue; + } + set_current_state(TASK_RUNNING); +diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c +index dbebed0eeeec..6d83c13ca4a6 100644 +--- a/drivers/staging/speakup/speakup_dtlk.c ++++ b/drivers/staging/speakup/speakup_dtlk.c +@@ -211,7 +211,7 @@ static void do_catch_up(struct spk_synth *synth) + delay_time_val = delay_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (synth_full()) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +@@ -227,7 +227,7 @@ static void do_catch_up(struct spk_synth *synth) + delay_time_val = delay_time->u.n.value; + jiffy_delta_val = jiffy_delta->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + jiff_max = jiffies + jiffy_delta_val; + } + } +diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c +index b788272da4f9..d5dac16c04d8 100644 +--- a/drivers/staging/speakup/speakup_keypc.c ++++ b/drivers/staging/speakup/speakup_keypc.c +@@ -199,7 +199,7 @@ static void do_catch_up(struct spk_synth *synth) + full_time_val = full_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (synth_full()) { +- schedule_timeout(msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout((full_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +@@ -232,7 +232,7 @@ static void do_catch_up(struct spk_synth *synth) + jiffy_delta_val = jiffy_delta->u.n.value; + delay_time_val = delay_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + jiff_max = jiffies+jiffy_delta_val; + } + } +diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c +index 3568bfb89912..0a80b3b098b2 100644 +--- a/drivers/staging/speakup/synth.c ++++ b/drivers/staging/speakup/synth.c +@@ -93,12 +93,8 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode) + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (ch == '\n') + ch = synth->procspeech; +- if (unicode) +- ret = synth->io_ops->synth_out_unicode(synth, ch); +- else +- ret = synth->io_ops->synth_out(synth, ch); +- if (!ret) { +- 
schedule_timeout(msecs_to_jiffies(full_time_val)); ++ if (!synth->io_ops->synth_out(synth, ch)) { ++ schedule_msec_hrtimeout(full_time_val); + continue; + } + if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) { +@@ -108,11 +104,9 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode) + full_time_val = full_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (synth->io_ops->synth_out(synth, synth->procspeech)) +- schedule_timeout( +- msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout(delay_time_val); + else +- schedule_timeout( +- msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout(full_time_val); + jiff_max = jiffies + jiffy_delta_val; + } + set_current_state(TASK_RUNNING); +diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c +index 1c1a470d2e50..cbac00e586b0 100644 +--- a/drivers/staging/unisys/visornic/visornic_main.c ++++ b/drivers/staging/unisys/visornic/visornic_main.c +@@ -549,7 +549,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev, + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&devdata->priv_lock, flags); +- wait += schedule_timeout(msecs_to_jiffies(10)); ++ wait += schedule_msec_hrtimeout((10)); + spin_lock_irqsave(&devdata->priv_lock, flags); + } + +@@ -560,7 +560,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev, + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&devdata->priv_lock, flags); +- schedule_timeout(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout((10)); + spin_lock_irqsave(&devdata->priv_lock, flags); + if (atomic_read(&devdata->usage)) + break; +@@ -714,7 +714,7 @@ static int visornic_enable_with_timeout(struct net_device *netdev, + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&devdata->priv_lock, flags); +- wait += schedule_timeout(msecs_to_jiffies(10)); ++ wait += schedule_msec_hrtimeout((10)); + spin_lock_irqsave(&devdata->priv_lock, flags); + } + +diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c +index 6199d4806193..7c7165f2dad4 100644 +--- a/drivers/video/fbdev/omap/hwa742.c ++++ b/drivers/video/fbdev/omap/hwa742.c +@@ -926,7 +926,7 @@ static void hwa742_resume(void) + if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7)) + break; + set_current_state(TASK_UNINTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(5)); ++ schedule_msec_hrtimeout((5)); + } + hwa742_set_update_mode(hwa742.update_mode_before_suspend); + } +diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c +index d59c8a59f582..e103cce28de7 100644 +--- a/drivers/video/fbdev/pxafb.c ++++ b/drivers/video/fbdev/pxafb.c +@@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg) + mutex_unlock(&fbi->ctrlr_lock); + + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(30)); ++ schedule_msec_hrtimeout((30)); + } + + pr_debug("%s(): task ending\n", __func__); +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index c5880329ae37..aad921814170 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -6206,7 +6206,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) + flush = BTRFS_RESERVE_FLUSH_LIMIT; + + if (btrfs_transaction_in_commit(fs_info)) +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + + if (delalloc_lock) +diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c +index ffca2abf13d0..89b2a7f7397e 100644 +--- 
a/fs/btrfs/inode-map.c ++++ b/fs/btrfs/inode-map.c +@@ -75,7 +75,7 @@ static int caching_kthread(void *data) + btrfs_release_path(path); + root->ino_cache_progress = last; + up_read(&fs_info->commit_root_sem); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + goto again; + } else + continue; +diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c +index 72c6f8e82a7e..46d8c2a148ad 100644 +--- a/sound/usb/line6/pcm.c ++++ b/sound/usb/line6/pcm.c +@@ -131,7 +131,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm, + if (!alive) + break; + set_current_state(TASK_UNINTERRUPTIBLE); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } while (--timeout > 0); + if (alive) + dev_err(line6pcm->line6->ifcdev, +-- +2.17.1 + diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0009-Replace-all-calls-to-schedule_timeout_interruptible-.patch b/sys-kernel/linux-sources-redcore/files/5.1-0009-Replace-all-calls-to-schedule_timeout_interruptible-.patch new file mode 100644 index 00000000..5e044ab3 --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-0009-Replace-all-calls-to-schedule_timeout_interruptible-.patch @@ -0,0 +1,311 @@ +From ed9d6f38013a94e1ea56b33c02706a4079b31e92 Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Mon, 20 Feb 2017 13:30:07 +1100 +Subject: [PATCH 09/16] Replace all calls to schedule_timeout_interruptible of + potentially under 50ms to use schedule_msec_hrtimeout_interruptible. + +--- + drivers/hwmon/fam15h_power.c | 2 +- + drivers/iio/light/tsl2563.c | 6 +----- + drivers/media/i2c/msp3400-driver.c | 4 ++-- + drivers/media/pci/ivtv/ivtv-gpio.c | 6 +++--- + drivers/media/radio/radio-mr800.c | 2 +- + drivers/media/radio/radio-tea5777.c | 2 +- + drivers/media/radio/tea575x.c | 2 +- + drivers/parport/ieee1284.c | 2 +- + drivers/parport/ieee1284_ops.c | 2 +- + drivers/platform/x86/intel_ips.c | 8 ++++---- + net/core/pktgen.c | 2 +- + sound/soc/codecs/wm8350.c | 12 ++++++------ + sound/soc/codecs/wm8900.c | 2 +- + sound/soc/codecs/wm9713.c | 4 ++-- + 14 files changed, 26 insertions(+), 30 deletions(-) + +diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c +index 9545a346044f..c24cf1302ec7 100644 +--- a/drivers/hwmon/fam15h_power.c ++++ b/drivers/hwmon/fam15h_power.c +@@ -237,7 +237,7 @@ static ssize_t power1_average_show(struct device *dev, + prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu]; + } + +- leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period)); ++ leftover = schedule_msec_hrtimeout_interruptible((data->power_period)); + if (leftover) + return 0; + +diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c +index 6bbb0b1e6032..f4b83648c405 100644 +--- a/drivers/iio/light/tsl2563.c ++++ b/drivers/iio/light/tsl2563.c +@@ -282,11 +282,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip) + default: + delay = 402; + } +- /* +- * TODO: Make sure that we wait at least required delay but why we +- * have to extend it one tick more? 
+- */ +- schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2); ++ schedule_msec_hrtimeout_interruptible(delay + 1); + } + + static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc) +diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c +index 522fb1d561e7..47d1afee1d04 100644 +--- a/drivers/media/i2c/msp3400-driver.c ++++ b/drivers/media/i2c/msp3400-driver.c +@@ -179,7 +179,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr) + break; + dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err, + dev, addr); +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + } + if (err == 3) { + dev_warn(&client->dev, "resetting chip, sound will go off.\n"); +@@ -220,7 +220,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val) + break; + dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err, + dev, addr); +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + } + if (err == 3) { + dev_warn(&client->dev, "resetting chip, sound will go off.\n"); +diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c +index f752f3993687..23372af61ebf 100644 +--- a/drivers/media/pci/ivtv/ivtv-gpio.c ++++ b/drivers/media/pci/ivtv/ivtv-gpio.c +@@ -117,7 +117,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv) + curout = (curout & ~0xF) | 1; + write_reg(curout, IVTV_REG_GPIO_OUT); + /* We could use something else for smaller time */ +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible((1)); + curout |= 2; + write_reg(curout, IVTV_REG_GPIO_OUT); + curdir &= ~0x80; +@@ -137,11 +137,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value) + curout = read_reg(IVTV_REG_GPIO_OUT); + curout &= ~(1 << itv->card->xceive_pin); + write_reg(curout, IVTV_REG_GPIO_OUT); +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible((1)); + + curout |= 1 << itv->card->xceive_pin; + write_reg(curout, IVTV_REG_GPIO_OUT); +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible((1)); + return 0; + } + +diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c +index ab1324f68199..3fdb422a5caa 100644 +--- a/drivers/media/radio/radio-mr800.c ++++ b/drivers/media/radio/radio-mr800.c +@@ -378,7 +378,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv, + retval = -ENODATA; + break; + } +- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) { ++ if (schedule_msec_hrtimeout_interruptible((10))) { + retval = -ERESTARTSYS; + break; + } +diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c +index 61f751cf1aa4..7eb30468091e 100644 +--- a/drivers/media/radio/radio-tea5777.c ++++ b/drivers/media/radio/radio-tea5777.c +@@ -245,7 +245,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait) + } + + if (wait) { +- if (schedule_timeout_interruptible(msecs_to_jiffies(wait))) ++ if (schedule_msec_hrtimeout_interruptible((wait))) + return -ERESTARTSYS; + } + +diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c +index f89f83e04741..325987cd5997 100644 +--- a/drivers/media/radio/tea575x.c ++++ b/drivers/media/radio/tea575x.c +@@ -416,7 +416,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea, + for (;;) { + if 
(time_after(jiffies, timeout)) + break; +- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) { ++ if (schedule_msec_hrtimeout_interruptible((10))) { + /* some signal arrived, stop search */ + tea->val &= ~TEA575X_BIT_SEARCH; + snd_tea575x_set_freq(tea); +diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c +index f12b9da69255..6ca6eecbdb2d 100644 +--- a/drivers/parport/ieee1284.c ++++ b/drivers/parport/ieee1284.c +@@ -208,7 +208,7 @@ int parport_wait_peripheral(struct parport *port, + /* parport_wait_event didn't time out, but the + * peripheral wasn't actually ready either. + * Wait for another 10ms. */ +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + } + } + +diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c +index 5d41dda6da4e..34705f6b423f 100644 +--- a/drivers/parport/ieee1284_ops.c ++++ b/drivers/parport/ieee1284_ops.c +@@ -537,7 +537,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port, + /* Yield the port for a while. */ + if (count && dev->port->irq != PARPORT_IRQ_NONE) { + parport_release (dev); +- schedule_timeout_interruptible(msecs_to_jiffies(40)); ++ schedule_msec_hrtimeout_interruptible((40)); + parport_claim_or_block (dev); + } + else +diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c +index bffe548187ee..c2918ee3e100 100644 +--- a/drivers/platform/x86/intel_ips.c ++++ b/drivers/platform/x86/intel_ips.c +@@ -798,7 +798,7 @@ static int ips_adjust(void *data) + ips_gpu_lower(ips); + + sleep: +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD)); + } while (!kthread_should_stop()); + + dev_dbg(ips->dev, "ips-adjust thread stopped\n"); +@@ -974,7 +974,7 @@ static int ips_monitor(void *data) + seqno_timestamp = get_jiffies_64(); + + old_cpu_power = thm_readl(THM_CEC); +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD)); + + /* Collect an initial average */ + for (i = 0; i < IPS_SAMPLE_COUNT; i++) { +@@ -1001,7 +1001,7 @@ static int ips_monitor(void *data) + mchp_samples[i] = mchp; + } + +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD)); + if (kthread_should_stop()) + break; + } +@@ -1028,7 +1028,7 @@ static int ips_monitor(void *data) + * us to reduce the sample frequency if the CPU and GPU are idle. 
+ */ + old_cpu_power = thm_readl(THM_CEC); +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD)); + last_sample_period = IPS_SAMPLE_PERIOD; + + timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE); +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index f3f5a78cd062..edbed00a06ed 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -1901,7 +1901,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname) + mutex_unlock(&pktgen_thread_lock); + pr_debug("%s: waiting for %s to disappear....\n", + __func__, ifname); +- schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try)); ++ schedule_msec_hrtimeout_interruptible((msec_per_try)); + mutex_lock(&pktgen_thread_lock); + + if (++i >= max_tries) { +diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c +index e92ebe52d485..88791ebb6df0 100644 +--- a/sound/soc/codecs/wm8350.c ++++ b/sound/soc/codecs/wm8350.c +@@ -236,10 +236,10 @@ static void wm8350_pga_work(struct work_struct *work) + out2->ramp == WM8350_RAMP_UP) { + /* delay is longer over 0dB as increases are larger */ + if (i >= WM8350_OUTn_0dB) +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (2)); + else +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (1)); + } else + udelay(50); /* doesn't matter if we delay longer */ +@@ -1123,7 +1123,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component, + (platform->dis_out4 << 6)); + + /* wait for discharge */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform-> + cap_discharge_msecs)); + +@@ -1139,7 +1139,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component, + WM8350_VBUFEN); + + /* wait for vmid */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform-> + vmid_charge_msecs)); + +@@ -1190,7 +1190,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component, + wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1); + + /* wait */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform-> + vmid_discharge_msecs)); + +@@ -1208,7 +1208,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component, + pm1 | WM8350_OUTPUT_DRAIN_EN); + + /* wait */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform->drain_msecs)); + + pm1 &= ~WM8350_BIASEN; +diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c +index 1a14e902949d..68f17d9877ec 100644 +--- a/sound/soc/codecs/wm8900.c ++++ b/sound/soc/codecs/wm8900.c +@@ -1112,7 +1112,7 @@ static int wm8900_set_bias_level(struct snd_soc_component *component, + /* Need to let things settle before stopping the clock + * to ensure that restart works, see "Stopping the + * master clock" in the datasheet. */ +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible(1); + snd_soc_component_write(component, WM8900_REG_POWER2, + WM8900_REG_POWER2_SYSCLK_ENA); + break; +diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c +index 5a2fdf4f69bf..aeb4e759de4c 100644 +--- a/sound/soc/codecs/wm9713.c ++++ b/sound/soc/codecs/wm9713.c +@@ -203,7 +203,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w, + + /* Gracefully shut down the voice interface. 
*/ + snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0200); +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible(1); + snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0f00); + snd_soc_component_update_bits(component, AC97_EXTENDED_MID, 0x1000, 0x1000); + +@@ -872,7 +872,7 @@ static int wm9713_set_pll(struct snd_soc_component *component, + wm9713->pll_in = freq_in; + + /* wait 10ms AC97 link frames for the link to stabilise */ +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + return 0; + } + +-- +2.17.1 + diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0010-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch b/sys-kernel/linux-sources-redcore/files/5.1-0010-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch new file mode 100644 index 00000000..ab601f4a --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-0010-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch @@ -0,0 +1,160 @@ +From 4c8554b4da764ea564a9a2ef9faa481a61cb1a4b Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Mon, 20 Feb 2017 13:30:32 +1100 +Subject: [PATCH 10/16] Replace all calls to schedule_timeout_uninterruptible + of potentially under 50ms to use schedule_msec_hrtimeout_uninterruptible + +--- + drivers/media/pci/cx18/cx18-gpio.c | 4 ++-- + drivers/net/wireless/intel/ipw2x00/ipw2100.c | 4 ++-- + drivers/rtc/rtc-wm8350.c | 6 +++--- + drivers/scsi/lpfc/lpfc_scsi.c | 2 +- + sound/pci/maestro3.c | 4 ++-- + sound/soc/codecs/rt5631.c | 4 ++-- + sound/soc/soc-dapm.c | 2 +- + 7 files changed, 13 insertions(+), 13 deletions(-) + +diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c +index 012859e6dc7b..206bd08265a5 100644 +--- a/drivers/media/pci/cx18/cx18-gpio.c ++++ b/drivers/media/pci/cx18/cx18-gpio.c +@@ -90,11 +90,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi, + + /* Assert */ + gpio_update(cx, mask, ~active_lo); +- schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs)); ++ schedule_msec_hrtimeout_uninterruptible((assert_msecs)); + + /* Deassert */ + gpio_update(cx, mask, ~active_hi); +- schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs)); ++ schedule_msec_hrtimeout_uninterruptible((recovery_msecs)); + } + + /* +diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c +index 52e5ed2d3bc2..7d72a8b62700 100644 +--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c ++++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c +@@ -830,7 +830,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv, + * doesn't seem to have as many firmware restart cycles... + * + * As a test, we're sticking in a 1/100s delay here */ +- schedule_timeout_uninterruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_uninterruptible((10)); + + return 0; + +@@ -1281,7 +1281,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv) + IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n"); + i = 5000; + do { +- schedule_timeout_uninterruptible(msecs_to_jiffies(40)); ++ schedule_msec_hrtimeout_uninterruptible((40)); + /* Todo... wait for sync command ... 
*/ + + read_register(priv->net_dev, IPW_REG_INTA, &inta); +diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c +index 483c7993516b..fddbaa475066 100644 +--- a/drivers/rtc/rtc-wm8350.c ++++ b/drivers/rtc/rtc-wm8350.c +@@ -119,7 +119,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm) + /* Wait until confirmation of stopping */ + do { + rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); +- schedule_timeout_uninterruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_uninterruptible((1)); + } while (--retries && !(rtc_ctrl & WM8350_RTC_STS)); + + if (!retries) { +@@ -202,7 +202,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350) + /* Wait until confirmation of stopping */ + do { + rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); +- schedule_timeout_uninterruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_uninterruptible((1)); + } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS)); + + if (!(rtc_ctrl & WM8350_RTC_ALMSTS)) +@@ -225,7 +225,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350) + /* Wait until confirmation */ + do { + rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); +- schedule_timeout_uninterruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_uninterruptible((1)); + } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS); + + if (rtc_ctrl & WM8350_RTC_ALMSTS) +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c +index a497b2c0cb79..1ed8a04c5cdf 100644 +--- a/drivers/scsi/lpfc/lpfc_scsi.c ++++ b/drivers/scsi/lpfc/lpfc_scsi.c +@@ -5007,7 +5007,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, + tgt_id, lun_id, context); + later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + while (time_after(later, jiffies) && cnt) { +- schedule_timeout_uninterruptible(msecs_to_jiffies(20)); ++ schedule_msec_hrtimeout_uninterruptible((20)); + cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); + } + if (cnt) { +diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c +index 1a9468c14aaf..410ad89a3c7c 100644 +--- a/sound/pci/maestro3.c ++++ b/sound/pci/maestro3.c +@@ -2016,7 +2016,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip) + outw(0, io + GPIO_DATA); + outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION); + +- schedule_timeout_uninterruptible(msecs_to_jiffies(delay1)); ++ schedule_msec_hrtimeout_uninterruptible((delay1)); + + outw(GPO_PRIMARY_AC97, io + GPIO_DATA); + udelay(5); +@@ -2024,7 +2024,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip) + outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A); + outw(~0, io + GPIO_MASK); + +- schedule_timeout_uninterruptible(msecs_to_jiffies(delay2)); ++ schedule_msec_hrtimeout_uninterruptible((delay2)); + + if (! 
snd_m3_try_read_vendor(chip)) + break; +diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c +index 865f49ac38dd..3c1190dd114f 100644 +--- a/sound/soc/codecs/rt5631.c ++++ b/sound/soc/codecs/rt5631.c +@@ -419,7 +419,7 @@ static void onebit_depop_mute_stage(struct snd_soc_component *component, int ena + hp_zc = snd_soc_component_read32(component, RT5631_INT_ST_IRQ_CTRL_2); + snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff); + if (enable) { +- schedule_timeout_uninterruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_uninterruptible((10)); + /* config one-bit depop parameter */ + rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x307f); + snd_soc_component_update_bits(component, RT5631_HP_OUT_VOL, +@@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_component *component, int enable + hp_zc = snd_soc_component_read32(component, RT5631_INT_ST_IRQ_CTRL_2); + snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff); + if (enable) { +- schedule_timeout_uninterruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_uninterruptible((10)); + + /* config depop sequence parameter */ + rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x302f); +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 0382a47b30bd..0d805d3563ca 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -154,7 +154,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm) + static void pop_wait(u32 pop_time) + { + if (pop_time) +- schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time)); ++ schedule_msec_hrtimeout_uninterruptible((pop_time)); + } + + __printf(3, 4) +-- +2.17.1 + diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0011-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch b/sys-kernel/linux-sources-redcore/files/5.1-0011-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch new file mode 100644 index 00000000..a18d030a --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-0011-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch @@ -0,0 +1,69 @@ +From 876598b9dbe9c1f27feae36c2e2deacdd4beaf9d Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Mon, 20 Feb 2017 13:32:58 +1100 +Subject: [PATCH 11/16] Don't use hrtimer overlay when pm_freezing since some + drivers still don't correctly use freezable timeouts. + +--- + kernel/time/hrtimer.c | 2 +- + kernel/time/timer.c | 9 +++++---- + 2 files changed, 6 insertions(+), 5 deletions(-) + +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index c6ea49693bca..17ad543fbbc4 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -2039,7 +2039,7 @@ long __sched schedule_msec_hrtimeout(long timeout) + * (yet) better than Hz, as would occur during startup, use regular + * timers. + */ +- if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ) ++ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing) + return schedule_timeout(jiffs); + + secs = timeout / 1000; +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index 3ab277ba0f44..28509c518461 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -43,6 +43,7 @@ + #include <linux/sched/debug.h> + #include <linux/slab.h> + #include <linux/compat.h> ++#include <linux/freezer.h> + + #include <linux/uaccess.h> + #include <asm/unistd.h> +@@ -1971,12 +1972,12 @@ void msleep(unsigned int msecs) + * Use high resolution timers where the resolution of tick based + * timers is inadequate. 
+ */ +- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) { + while (msecs) + msecs = schedule_msec_hrtimeout_uninterruptible(msecs); + return; + } +- timeout = msecs_to_jiffies(msecs) + 1; ++ timeout = jiffs + 1; + + while (timeout) + timeout = schedule_timeout_uninterruptible(timeout); +@@ -1993,12 +1994,12 @@ unsigned long msleep_interruptible(unsigned int msecs) + int jiffs = msecs_to_jiffies(msecs); + unsigned long timeout; + +- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) { + while (msecs && !signal_pending(current)) + msecs = schedule_msec_hrtimeout_interruptible(msecs); + return msecs; + } +- timeout = msecs_to_jiffies(msecs) + 1; ++ timeout = jiffs + 1; + + while (timeout && !signal_pending(current)) + timeout = schedule_timeout_interruptible(timeout); +-- +2.17.1 + diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0012-Make-threaded-IRQs-optionally-the-default-which-can-.patch b/sys-kernel/linux-sources-redcore/files/5.1-0012-Make-threaded-IRQs-optionally-the-default-which-can-.patch new file mode 100644 index 00000000..3e40227f --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-0012-Make-threaded-IRQs-optionally-the-default-which-can-.patch @@ -0,0 +1,67 @@ +From 50ddde6e3c62abd39dc6a3cd5941febed7ad49c3 Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Wed, 7 Dec 2016 21:13:16 +1100 +Subject: [PATCH 13/16] Make threaded IRQs optionally the default which can be + disabled. + +--- + kernel/irq/Kconfig | 17 +++++++++++++++++ + kernel/irq/manage.c | 11 +++++++++++ + 2 files changed, 28 insertions(+) + +diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig +index 5f3e2baefca9..de3e5740679b 100644 +--- a/kernel/irq/Kconfig ++++ b/kernel/irq/Kconfig +@@ -107,6 +107,23 @@ config GENERIC_IRQ_RESERVATION_MODE + config IRQ_FORCED_THREADING + bool + ++config FORCE_IRQ_THREADING ++ bool "Make IRQ threading compulsory" ++ depends on IRQ_FORCED_THREADING ++ default n ++ ---help--- ++ ++ Make IRQ threading mandatory for any IRQ handlers that support it ++ instead of being optional and requiring the threadirqs kernel ++ parameter. Instead they can be optionally disabled with the ++ nothreadirqs kernel parameter. ++ ++ Enabling this may make some architectures not boot with runqueue ++ sharing and MuQSS. ++ ++ Enable if you are building for a desktop or low latency system, ++ otherwise say N. 
++ + config SPARSE_IRQ + bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ + ---help--- +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 1401afa0d58a..54394031b536 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -23,9 +23,20 @@ + #include "internals.h" + + #ifdef CONFIG_IRQ_FORCED_THREADING ++#ifdef CONFIG_FORCE_IRQ_THREADING ++__read_mostly bool force_irqthreads = true; ++#else + __read_mostly bool force_irqthreads; ++#endif + EXPORT_SYMBOL_GPL(force_irqthreads); + ++static int __init setup_noforced_irqthreads(char *arg) ++{ ++ force_irqthreads = false; ++ return 0; ++} ++early_param("nothreadirqs", setup_noforced_irqthreads); ++ + static int __init setup_forced_irqthreads(char *arg) + { + force_irqthreads = true; +-- +2.17.1 + diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0013-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch b/sys-kernel/linux-sources-redcore/files/5.1-0013-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch new file mode 100644 index 00000000..cd46a360 --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-0013-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch @@ -0,0 +1,81 @@ +From f1b776971e6aa46347f035adfebfd71d5f0930bb Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Wed, 7 Dec 2016 21:23:01 +1100 +Subject: [PATCH 14/16] Reinstate default Hz of 100 in combination with MuQSS + and -ck patches. + +--- + kernel/Kconfig.hz | 25 ++++++++++++++++++------- + 1 file changed, 18 insertions(+), 7 deletions(-) + +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1806fcac8f14 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,8 @@ + + choice + prompt "Timer frequency" +- default HZ_250 ++ default HZ_100 if SCHED_MUQSS ++ default HZ_250_NODEF if !SCHED_MUQSS + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -19,11 +20,18 @@ choice + config HZ_100 + bool "100 HZ" + help ++ 100 Hz is a suitable choice in combination with MuQSS which does ++ not rely on ticks for rescheduling interrupts, and is not Hz limited ++ for timeouts and sleeps from both the kernel and userspace. ++ This allows us to benefit from the lower overhead and higher ++ throughput of fewer timer ticks. ++ ++ Non-MuQSS kernels: + 100 Hz is a typical choice for servers, SMP and NUMA systems + with lots of processors that may show reduced performance if + too many timer interrupts are occurring. + +- config HZ_250 ++ config HZ_250_NODEF + bool "250 HZ" + help + 250 Hz is a good compromise choice allowing server performance +@@ -31,7 +39,10 @@ choice + on SMP and NUMA systems. If you are going to be using NTSC video + or multimedia, selected 300Hz instead. + +- config HZ_300 ++ 250 Hz is the default choice for the mainline scheduler but not ++ advantageous in combination with MuQSS. ++ ++ config HZ_300_NODEF + bool "300 HZ" + help + 300 Hz is a good compromise choice allowing server performance +@@ -39,7 +50,7 @@ choice + on SMP and NUMA systems and exactly dividing by both PAL and + NTSC frame rates for video and multimedia work. 
+ +- config HZ_1000 ++ config HZ_1000_NODEF + bool "1000 HZ" + help + 1000 Hz is the preferred choice for desktop systems and other +@@ -50,9 +61,9 @@ endchoice + config HZ + int + default 100 if HZ_100 +- default 250 if HZ_250 +- default 300 if HZ_300 +- default 1000 if HZ_1000 ++ default 250 if HZ_250_NODEF ++ default 300 if HZ_300_NODEF ++ default 1000 if HZ_1000_NODEF + + config SCHED_HRTICK + def_bool HIGH_RES_TIMERS +-- +2.17.1 + diff --git a/sys-kernel/linux-sources-redcore/files/5.1-0014-Swap-sucks.patch b/sys-kernel/linux-sources-redcore/files/5.1-0014-Swap-sucks.patch new file mode 100644 index 00000000..cca606c8 --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-0014-Swap-sucks.patch @@ -0,0 +1,25 @@ +From 2113c0b7c42ba961bcc409c1bf9aca9db747b2b0 Mon Sep 17 00:00:00 2001 +From: Con Kolivas <kernel@kolivas.org> +Date: Sat, 12 Aug 2017 12:02:04 +1000 +Subject: [PATCH 15/16] Swap sucks. + +--- + mm/vmscan.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/mm/vmscan.c b/mm/vmscan.c +index a815f73ee4d5..4bcbaec19859 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -164,7 +164,7 @@ struct scan_control { + /* + * From 0 .. 100. Higher means more swappy. + */ +-int vm_swappiness = 60; ++int vm_swappiness = 33; + /* + * The total number of pages which are beyond the high watermark within all + * zones. +-- +2.17.1 + diff --git a/sys-kernel/linux-sources-redcore/files/5.1-Unknow-SSD-HFM128GDHTNG-8310B-QUIRK_NO_APST.patch b/sys-kernel/linux-sources-redcore/files/5.1-Unknow-SSD-HFM128GDHTNG-8310B-QUIRK_NO_APST.patch new file mode 100644 index 00000000..073fb752 --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-Unknow-SSD-HFM128GDHTNG-8310B-QUIRK_NO_APST.patch @@ -0,0 +1,19 @@ +diff -Naur linux-4.20.6/drivers/nvme/host/core.c linux-4.20.6-p/drivers/nvme/host/core.c +--- linux-4.20.6/drivers/nvme/host/core.c 2019-01-31 08:15:47.000000000 +0100 ++++ linux-4.20.6-p/drivers/nvme/host/core.c 2019-02-04 22:32:28.182827035 +0100 +@@ -2046,6 +2046,15 @@ + .vid = 0x1179, + .mn = "THNSF5256GPUK TOSHIBA", + .quirks = NVME_QUIRK_NO_APST, ++ }, ++ ++ { ++ /* https://forum.openmandriva.org/t/nvme-ssd-m2-not-seen-by-omlx-4-0/2407 ++ * Unknow SSD .. 
Maybe ADATA/Hynix ( a similar mn from ADTA but vid seems to be Hynix) ++ */ ++ .vid = 0x1c5c, ++ .mn = "HFM128GDHTNG-8310B", ++ .quirks = NVME_QUIRK_NO_APST, + } + }; + diff --git a/sys-kernel/linux-sources-redcore/files/5.1-acpi-use-kern_warning_even_when_error.patch b/sys-kernel/linux-sources-redcore/files/5.1-acpi-use-kern_warning_even_when_error.patch new file mode 100644 index 00000000..64c773ab --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-acpi-use-kern_warning_even_when_error.patch @@ -0,0 +1,18 @@ +diff -Naur linux-5.1/include/acpi/platform/aclinux.h linux-5.1-p/include/acpi/platform/aclinux.h +--- linux-5.1/include/acpi/platform/aclinux.h 2019-05-06 02:42:58.000000000 +0200 ++++ linux-5.1-p/include/acpi/platform/aclinux.h 2019-05-07 09:49:23.980444601 +0200 +@@ -153,12 +153,12 @@ + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory + +-#define ACPI_MSG_ERROR KERN_ERR "ACPI Error: " ++#define ACPI_MSG_ERROR KERN_WARNING "ACPI Error: " + #define ACPI_MSG_EXCEPTION KERN_ERR "ACPI Exception: " + #define ACPI_MSG_WARNING KERN_WARNING "ACPI Warning: " + #define ACPI_MSG_INFO KERN_INFO "ACPI: " + +-#define ACPI_MSG_BIOS_ERROR KERN_ERR "ACPI BIOS Error (bug): " ++#define ACPI_MSG_BIOS_ERROR KERN_WARNING "ACPI BIOS Error (bug): " + #define ACPI_MSG_BIOS_WARNING KERN_WARNING "ACPI BIOS Warning (bug): " + + /* diff --git a/sys-kernel/linux-sources-redcore/files/redcore-amd64.config b/sys-kernel/linux-sources-redcore/files/5.1-amd64.config index 288c1279..40628bb6 100644 --- a/sys-kernel/linux-sources-redcore/files/redcore-amd64.config +++ b/sys-kernel/linux-sources-redcore/files/5.1-amd64.config @@ -1,54 +1,16 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 4.18.5-redcore Kernel Configuration +# Linux/x86 5.1.15-redcore Kernel Configuration # # -# Compiler: gcc (Gentoo Hardened 7.3.0-r3 p1.4) 7.3.0 +# Compiler: gcc (Gentoo Hardened 8.2.0-r1337 p1.6) 8.2.0 # -CONFIG_64BIT=y -CONFIG_X86_64=y -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_MMU=y -CONFIG_ARCH_MMAP_RND_BITS_MIN=28 -CONFIG_ARCH_MMAP_RND_BITS_MAX=32 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_ARCH_HAS_FILTER_PGPROT=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ZONE_DMA32=y -CONFIG_AUDIT_ARCH=y -CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_HAVE_INTEL_TXT=y -CONFIG_X86_64_SMP=y -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_DYNAMIC_PHYSICAL_MASK=y -CONFIG_PGTABLE_LEVELS=4 CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=70300 +CONFIG_GCC_VERSION=80200 CONFIG_CLANG_VERSION=0 +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y CONFIG_IRQ_WORK=y CONFIG_BUILDTIME_EXTABLE_SORT=y CONFIG_THREAD_INFO_IN_TASK=y @@ -56,22 +18,24 @@ CONFIG_THREAD_INFO_IN_TASK=y # # General setup # 
+CONFIG_SCHED_MUQSS=y CONFIG_INIT_ENV_ARG_LIMIT=32 # CONFIG_COMPILE_TEST is not set CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_BUILD_SALT="" CONFIG_HAVE_KERNEL_GZIP=y CONFIG_HAVE_KERNEL_BZIP2=y CONFIG_HAVE_KERNEL_LZMA=y CONFIG_HAVE_KERNEL_XZ=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_HAVE_KERNEL_LZ4=y -CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_GZIP is not set # CONFIG_KERNEL_BZIP2 is not set # CONFIG_KERNEL_LZMA is not set # CONFIG_KERNEL_XZ is not set # CONFIG_KERNEL_LZO is not set -# CONFIG_KERNEL_LZ4 is not set +CONFIG_KERNEL_LZ4=y CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SWAP=y CONFIG_SYSVIPC=y @@ -83,8 +47,6 @@ CONFIG_CROSS_MEMORY_ATTACH=y CONFIG_AUDIT=y CONFIG_HAVE_ARCH_AUDITSYSCALL=y CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y # # IRQ subsystem @@ -103,10 +65,12 @@ CONFIG_GENERIC_MSI_IRQ_DOMAIN=y CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y CONFIG_GENERIC_IRQ_RESERVATION_MODE=y CONFIG_IRQ_FORCED_THREADING=y +# CONFIG_FORCE_IRQ_THREADING is not set CONFIG_SPARSE_IRQ=y # CONFIG_GENERIC_IRQ_DEBUGFS is not set CONFIG_CLOCKSOURCE_WATCHDOG=y CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y CONFIG_GENERIC_TIME_VSYSCALL=y CONFIG_GENERIC_CLOCKEVENTS=y @@ -118,25 +82,33 @@ CONFIG_GENERIC_CMOS_UPDATE=y # Timers subsystem # CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set +CONFIG_HZ_PERIODIC=y # CONFIG_NO_HZ_IDLE is not set -CONFIG_NO_HZ_FULL=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_PREEMPT_COUNT=y # # CPU/Task time and stats accounting # CONFIG_VIRT_CPU_ACCOUNTING=y +# CONFIG_TICK_CPU_ACCOUNTING is not set CONFIG_VIRT_CPU_ACCOUNTING_GEN=y CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y CONFIG_BSD_PROCESS_ACCT=y # CONFIG_BSD_PROCESS_ACCT_V3 is not set CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_PSI is not set CONFIG_CPU_ISOLATION=y # @@ -149,9 +121,6 @@ CONFIG_TREE_SRCU=y CONFIG_TASKS_RCU=y CONFIG_RCU_STALL_COMMON=y CONFIG_RCU_NEED_SEGCBLIST=y -CONFIG_CONTEXT_TRACKING=y -# CONFIG_CONTEXT_TRACKING_FORCE is not set -CONFIG_RCU_NOCB_CPU=y CONFIG_BUILD_BIN2C=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y @@ -162,20 +131,16 @@ CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_NUMA_BALANCING=y -CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y CONFIG_CGROUPS=y CONFIG_PAGE_COUNTER=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y CONFIG_BLK_CGROUP=y # CONFIG_DEBUG_BLK_CGROUP is not set CONFIG_CGROUP_WRITEBACK=y CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y @@ -183,7 +148,6 @@ CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y # CONFIG_CGROUP_DEBUG is not set @@ -194,7 +158,7 @@ CONFIG_IPC_NS=y CONFIG_USER_NS=y CONFIG_PID_NS=y CONFIG_NET_NS=y -CONFIG_SCHED_AUTOGROUP=y +# CONFIG_CHECKPOINT_RESTORE is not set # CONFIG_SYSFS_DEPRECATED is not set CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y @@ -235,6 +199,7 @@ CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y CONFIG_AIO=y +CONFIG_IO_URING=y 
CONFIG_ADVISE_SYSCALLS=y CONFIG_MEMBARRIER=y CONFIG_KALLSYMS=y @@ -269,203 +234,47 @@ CONFIG_SLUB_CPU_PARTIAL=y CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y -CONFIG_HOTPLUG_SMT=y -# CONFIG_OPROFILE is not set -CONFIG_HAVE_OPROFILE=y -CONFIG_OPROFILE_NMI_TIMER=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -CONFIG_OPTPROBES=y -CONFIG_KPROBES_ON_FTRACE=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_KRETPROBES=y -CONFIG_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_KPROBES_ON_FTRACE=y -CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y -CONFIG_HAVE_NMI=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_RCU_TABLE_FREE=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -CONFIG_PLUGIN_HOSTCC="g++" -CONFIG_HAVE_GCC_PLUGINS=y -# CONFIG_GCC_PLUGINS is not set -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_CC_HAS_STACKPROTECTOR_NONE=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_HAVE_ARCH_SOFT_DIRTY=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_HAVE_EXIT_THREAD=y -CONFIG_ARCH_MMAP_RND_BITS=32 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 -CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y -CONFIG_HAVE_COPY_THREAD_TLS=y -CONFIG_HAVE_STACK_VALIDATION=y -CONFIG_ISA_BUS_API=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y -CONFIG_COMPAT_32BIT_TIME=y -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_REFCOUNT=y -CONFIG_REFCOUNT_FULL=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_MODULE_SIG=y -# CONFIG_MODULE_SIG_FORCE is not set -CONFIG_MODULE_SIG_ALL=y -# CONFIG_MODULE_SIG_SHA1 is not set -# CONFIG_MODULE_SIG_SHA224 is not set -# CONFIG_MODULE_SIG_SHA256 is not set -# CONFIG_MODULE_SIG_SHA384 is not set -CONFIG_MODULE_SIG_SHA512=y -CONFIG_MODULE_SIG_HASH="sha512" -CONFIG_MODULE_COMPRESS=y 
-CONFIG_MODULE_COMPRESS_GZIP=y -# CONFIG_MODULE_COMPRESS_XZ is not set -# CONFIG_TRIM_UNUSED_KSYMS is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLK_SCSI_REQUEST=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_ZONED=y -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -CONFIG_BLK_CMDLINE_PARSER=y -CONFIG_BLK_WBT=y -CONFIG_BLK_WBT_SQ=y -CONFIG_BLK_WBT_MQ=y -CONFIG_BLK_DEBUG_FS=y -# CONFIG_BLK_SED_OPAL is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -CONFIG_ACORN_PARTITION=y -CONFIG_ACORN_PARTITION_CUMANA=y -CONFIG_ACORN_PARTITION_EESOX=y -CONFIG_ACORN_PARTITION_ICS=y -CONFIG_ACORN_PARTITION_ADFS=y -CONFIG_ACORN_PARTITION_POWERTEC=y -CONFIG_ACORN_PARTITION_RISCIX=y -CONFIG_AIX_PARTITION=y -CONFIG_OSF_PARTITION=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_LDM_PARTITION=y -CONFIG_LDM_DEBUG=y -CONFIG_SGI_PARTITION=y -CONFIG_ULTRIX_PARTITION=y -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -CONFIG_SYSV68_PARTITION=y -CONFIG_CMDLINE_PARTITION=y -CONFIG_BLOCK_COMPAT=y -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_MQ_RDMA=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_MQ_IOSCHED_DEADLINE=y -# CONFIG_MQ_IOSCHED_KYBER is not set -CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y -CONFIG_UNINLINE_SPIN_UNLOCK=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y -CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=4 CONFIG_CC_HAS_SANE_STACKPROTECTOR=y -CONFIG_FREEZER=y # # Processor type and features @@ -477,7 +286,7 @@ CONFIG_X86_X2APIC=y CONFIG_X86_MPPARSE=y # CONFIG_GOLDFISH is not 
set CONFIG_RETPOLINE=y -CONFIG_INTEL_RDT=y +CONFIG_X86_CPU_RESCTRL=y # CONFIG_X86_EXTENDED_PLATFORM is not set CONFIG_X86_INTEL_LPSS=y CONFIG_X86_AMD_PLATFORM_DEVICE=y @@ -491,11 +300,11 @@ CONFIG_PARAVIRT=y # CONFIG_PARAVIRT_SPINLOCKS is not set # CONFIG_XEN is not set CONFIG_KVM_GUEST=y +# CONFIG_PVH is not set # CONFIG_KVM_DEBUG_FS is not set # CONFIG_PARAVIRT_TIME_ACCOUNTING is not set CONFIG_PARAVIRT_CLOCK=y CONFIG_JAILHOUSE_GUEST=y -CONFIG_NO_BOOTMEM=y # CONFIG_MK8 is not set # CONFIG_MPSC is not set # CONFIG_MCORE2 is not set @@ -510,6 +319,7 @@ CONFIG_X86_MINIMUM_CPU_FAMILY=64 CONFIG_X86_DEBUGCTLMSR=y CONFIG_CPU_SUP_INTEL=y CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y CONFIG_CPU_SUP_CENTAUR=y CONFIG_HPET_TIMER=y CONFIG_HPET_EMULATE_RTC=y @@ -522,12 +332,15 @@ CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=8192 CONFIG_NR_CPUS=8192 CONFIG_SCHED_SMT=y +CONFIG_SMT_NICE=y CONFIG_SCHED_MC=y CONFIG_SCHED_MC_PRIO=y -# CONFIG_PREEMPT_NONE is not set -# CONFIG_PREEMPT_VOLUNTARY is not set -CONFIG_PREEMPT=y -CONFIG_PREEMPT_COUNT=y +# CONFIG_RQ_NONE is not set +# CONFIG_RQ_SMT is not set +CONFIG_RQ_MC=y +# CONFIG_RQ_SMP is not set +# CONFIG_RQ_ALL is not set +CONFIG_SHARERQ=2 CONFIG_X86_LOCAL_APIC=y CONFIG_X86_IO_APIC=y CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y @@ -558,10 +371,10 @@ CONFIG_X86_MSR=m CONFIG_X86_CPUID=m # CONFIG_X86_5LEVEL is not set CONFIG_X86_DIRECT_GBPAGES=y +# CONFIG_X86_CPA_STATISTICS is not set CONFIG_ARCH_HAS_MEM_ENCRYPT=y CONFIG_AMD_MEM_ENCRYPT=y # CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set -CONFIG_ARCH_USE_MEMREMAP_PROT=y CONFIG_NUMA=y CONFIG_AMD_NUMA=y CONFIG_X86_64_ACPI_NUMA=y @@ -574,74 +387,6 @@ CONFIG_ARCH_SELECT_MEMORY_MODEL=y CONFIG_ARCH_MEMORY_PROBE=y CONFIG_ARCH_PROC_KCORE_TEXT=y CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_NEED_MULTIPLE_NODES=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_HAVE_MEMBLOCK_NODE_MAP=y -CONFIG_HAVE_GENERIC_GUP=y -CONFIG_ARCH_DISCARD_MEMBLOCK=y -CONFIG_MEMORY_ISOLATION=y -CONFIG_HAVE_BOOTMEM_INFO_NODE=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_SPARSE=y -# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y -CONFIG_ARCH_ENABLE_THP_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_BOUNCE=y -CONFIG_VIRT_TO_BUS=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_UKSM=y -# CONFIG_KSM_LEGACY is not set -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -# CONFIG_HWPOISON_INJECT is not set -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -CONFIG_ARCH_WANTS_THP_SWAP=y -CONFIG_THP_SWAP=y -CONFIG_TRANSPARENT_HUGE_PAGECACHE=y -CONFIG_CLEANCACHE=y -CONFIG_FRONTSWAP=y -CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -CONFIG_CMA_AREAS=7 -# CONFIG_ZSWAP is not set -CONFIG_ZPOOL=m -CONFIG_ZBUD=m -CONFIG_Z3FOLD=m -CONFIG_ZSMALLOC=y -# CONFIG_PGTABLE_MAPPING is not set -# CONFIG_ZSMALLOC_STAT is not set -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set -# CONFIG_IDLE_PAGE_TRACKING is not set 
-CONFIG_ARCH_HAS_ZONE_DEVICE=y -# CONFIG_ZONE_DEVICE is not set -CONFIG_FRAME_VECTOR=y -CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y -CONFIG_ARCH_HAS_PKEYS=y -# CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_BENCHMARK is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y CONFIG_X86_PMEM_LEGACY_DEVICE=y CONFIG_X86_PMEM_LEGACY=m CONFIG_X86_CHECK_BIOS_CORRUPTION=y @@ -662,13 +407,14 @@ CONFIG_EFI=y CONFIG_EFI_STUB=y CONFIG_EFI_MIXED=y CONFIG_SECCOMP=y -# CONFIG_HZ_100 is not set -# CONFIG_HZ_250 is not set -# CONFIG_HZ_300 is not set -CONFIG_HZ_1000=y -CONFIG_HZ=1000 +CONFIG_HZ_100=y +# CONFIG_HZ_250_NODEF is not set +# CONFIG_HZ_300_NODEF is not set +# CONFIG_HZ_1000_NODEF is not set +CONFIG_HZ=100 CONFIG_SCHED_HRTICK=y CONFIG_KEXEC=y +# CONFIG_KEXEC_FILE is not set # CONFIG_CRASH_DUMP is not set CONFIG_KEXEC_JUMP=y CONFIG_PHYSICAL_START=0x1000000 @@ -693,6 +439,9 @@ CONFIG_ARCH_HAS_ADD_PAGES=y CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y # # Power management and ACPI options @@ -715,6 +464,8 @@ CONFIG_PM_CLK=y CONFIG_PM_GENERIC_DOMAINS=y # CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y CONFIG_ACPI=y CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y @@ -755,6 +506,7 @@ CONFIG_ACPI_HED=y # CONFIG_ACPI_CUSTOM_METHOD is not set CONFIG_ACPI_BGRT=y CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set CONFIG_HAVE_ACPI_APEI=y CONFIG_HAVE_ACPI_APEI_NMI=y CONFIG_ACPI_APEI=y @@ -766,6 +518,7 @@ CONFIG_ACPI_APEI_MEMORY_FAILURE=y CONFIG_DPTF_POWER=m CONFIG_ACPI_WATCHDOG=y CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_ADXL=y CONFIG_PMIC_OPREGION=y # CONFIG_XPOWER_PMIC_OPREGION is not set # CONFIG_BXT_WC_PMIC_OPREGION is not set @@ -816,119 +569,306 @@ CONFIG_X86_AMD_FREQ_SENSITIVITY=m CONFIG_CPU_IDLE=y CONFIG_CPU_IDLE_GOV_LADDER=y CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set CONFIG_INTEL_IDLE=y # # Bus options (PCI etc.) 
# -CONFIG_PCI=y CONFIG_PCI_DIRECT=y CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_DOMAINS=y CONFIG_MMCONF_FAM10H=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -CONFIG_PCIEAER_INJECT=m -CONFIG_PCIE_ECRC=y -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -CONFIG_PCIE_DPC=y -CONFIG_PCIE_PTM=y -CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y -CONFIG_PCI_QUIRKS=y -# CONFIG_PCI_DEBUG is not set -CONFIG_PCI_REALLOC_ENABLE_AUTO=y -CONFIG_PCI_STUB=m -CONFIG_PCI_PF_STUB=m -CONFIG_PCI_ATS=y -CONFIG_PCI_LOCKLESS_CONFIG=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PRI=y -CONFIG_PCI_PASID=y -CONFIG_PCI_LABEL=y -CONFIG_PCI_HYPERV=m -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -CONFIG_HOTPLUG_PCI_CPCI=y -CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m -CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m -# CONFIG_HOTPLUG_PCI_SHPC is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +CONFIG_X86_SYSFB=y # -# PCI controller drivers +# Binary Emulations # +CONFIG_IA32_EMULATION=y +CONFIG_X86_X32=y +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_X86_DEV_DMA_OPS=y +CONFIG_HAVE_GENERIC_GUP=y # -# Cadence PCIe controllers support +# Firmware Drivers # -CONFIG_VMD=m +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=m +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m +CONFIG_FW_CFG_SYSFS=m +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_GOOGLE_FIRMWARE=y +CONFIG_GOOGLE_SMI=m +CONFIG_GOOGLE_COREBOOT_TABLE=m +CONFIG_GOOGLE_MEMCONSOLE=m +CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY=m +CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT=m +CONFIG_GOOGLE_MEMCONSOLE_COREBOOT=m +CONFIG_GOOGLE_VPD=m # -# DesignWare PCI Core Support +# EFI (Extensible Firmware Interface) Support # -# CONFIG_PCIE_DW_PLAT_HOST is not set -# CONFIG_PCIE_DW_PLAT_EP is not set +CONFIG_EFI_VARS=m +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_BOOTLOADER_CONTROL=m +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +CONFIG_APPLE_PROPERTIES=y +CONFIG_RESET_ATTACK_MITIGATION=y +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y +CONFIG_EFI_DEV_PATH_PARSER=y +CONFIG_EFI_EARLYCON=y # -# PCI Endpoint +# Tegra firmware driver # -CONFIG_PCI_ENDPOINT=y -CONFIG_PCI_ENDPOINT_CONFIGFS=y -# CONFIG_PCI_EPF_TEST is not set +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +CONFIG_KVM_AMD_SEV=y +# CONFIG_KVM_MMU_AUDIT is not set +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set # -# PCI switch controller drivers +# General architecture-dependent options # -CONFIG_PCI_SW_SWITCHTEC=m -CONFIG_ISA_DMA_API=y -CONFIG_AMD_NB=y -CONFIG_PCCARD=m -CONFIG_PCMCIA=m -CONFIG_PCMCIA_LOAD_CIS=y -CONFIG_CARDBUS=y +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HOTPLUG_SMT=y +# CONFIG_OPROFILE is not set 
+CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_HAVE_RCU_TABLE_INVALIDATE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=32 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_ISA_BUS_API=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_64BIT_TIME=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_REFCOUNT=y +CONFIG_REFCOUNT_FULL=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y # -# PC-card bridges +# GCOV-based kernel profiling # -CONFIG_YENTA=m -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y -CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y -CONFIG_PD6729=m -CONFIG_I82092=m -CONFIG_PCCARD_NONSTATIC=y -CONFIG_RAPIDIO=y -CONFIG_RAPIDIO_TSI721=y -CONFIG_RAPIDIO_DISC_TIMEOUT=30 -CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y -CONFIG_RAPIDIO_DMA_ENGINE=y -# CONFIG_RAPIDIO_DEBUG is not set -CONFIG_RAPIDIO_ENUM_BASIC=m -CONFIG_RAPIDIO_CHMAN=m -CONFIG_RAPIDIO_MPORT_CDEV=m +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_PLUGIN_HOSTCC="g++" +CONFIG_HAVE_GCC_PLUGINS=y +# CONFIG_GCC_PLUGINS is not set +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 
+CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +# CONFIG_MODULE_SIG_SHA256 is not set +# CONFIG_MODULE_SIG_SHA384 is not set +CONFIG_MODULE_SIG_SHA512=y +CONFIG_MODULE_SIG_HASH="sha512" +CONFIG_MODULE_COMPRESS=y +CONFIG_MODULE_COMPRESS_GZIP=y +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +CONFIG_BLK_CMDLINE_PARSER=y +CONFIG_BLK_WBT=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set # -# RapidIO Switch drivers +# Partition Types # -CONFIG_RAPIDIO_TSI57X=y -CONFIG_RAPIDIO_CPS_XX=y -CONFIG_RAPIDIO_TSI568=y -CONFIG_RAPIDIO_CPS_GEN2=y -CONFIG_RAPIDIO_RXS_GEN3=m -CONFIG_X86_SYSFB=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_ACORN_PARTITION=y +CONFIG_ACORN_PARTITION_CUMANA=y +CONFIG_ACORN_PARTITION_EESOX=y +CONFIG_ACORN_PARTITION_ICS=y +CONFIG_ACORN_PARTITION_ADFS=y +CONFIG_ACORN_PARTITION_POWERTEC=y +CONFIG_ACORN_PARTITION_RISCIX=y +CONFIG_AIX_PARTITION=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_ATARI_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_LDM_DEBUG=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +CONFIG_SYSV68_PARTITION=y +CONFIG_CMDLINE_PARTITION=y +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y +CONFIG_BLK_PM=y # -# Executable file formats / Emulations +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +# CONFIG_MQ_IOSCHED_KYBER is not set +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats # CONFIG_BINFMT_ELF=y CONFIG_COMPAT_BINFMT_ELF=y @@ -937,18 +877,77 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_BINFMT_SCRIPT=y CONFIG_BINFMT_MISC=y CONFIG_COREDUMP=y -CONFIG_IA32_EMULATION=y -CONFIG_IA32_AOUT=y -CONFIG_X86_X32=y -CONFIG_COMPAT_32=y -CONFIG_COMPAT=y -CONFIG_COMPAT_FOR_U64_ALIGNMENT=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_X86_DEV_DMA_OPS=y + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# 
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_UKSM=y +# CONFIG_KSM_LEGACY is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +# CONFIG_HWPOISON_INJECT is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +# CONFIG_ZSWAP is not set +CONFIG_ZPOOL=m +CONFIG_ZBUD=m +CONFIG_Z3FOLD=m +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +# CONFIG_ZSMALLOC_STAT is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_ZONE_DEVICE=y +# CONFIG_ZONE_DEVICE is not set +CONFIG_FRAME_VECTOR=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y CONFIG_NET=y CONFIG_COMPAT_NETLINK_MESSAGES=y CONFIG_NET_INGRESS=y CONFIG_NET_EGRESS=y +CONFIG_SKB_EXTENSIONS=y # # Networking options @@ -956,6 +955,7 @@ CONFIG_NET_EGRESS=y CONFIG_PACKET=m CONFIG_PACKET_DIAG=m CONFIG_UNIX=m +CONFIG_UNIX_SCM=y CONFIG_UNIX_DIAG=m CONFIG_TLS=m # CONFIG_TLS_DEVICE is not set @@ -963,6 +963,7 @@ CONFIG_XFRM=y CONFIG_XFRM_OFFLOAD=y CONFIG_XFRM_ALGO=m CONFIG_XFRM_USER=m +# CONFIG_XFRM_INTERFACE is not set CONFIG_XFRM_SUB_POLICY=y CONFIG_XFRM_MIGRATE=y CONFIG_XFRM_STATISTICS=y @@ -972,6 +973,7 @@ CONFIG_NET_KEY_MIGRATE=y CONFIG_SMC=m CONFIG_SMC_DIAG=m CONFIG_XDP_SOCKETS=y +# CONFIG_XDP_SOCKETS_DIAG is not set CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y @@ -1082,6 +1084,7 @@ CONFIG_NETFILTER_FAMILY_ARP=y CONFIG_NETFILTER_NETLINK_ACCT=m CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m CONFIG_NF_CONNTRACK=m CONFIG_NF_LOG_COMMON=m CONFIG_NF_LOG_NETDEV=m @@ -1095,7 +1098,7 @@ CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y CONFIG_NF_CONNTRACK_LABELS=y CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_GRE=y CONFIG_NF_CT_PROTO_SCTP=y CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m @@ -1115,17 +1118,14 @@ CONFIG_NF_CT_NETLINK_HELPER=m CONFIG_NETFILTER_NETLINK_GLUE_CT=y CONFIG_NF_NAT=m CONFIG_NF_NAT_NEEDED=y -CONFIG_NF_NAT_PROTO_DCCP=y -CONFIG_NF_NAT_PROTO_UDPLITE=y -CONFIG_NF_NAT_PROTO_SCTP=y CONFIG_NF_NAT_AMANDA=m CONFIG_NF_NAT_FTP=m CONFIG_NF_NAT_IRC=m CONFIG_NF_NAT_SIP=m CONFIG_NF_NAT_TFTP=m CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y CONFIG_NETFILTER_SYNPROXY=m -CONFIG_NF_OSF=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_SET=m CONFIG_NF_TABLES_INET=y @@ -1140,6 +1140,7 @@ CONFIG_NFT_LIMIT=m CONFIG_NFT_MASQ=m CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +# CONFIG_NFT_TUNNEL is not set CONFIG_NFT_OBJREF=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m @@ -1149,7 +1150,10 @@ CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB=m CONFIG_NFT_FIB_INET=m +# CONFIG_NFT_XFRM is not set CONFIG_NFT_SOCKET=m +# CONFIG_NFT_OSF is not set +# CONFIG_NFT_TPROXY is not set CONFIG_NF_DUP_NETDEV=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m @@ -1315,7 
+1319,6 @@ CONFIG_IP_VS_PE_SIP=m # IP: Netfilter Configuration # CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NF_SOCKET_IPV4=m CONFIG_NF_TPROXY_IPV4=m CONFIG_NF_TABLES_IPV4=y @@ -1329,13 +1332,7 @@ CONFIG_NF_DUP_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NF_LOG_IPV4=m CONFIG_NF_REJECT_IPV4=m -CONFIG_NF_NAT_IPV4=m -CONFIG_NFT_CHAIN_NAT_IPV4=m -CONFIG_NF_NAT_MASQUERADE_IPV4=y -CONFIG_NFT_MASQ_IPV4=m -CONFIG_NFT_REDIR_IPV4=m CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PROTO_GRE=m CONFIG_NF_NAT_PPTP=m CONFIG_NF_NAT_H323=m CONFIG_IP_NF_IPTABLES=m @@ -1363,15 +1360,10 @@ CONFIG_IP_NF_ARP_MANGLE=m # # IPv6: Netfilter Configuration # -CONFIG_NF_DEFRAG_IPV6=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_SOCKET_IPV6=m CONFIG_NF_TPROXY_IPV6=m CONFIG_NF_TABLES_IPV6=y CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_CHAIN_NAT_IPV6=m -CONFIG_NFT_MASQ_IPV6=m -CONFIG_NFT_REDIR_IPV6=m CONFIG_NFT_REJECT_IPV6=m CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_FIB_IPV6=m @@ -1379,8 +1371,6 @@ CONFIG_NF_FLOW_TABLE_IPV6=m CONFIG_NF_DUP_IPV6=m CONFIG_NF_REJECT_IPV6=m CONFIG_NF_LOG_IPV6=m -CONFIG_NF_NAT_IPV6=m -CONFIG_NF_NAT_MASQUERADE_IPV6=y CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -1402,6 +1392,7 @@ CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_DEFRAG_IPV6=m # # DECnet: Netfilter Configuration @@ -1489,7 +1480,9 @@ CONFIG_NET_DSA_TAG_BRCM=y CONFIG_NET_DSA_TAG_BRCM_PREPEND=y CONFIG_NET_DSA_TAG_DSA=y CONFIG_NET_DSA_TAG_EDSA=y +CONFIG_NET_DSA_TAG_GSWIP=y CONFIG_NET_DSA_TAG_KSZ=y +CONFIG_NET_DSA_TAG_KSZ9477=y CONFIG_NET_DSA_TAG_LAN9303=y CONFIG_NET_DSA_TAG_MTK=y CONFIG_NET_DSA_TAG_TRAILER=y @@ -1546,15 +1539,19 @@ CONFIG_NET_SCH_SFQ=m CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_CBS=m +# CONFIG_NET_SCH_ETF is not set +# CONFIG_NET_SCH_TAPRIO is not set CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_DSMARK=m CONFIG_NET_SCH_NETEM=m CONFIG_NET_SCH_DRR=m CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set CONFIG_NET_SCH_CHOKE=m CONFIG_NET_SCH_QFQ=m CONFIG_NET_SCH_CODEL=m CONFIG_NET_SCH_FQ_CODEL=m +# CONFIG_NET_SCH_CAKE is not set CONFIG_NET_SCH_FQ=m CONFIG_NET_SCH_HHF=m CONFIG_NET_SCH_PIE=m @@ -1623,6 +1620,7 @@ CONFIG_BATMAN_ADV_NC=y CONFIG_BATMAN_ADV_MCAST=y CONFIG_BATMAN_ADV_DEBUGFS=y # CONFIG_BATMAN_ADV_DEBUG is not set +# CONFIG_BATMAN_ADV_TRACING is not set CONFIG_OPENVSWITCH=m CONFIG_OPENVSWITCH_GRE=m CONFIG_OPENVSWITCH_VXLAN=m @@ -1692,7 +1690,6 @@ CONFIG_CAN_VXCAN=m CONFIG_CAN_SLCAN=m CONFIG_CAN_DEV=m CONFIG_CAN_CALC_BITTIMING=y -CONFIG_CAN_LEDS=y CONFIG_CAN_JANZ_ICAN3=m CONFIG_CAN_C_CAN=m CONFIG_CAN_C_CAN_PLATFORM=m @@ -1725,13 +1722,14 @@ CONFIG_CAN_MCP251X=m # # CAN USB interfaces # +CONFIG_CAN_8DEV_USB=m CONFIG_CAN_EMS_USB=m CONFIG_CAN_ESD_USB2=m CONFIG_CAN_GS_USB=m CONFIG_CAN_KVASER_USB=m -CONFIG_CAN_PEAK_USB=m -CONFIG_CAN_8DEV_USB=m CONFIG_CAN_MCBA_USB=m +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set # CONFIG_CAN_DEBUG_DEVICES is not set CONFIG_BT=m CONFIG_BT_BREDR=y @@ -1770,6 +1768,7 @@ CONFIG_BT_HCIUART_ATH3K=y CONFIG_BT_HCIUART_LL=y CONFIG_BT_HCIUART_3WIRE=y CONFIG_BT_HCIUART_INTEL=y +# CONFIG_BT_HCIUART_RTL is not set CONFIG_BT_HCIUART_QCA=y CONFIG_BT_HCIUART_AG6XX=y CONFIG_BT_HCIUART_MRVL=y @@ -1784,6 +1783,7 @@ CONFIG_BT_MRVL=m CONFIG_BT_MRVL_SDIO=m CONFIG_BT_ATH3K=m CONFIG_BT_WILINK=m +# CONFIG_BT_MTKUART is not set CONFIG_BT_HCIRSI=m CONFIG_AF_RXRPC=m CONFIG_AF_RXRPC_IPV6=y @@ -1817,7 +1817,6 @@ CONFIG_LIB80211_CRYPT_TKIP=m CONFIG_MAC80211=m CONFIG_MAC80211_HAS_RC=y CONFIG_MAC80211_RC_MINSTREL=y 
-CONFIG_MAC80211_RC_MINSTREL_HT=y CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" CONFIG_MAC80211_MESH=y @@ -1890,14 +1889,118 @@ CONFIG_LWTUNNEL=y CONFIG_LWTUNNEL_BPF=y CONFIG_DST_CACHE=y CONFIG_GRO_CELLS=y -CONFIG_NET_DEVLINK=m -CONFIG_MAY_USE_DEVLINK=m +CONFIG_NET_SOCK_MSG=y +# CONFIG_NET_DEVLINK is not set CONFIG_FAILOVER=m CONFIG_HAVE_EBPF_JIT=y # # Device Drivers # +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +CONFIG_PCIE_PTM=y +# CONFIG_PCIE_BW is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +CONFIG_PCI_REALLOC_ENABLE_AUTO=y +CONFIG_PCI_STUB=m +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_PCI_HYPERV=m +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m +CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# + +# +# Cadence PCIe controllers support +# +CONFIG_VMD=m + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCIE_DW_PLAT_EP is not set +# CONFIG_PCI_MESON is not set + +# +# PCI Endpoint +# +CONFIG_PCI_ENDPOINT=y +CONFIG_PCI_ENDPOINT_CONFIGFS=y +# CONFIG_PCI_EPF_TEST is not set + +# +# PCI switch controller drivers +# +CONFIG_PCI_SW_SWITCHTEC=m +CONFIG_PCCARD=m +CONFIG_PCMCIA=m +CONFIG_PCMCIA_LOAD_CIS=y +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +CONFIG_PD6729=m +CONFIG_I82092=m +CONFIG_PCCARD_NONSTATIC=y +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_DISC_TIMEOUT=30 +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +CONFIG_RAPIDIO_DMA_ENGINE=y +# CONFIG_RAPIDIO_DEBUG is not set +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m + +# +# RapidIO Switch drivers +# +CONFIG_RAPIDIO_TSI57X=y +CONFIG_RAPIDIO_CPS_XX=y +CONFIG_RAPIDIO_TSI568=y +CONFIG_RAPIDIO_CPS_GEN2=y +CONFIG_RAPIDIO_RXS_GEN3=m # # Generic Driver Options @@ -1926,6 +2029,7 @@ CONFIG_GENERIC_CPU_AUTOPROBE=y CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SLIMBUS=m CONFIG_REGMAP_SPI=y CONFIG_REGMAP_SPMI=m CONFIG_REGMAP_W1=m @@ -1934,24 +2038,24 @@ CONFIG_REGMAP_IRQ=y CONFIG_REGMAP_SOUNDWIRE=m CONFIG_DMA_SHARED_BUFFER=y # CONFIG_DMA_FENCE_TRACE is not set -# CONFIG_DMA_CMA is not set # # Bus devices # CONFIG_CONNECTOR=m +# CONFIG_GNSS is not set CONFIG_MTD=m CONFIG_MTD_TESTS=m -CONFIG_MTD_REDBOOT_PARTS=m -CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 -CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y -CONFIG_MTD_REDBOOT_PARTS_READONLY=y CONFIG_MTD_CMDLINE_PARTS=m CONFIG_MTD_AR7_PARTS=m # # Partition parsers # +CONFIG_MTD_REDBOOT_PARTS=m +CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 +CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y +CONFIG_MTD_REDBOOT_PARTS_READONLY=y # # User Modules And Translation Layers @@ -1996,6 +2100,7 @@ CONFIG_MTD_ABSENT=m CONFIG_MTD_COMPLEX_MAPPINGS=y 
CONFIG_MTD_PHYSMAP=m # CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PHYSMAP_GPIO_ADDR is not set CONFIG_MTD_SBC_GXX=m CONFIG_MTD_AMD76XROM=m CONFIG_MTD_ICHXROM=m @@ -2007,10 +2112,8 @@ CONFIG_MTD_L440GX=m CONFIG_MTD_PCI=m CONFIG_MTD_PCMCIA=m # CONFIG_MTD_PCMCIA_ANONYMOUS is not set -CONFIG_MTD_GPIO_ADDR=m CONFIG_MTD_INTEL_VR_NOR=m CONFIG_MTD_PLATRAM=m -CONFIG_MTD_LATCH_ADDR=m # # Self-contained MTD device drivers @@ -2057,10 +2160,10 @@ CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED=y CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0x0 CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH=y CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y -CONFIG_MTD_NAND_DOCG4=m CONFIG_MTD_NAND_CAFE=m CONFIG_MTD_NAND_NANDSIM=m CONFIG_MTD_NAND_PLATFORM=m +# CONFIG_MTD_SPI_NAND is not set # # LPDDR & LPDDR2 PCM memory drivers @@ -2068,8 +2171,8 @@ CONFIG_MTD_NAND_PLATFORM=m CONFIG_MTD_LPDDR=m CONFIG_MTD_QINFO_PROBE=m CONFIG_MTD_SPI_NOR=m -CONFIG_MTD_MT81xx_NOR=m CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +CONFIG_SPI_MTK_QUADSPI=m CONFIG_SPI_INTEL_SPI=m CONFIG_SPI_INTEL_SPI_PCI=m CONFIG_SPI_INTEL_SPI_PLATFORM=m @@ -2134,7 +2237,6 @@ CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m CONFIG_ZRAM=m CONFIG_ZRAM_WRITEBACK=y # CONFIG_ZRAM_MEMORY_TRACKING is not set -CONFIG_BLK_DEV_DAC960=m CONFIG_BLK_DEV_UMEM=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 @@ -2165,11 +2267,13 @@ CONFIG_BLK_DEV_NVME=y CONFIG_NVME_FABRICS=m CONFIG_NVME_RDMA=m CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m CONFIG_NVME_TARGET=m CONFIG_NVME_TARGET_LOOP=m CONFIG_NVME_TARGET_RDMA=m CONFIG_NVME_TARGET_FC=m CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m # # Misc devices @@ -2201,6 +2305,7 @@ CONFIG_LATTICE_ECP3_CONFIG=m CONFIG_SRAM=y CONFIG_PCI_ENDPOINT_TEST=m CONFIG_MISC_RTSX=m +CONFIG_PVPANIC=m CONFIG_C2PORT=m CONFIG_C2PORT_DURAMAR_2150=m @@ -2214,6 +2319,7 @@ CONFIG_EEPROM_MAX6875=m CONFIG_EEPROM_93CX6=m CONFIG_EEPROM_93XX46=m CONFIG_EEPROM_IDT_89HPESX=m +CONFIG_EEPROM_EE1004=m CONFIG_CB710_CORE=m # CONFIG_CB710_DEBUG is not set CONFIG_CB710_DEBUG_ASSUMPTIONS=y @@ -2231,6 +2337,7 @@ CONFIG_ALTERA_STAPL=m CONFIG_INTEL_MEI=y CONFIG_INTEL_MEI_ME=y CONFIG_INTEL_MEI_TXE=m +CONFIG_INTEL_MEI_HDCP=m CONFIG_VMWARE_VMCI=m # @@ -2280,8 +2387,10 @@ CONFIG_VHOST_RING=m CONFIG_GENWQE=m CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0 CONFIG_ECHO=m +CONFIG_MISC_ALCOR_PCI=m CONFIG_MISC_RTSX_PCI=m CONFIG_MISC_RTSX_USB=m +CONFIG_HABANA_AI=m CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -2293,7 +2402,6 @@ CONFIG_RAID_ATTRS=m CONFIG_SCSI=m CONFIG_SCSI_DMA=y CONFIG_SCSI_NETLINK=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_SCSI_PROC_FS=y # @@ -2372,10 +2480,14 @@ CONFIG_SCSI_UFSHCD=m CONFIG_SCSI_UFSHCD_PCI=m CONFIG_SCSI_UFS_DWC_TC_PCI=m CONFIG_SCSI_UFSHCD_PLATFORM=m +CONFIG_SCSI_UFS_CDNS_PLATFORM=m CONFIG_SCSI_UFS_DWC_TC_PLATFORM=m +# CONFIG_SCSI_UFS_BSG is not set CONFIG_SCSI_HPTIOP=m CONFIG_SCSI_BUSLOGIC=m CONFIG_SCSI_FLASHPOINT=y +CONFIG_SCSI_MYRB=m +CONFIG_SCSI_MYRS=m CONFIG_VMWARE_PVSCSI=m CONFIG_HYPERV_STORAGE=m CONFIG_LIBFC=m @@ -2430,10 +2542,6 @@ CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m CONFIG_SCSI_DH_ALUA=m -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m -CONFIG_SCSI_OSD_DPRINT_SENSE=1 -# CONFIG_SCSI_OSD_DEBUG is not set CONFIG_ATA=m CONFIG_ATA_VERBOSE_ERROR=y CONFIG_ATA_ACPI=y @@ -2545,7 +2653,6 @@ CONFIG_BCACHE=m # CONFIG_BCACHE_CLOSURES_DEBUG is not set CONFIG_BLK_DEV_DM_BUILTIN=y CONFIG_BLK_DEV_DM=m -# CONFIG_DM_MQ_DEFAULT is not set # CONFIG_DM_DEBUG is not set CONFIG_DM_BUFIO=m # CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set @@ -2620,6 +2727,7 @@ 
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y CONFIG_IPVLAN=m CONFIG_IPVTAP=m CONFIG_VXLAN=m @@ -2702,15 +2810,20 @@ CONFIG_B53_SPI_DRIVER=m CONFIG_B53_MDIO_DRIVER=m CONFIG_B53_MMAP_DRIVER=m CONFIG_B53_SRAB_DRIVER=m +CONFIG_B53_SERDES=m +# CONFIG_NET_DSA_BCM_SF2 is not set CONFIG_NET_DSA_LOOP=m +CONFIG_NET_DSA_LANTIQ_GSWIP=m CONFIG_NET_DSA_MT7530=m CONFIG_NET_DSA_MV88E6060=m -CONFIG_MICROCHIP_KSZ=m -CONFIG_MICROCHIP_KSZ_SPI_DRIVER=m +CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON=m +CONFIG_NET_DSA_MICROCHIP_KSZ9477=m +CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m CONFIG_NET_DSA_MV88E6XXX=m CONFIG_NET_DSA_MV88E6XXX_GLOBAL2=y CONFIG_NET_DSA_MV88E6XXX_PTP=y CONFIG_NET_DSA_QCA8K=m +# CONFIG_NET_DSA_REALTEK_SMI is not set CONFIG_NET_DSA_SMSC_LAN9303=m CONFIG_NET_DSA_SMSC_LAN9303_I2C=m CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m @@ -2756,19 +2869,22 @@ CONFIG_B44=m CONFIG_B44_PCI_AUTOSELECT=y CONFIG_B44_PCICORE_AUTOSELECT=y CONFIG_B44_PCI=y +# CONFIG_BCMGENET is not set CONFIG_BNX2=m CONFIG_CNIC=m CONFIG_TIGON3=m CONFIG_TIGON3_HWMON=y CONFIG_BNX2X=m CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set CONFIG_BNXT=m CONFIG_BNXT_SRIOV=y CONFIG_BNXT_FLOWER_OFFLOAD=y CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y CONFIG_NET_VENDOR_BROCADE=y CONFIG_BNA=m -CONFIG_NET_CADENCE=y +CONFIG_NET_VENDOR_CADENCE=y CONFIG_MACB=m CONFIG_MACB_USE_HWSTAMP=y CONFIG_MACB_PCI=m @@ -2815,6 +2931,10 @@ CONFIG_SUNDANCE=m CONFIG_NET_VENDOR_EMULEX=y CONFIG_BE2NET=m CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y CONFIG_NET_VENDOR_EZCHIP=y CONFIG_NET_VENDOR_FUJITSU=y CONFIG_PCMCIA_FMVJ18X=m @@ -2837,16 +2957,16 @@ CONFIG_IXGBE=m CONFIG_IXGBE_HWMON=y CONFIG_IXGBE_DCA=y CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y CONFIG_I40E=m CONFIG_I40E_DCB=y +CONFIG_IAVF=m CONFIG_I40EVF=m CONFIG_ICE=m CONFIG_FM10K=m -CONFIG_NET_VENDOR_EXAR=y -CONFIG_S2IO=m -CONFIG_VXGE=m -# CONFIG_VXGE_DEBUG_TRACE_ALL is not set +CONFIG_IGC=m CONFIG_JME=m CONFIG_NET_VENDOR_MARVELL=y CONFIG_MVMDIO=m @@ -2888,7 +3008,6 @@ CONFIG_ENCX24J600=m CONFIG_LAN743X=m CONFIG_NET_VENDOR_MICROSEMI=y CONFIG_MSCC_OCELOT_SWITCH=m -CONFIG_MSCC_OCELOT_SWITCH_OCELOT=m CONFIG_NET_VENDOR_MYRI=y CONFIG_MYRI10GE=m CONFIG_MYRI10GE_DCA=y @@ -2896,12 +3015,17 @@ CONFIG_FEALNX=m CONFIG_NET_VENDOR_NATSEMI=y CONFIG_NATSEMI=m CONFIG_NS83820=m +CONFIG_NET_VENDOR_NETERION=y +CONFIG_S2IO=m +CONFIG_VXGE=m +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set CONFIG_NET_VENDOR_NETRONOME=y CONFIG_NFP=m # CONFIG_NFP_APP_FLOWER is not set CONFIG_NFP_APP_ABM_NIC=y # CONFIG_NFP_DEBUG is not set CONFIG_NET_VENDOR_NI=y +CONFIG_NI_XGE_MANAGEMENT_ENET=m CONFIG_NET_VENDOR_8390=y CONFIG_PCMCIA_AXNET=m CONFIG_NE2K_PCI=m @@ -2910,7 +3034,7 @@ CONFIG_NET_VENDOR_NVIDIA=y CONFIG_FORCEDETH=m CONFIG_NET_VENDOR_OKI=y CONFIG_ETHOC=m -CONFIG_NET_PACKET_ENGINE=y +CONFIG_NET_VENDOR_PACKET_ENGINES=y CONFIG_HAMACHI=m CONFIG_YELLOWFIN=m CONFIG_NET_VENDOR_QLOGIC=y @@ -2984,6 +3108,7 @@ CONFIG_DWC_XLGMAC_PCI=m CONFIG_NET_VENDOR_TEHUTI=y CONFIG_TEHUTI=m CONFIG_NET_VENDOR_TI=y +# CONFIG_TI_CPSW_PHY_SEL is not set CONFIG_TI_CPSW_ALE=m CONFIG_TLAN=m CONFIG_NET_VENDOR_VIA=y @@ -3009,6 +3134,7 @@ CONFIG_ROADRUNNER_LARGE_RINGS=y CONFIG_NET_SB1000=m CONFIG_MDIO_DEVICE=m CONFIG_MDIO_BUS=m +# CONFIG_MDIO_BCM_UNIMAC is not set CONFIG_MDIO_BITBANG=m CONFIG_MDIO_CAVIUM=m CONFIG_MDIO_GPIO=m @@ -3127,6 +3253,7 @@ CONFIG_USB_IPHETH=m CONFIG_USB_SIERRA_NET=m CONFIG_USB_VL600=m CONFIG_USB_NET_CH9200=m 
+CONFIG_USB_NET_AQC111=m CONFIG_WLAN=y CONFIG_WLAN_VENDOR_ADMTEK=y CONFIG_ADM8211=m @@ -3299,7 +3426,15 @@ CONFIG_MWL8K=m CONFIG_WLAN_VENDOR_MEDIATEK=y CONFIG_MT7601U=m CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76x02_LIB=m +CONFIG_MT76x0_COMMON=m +# CONFIG_MT76x0U is not set +CONFIG_MT76x0E=m +CONFIG_MT76x2_COMMON=m CONFIG_MT76x2E=m +# CONFIG_MT76x2U is not set +CONFIG_MT7603E=m CONFIG_WLAN_VENDOR_RALINK=y CONFIG_RT2X00=m CONFIG_RT2400PCI=m @@ -3377,11 +3512,12 @@ CONFIG_ZD1211RW=m # CONFIG_ZD1211RW_DEBUG is not set CONFIG_WLAN_VENDOR_QUANTENNA=y CONFIG_QTNFMAC=m -CONFIG_QTNFMAC_PEARL_PCIE=m +CONFIG_QTNFMAC_PCIE=m CONFIG_PCMCIA_RAYCS=m CONFIG_PCMCIA_WL3501=m # CONFIG_MAC80211_HWSIM is not set CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_VIRT_WIFI=m # # WiMAX Wireless Broadband devices @@ -3422,6 +3558,7 @@ CONFIG_IEEE802154_ADF7242=m CONFIG_IEEE802154_CA8210=m # CONFIG_IEEE802154_CA8210_DEBUGFS is not set CONFIG_IEEE802154_MCR20A=m +# CONFIG_IEEE802154_HWSIM is not set CONFIG_VMXNET3=m CONFIG_FUJITSU_ES=m CONFIG_THUNDERBOLT_NET=m @@ -3520,13 +3657,6 @@ CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m CONFIG_ISDN_DRV_AVMB1_AVM_CS=m CONFIG_ISDN_DRV_AVMB1_T1PCI=m CONFIG_ISDN_DRV_AVMB1_C4=m -CONFIG_CAPI_EICON=y -CONFIG_ISDN_DIVAS=m -CONFIG_ISDN_DIVAS_BRIPCI=y -CONFIG_ISDN_DIVAS_PRIPCI=y -CONFIG_ISDN_DIVAS_DIVACAPI=m -CONFIG_ISDN_DIVAS_USERIDI=m -CONFIG_ISDN_DIVAS_MAINT=m CONFIG_ISDN_DRV_GIGASET=m CONFIG_GIGASET_CAPI=y CONFIG_GIGASET_BASE=m @@ -3554,8 +3684,8 @@ CONFIG_MISDN_IPAC=m CONFIG_MISDN_ISAR=m CONFIG_ISDN_HDLC=m CONFIG_NVM=y -# CONFIG_NVM_DEBUG is not set CONFIG_NVM_PBLK=m +# CONFIG_NVM_PBLK_DEBUG is not set # # Input device support @@ -3686,10 +3816,12 @@ CONFIG_TOUCHSCREEN_AD7877=m CONFIG_TOUCHSCREEN_AD7879=m CONFIG_TOUCHSCREEN_AD7879_I2C=m CONFIG_TOUCHSCREEN_AD7879_SPI=m +# CONFIG_TOUCHSCREEN_ADC is not set CONFIG_TOUCHSCREEN_ATMEL_MXT=m # CONFIG_TOUCHSCREEN_ATMEL_MXT_T37 is not set CONFIG_TOUCHSCREEN_AUO_PIXCIR=m CONFIG_TOUCHSCREEN_BU21013=m +# CONFIG_TOUCHSCREEN_BU21029 is not set CONFIG_TOUCHSCREEN_CHIPONE_ICN8505=m CONFIG_TOUCHSCREEN_CY8CTMG110=m CONFIG_TOUCHSCREEN_CYTTSP_CORE=m @@ -3783,6 +3915,7 @@ CONFIG_INPUT_AD714X_SPI=m CONFIG_INPUT_ARIZONA_HAPTICS=m CONFIG_INPUT_BMA150=m CONFIG_INPUT_E3X0_BUTTON=m +CONFIG_INPUT_MSM_VIBRATOR=m CONFIG_INPUT_PCSPKR=m CONFIG_INPUT_MAX77693_HAPTIC=m CONFIG_INPUT_MC13783_PWRBUTTON=m @@ -3853,6 +3986,7 @@ CONFIG_SERIO_RAW=m CONFIG_SERIO_ALTERA_PS2=m CONFIG_SERIO_PS2MULT=m CONFIG_SERIO_ARC_PS2=m +CONFIG_SERIO_OLPC_APSP=m CONFIG_HYPERV_KEYBOARD=m CONFIG_SERIO_GPIO_PS2=m CONFIG_USERIO=m @@ -3890,6 +4024,7 @@ CONFIG_N_HDLC=m CONFIG_N_GSM=m CONFIG_TRACE_ROUTER=m CONFIG_TRACE_SINK=m +CONFIG_LDISC_AUTOLOAD=y CONFIG_DEVMEM=y # CONFIG_DEVKMEM is not set @@ -3954,6 +4089,7 @@ CONFIG_HVC_DRIVER=y CONFIG_VIRTIO_CONSOLE=m CONFIG_IPMI_HANDLER=m CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y CONFIG_IPMI_PANIC_EVENT=y CONFIG_IPMI_PANIC_STRING=y CONFIG_IPMI_DEVICE_INTERFACE=m @@ -3968,7 +4104,6 @@ CONFIG_HW_RANDOM_AMD=m CONFIG_HW_RANDOM_VIA=m CONFIG_HW_RANDOM_VIRTIO=m CONFIG_NVRAM=m -CONFIG_R3964=m CONFIG_APPLICOM=m # @@ -4006,6 +4141,7 @@ CONFIG_TELCLOCK=m CONFIG_DEVPORT=y CONFIG_XILLYBUS=m CONFIG_XILLYBUS_PCIE=m +# CONFIG_RANDOM_TRUST_CPU is not set # # I2C support @@ -4049,6 +4185,7 @@ CONFIG_I2C_ISMT=m CONFIG_I2C_PIIX4=m CONFIG_I2C_NFORCE2=m CONFIG_I2C_NFORCE2_S4985=m +CONFIG_I2C_NVIDIA_GPU=m CONFIG_I2C_SIS5595=m CONFIG_I2C_SIS630=m CONFIG_I2C_SIS96X=m @@ -4101,6 +4238,9 @@ CONFIG_I2C_SLAVE_EEPROM=m # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set # 
CONFIG_I2C_DEBUG_BUS is not set +CONFIG_I3C=m +CONFIG_CDNS_I3C_MASTER=m +CONFIG_DW_I3C_MASTER=m CONFIG_SPI=y # CONFIG_SPI_DEBUG is not set CONFIG_SPI_MASTER=y @@ -4119,6 +4259,7 @@ CONFIG_SPI_DW_PCI=m CONFIG_SPI_DW_MID_DMA=y CONFIG_SPI_DW_MMIO=m CONFIG_SPI_DLN2=m +CONFIG_SPI_NXP_FLEXSPI=m CONFIG_SPI_GPIO=m CONFIG_SPI_LM70_LLP=m CONFIG_SPI_OC_TINY=m @@ -4126,6 +4267,8 @@ CONFIG_SPI_PXA2XX=m CONFIG_SPI_PXA2XX_PCI=m CONFIG_SPI_ROCKCHIP=m CONFIG_SPI_SC18IS602=m +CONFIG_SPI_SIFIVE=m +CONFIG_SPI_MXIC=m CONFIG_SPI_XCOMM=m CONFIG_SPI_XILINX=m CONFIG_SPI_ZYNQMP_GQSPI=m @@ -4153,6 +4296,7 @@ CONFIG_HSI_BOARDINFO=y CONFIG_HSI_CHAR=m CONFIG_PPS=y # CONFIG_PPS_DEBUG is not set +# CONFIG_NTP_PPS is not set # # PPS clients support @@ -4187,6 +4331,7 @@ CONFIG_PINCTRL_CANNONLAKE=m CONFIG_PINCTRL_CEDARFORK=m CONFIG_PINCTRL_DENVERTON=m CONFIG_PINCTRL_GEMINILAKE=m +# CONFIG_PINCTRL_ICELAKE is not set CONFIG_PINCTRL_LEWISBURG=m CONFIG_PINCTRL_SUNRISEPOINT=m CONFIG_GPIOLIB=y @@ -4210,7 +4355,9 @@ CONFIG_GPIO_LYNXPOINT=y CONFIG_GPIO_MB86S7X=m CONFIG_GPIO_MENZ127=m CONFIG_GPIO_MOCKUP=m +CONFIG_GPIO_SIOX=m CONFIG_GPIO_VX855=m +CONFIG_GPIO_AMD_FCH=m # # Port-mapped I/O GPIO drivers @@ -4245,6 +4392,7 @@ CONFIG_GPIO_LP3943=m CONFIG_GPIO_LP873X=m CONFIG_GPIO_TPS65086=m CONFIG_GPIO_TPS65912=m +CONFIG_GPIO_TQMX86=m CONFIG_GPIO_UCB1400=m CONFIG_GPIO_WHISKEY_COVE=m CONFIG_GPIO_WM831X=m @@ -4300,7 +4448,6 @@ CONFIG_W1_SLAVE_DS2431=m CONFIG_W1_SLAVE_DS2433=m CONFIG_W1_SLAVE_DS2433_CRC=y CONFIG_W1_SLAVE_DS2438=m -CONFIG_W1_SLAVE_DS2760=m CONFIG_W1_SLAVE_DS2780=m CONFIG_W1_SLAVE_DS2781=m CONFIG_W1_SLAVE_DS28E04=m @@ -4315,6 +4462,7 @@ CONFIG_GENERIC_ADC_BATTERY=m CONFIG_WM831X_BACKUP=m CONFIG_WM831X_POWER=m # CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set CONFIG_BATTERY_DS2760=m CONFIG_BATTERY_DS2780=m CONFIG_BATTERY_DS2781=m @@ -4355,6 +4503,7 @@ CONFIG_CHARGER_SMB347=m CONFIG_BATTERY_GAUGE_LTC2941=m CONFIG_BATTERY_RT5033=m CONFIG_CHARGER_RT9455=m +# CONFIG_CHARGER_CROS_USBPD is not set CONFIG_HWMON=m CONFIG_HWMON_VID=m # CONFIG_HWMON_DEBUG_CHIP is not set @@ -4434,6 +4583,7 @@ CONFIG_SENSORS_MAX6650=m CONFIG_SENSORS_MAX6697=m CONFIG_SENSORS_MAX31790=m CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_MLXREG_FAN is not set CONFIG_SENSORS_TC654=m CONFIG_SENSORS_MENF21BMC_HWMON=m CONFIG_SENSORS_ADCXX=m @@ -4460,6 +4610,9 @@ CONFIG_SENSORS_NCT6683=m CONFIG_SENSORS_NCT6775=m CONFIG_SENSORS_NCT7802=m CONFIG_SENSORS_NCT7904=m +# CONFIG_SENSORS_NPCM7XX is not set +CONFIG_SENSORS_OCC_P8_I2C=m +CONFIG_SENSORS_OCC=y CONFIG_SENSORS_PCF8591=m CONFIG_PMBUS=m CONFIG_SENSORS_PMBUS=m @@ -4551,6 +4704,10 @@ CONFIG_THERMAL_GOV_USER_SPACE=y CONFIG_CLOCK_THERMAL=y CONFIG_DEVFREQ_THERMAL=y # CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# CONFIG_INTEL_POWERCLAMP=m CONFIG_X86_PKG_TEMP_THERMAL=m CONFIG_INTEL_SOC_DTS_IOSF_CORE=m @@ -4580,11 +4737,13 @@ CONFIG_DA9052_WATCHDOG=m CONFIG_DA9063_WATCHDOG=m CONFIG_DA9062_WATCHDOG=m CONFIG_MENF21BMC_WATCHDOG=m +# CONFIG_MENZ069_WATCHDOG is not set CONFIG_WDAT_WDT=m CONFIG_WM831X_WATCHDOG=m CONFIG_XILINX_WATCHDOG=m CONFIG_ZIIRAVE_WATCHDOG=m CONFIG_RAVE_SP_WATCHDOG=m +CONFIG_MLX_WDT=m CONFIG_CADENCE_WATCHDOG=m CONFIG_DW_WATCHDOG=m CONFIG_MAX63XX_WATCHDOG=m @@ -4617,6 +4776,7 @@ CONFIG_60XX_WDT=m CONFIG_CPU5_WDT=m CONFIG_SMSC_SCH311X_WDT=m CONFIG_SMSC37B787_WDT=m +CONFIG_TQMX86_WDT=m CONFIG_VIA_WDT=m CONFIG_W83627HF_WDT=m CONFIG_W83877F_WDT=m @@ -4658,7 +4818,6 @@ CONFIG_SSB_PCMCIAHOST_POSSIBLE=y CONFIG_SSB_PCMCIAHOST=y CONFIG_SSB_SDIOHOST_POSSIBLE=y CONFIG_SSB_SDIOHOST=y 
-# CONFIG_SSB_DEBUG is not set CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y CONFIG_SSB_DRIVER_PCICORE=y CONFIG_SSB_DRIVER_GPIO=y @@ -4683,9 +4842,8 @@ CONFIG_MFD_BD9571MWV=m CONFIG_MFD_AXP20X=m CONFIG_MFD_AXP20X_I2C=m CONFIG_MFD_CROS_EC=m -CONFIG_MFD_CROS_EC_I2C=m -CONFIG_MFD_CROS_EC_SPI=m # CONFIG_MFD_CROS_EC_CHARDEV is not set +# CONFIG_MFD_MADERA is not set CONFIG_PMIC_DA9052=y CONFIG_MFD_DA9052_SPI=y CONFIG_MFD_DA9062=m @@ -4741,6 +4899,7 @@ CONFIG_MFD_TPS65912_I2C=m CONFIG_MFD_TPS65912_SPI=y CONFIG_MFD_WL1273_CORE=m CONFIG_MFD_LM3533=m +CONFIG_MFD_TQMX86=m CONFIG_MFD_VX855=m CONFIG_MFD_ARIZONA=y CONFIG_MFD_ARIZONA_I2C=m @@ -4836,6 +4995,7 @@ CONFIG_IR_SHARP_DECODER=m CONFIG_IR_MCE_KBD_DECODER=m CONFIG_IR_XMP_DECODER=m CONFIG_IR_IMON_DECODER=m +CONFIG_IR_RCMM_DECODER=m CONFIG_RC_DEVICES=y CONFIG_RC_ATI_REMOTE=m CONFIG_IR_ENE=m @@ -4855,6 +5015,7 @@ CONFIG_RC_LOOPBACK=m CONFIG_IR_SERIAL=m CONFIG_IR_SERIAL_TRANSMITTER=y CONFIG_IR_SIR=m +CONFIG_RC_XBOX_DVD=m CONFIG_MEDIA_SUPPORT=m # @@ -4870,12 +5031,12 @@ CONFIG_MEDIA_CEC_RC=y # CONFIG_CEC_PIN_ERROR_INJ is not set CONFIG_MEDIA_CONTROLLER=y CONFIG_MEDIA_CONTROLLER_DVB=y +CONFIG_MEDIA_CONTROLLER_REQUEST_API=y CONFIG_VIDEO_DEV=m CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_VIDEO_V4L2=m # CONFIG_VIDEO_ADV_DEBUG is not set # CONFIG_VIDEO_FIXED_MINOR_RANGES is not set -CONFIG_VIDEO_PCI_SKELETON=m CONFIG_VIDEO_TUNER=m CONFIG_V4L2_MEM2MEM_DEV=m CONFIG_V4L2_FLASH_LED_CLASS=m @@ -5019,7 +5180,6 @@ CONFIG_DVB_USB_PCTV452E=m CONFIG_DVB_USB_DW2102=m CONFIG_DVB_USB_CINERGY_T2=m CONFIG_DVB_USB_DTV5100=m -CONFIG_DVB_USB_FRIIO=m CONFIG_DVB_USB_AZ6027=m CONFIG_DVB_USB_TECHNISAT_USB2=m CONFIG_DVB_USB_V2=m @@ -5082,6 +5242,7 @@ CONFIG_VIDEO_IVTV=m # CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set CONFIG_VIDEO_IVTV_ALSA=m CONFIG_VIDEO_FB_IVTV=m +# CONFIG_VIDEO_FB_IVTV_FORCE_PAT is not set CONFIG_VIDEO_HEXIUM_GEMINI=m CONFIG_VIDEO_HEXIUM_ORION=m CONFIG_VIDEO_MXB=m @@ -5144,15 +5305,17 @@ CONFIG_VIDEO_CAFE_CCIC=m CONFIG_VIDEO_CADENCE=y CONFIG_VIDEO_CADENCE_CSI2RX=m CONFIG_VIDEO_CADENCE_CSI2TX=m -CONFIG_SOC_CAMERA=m -CONFIG_SOC_CAMERA_PLATFORM=m +CONFIG_VIDEO_ASPEED=m CONFIG_V4L_MEM2MEM_DRIVERS=y CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m CONFIG_VIDEO_SH_VEU=m # CONFIG_V4L_TEST_DRIVERS is not set CONFIG_DVB_PLATFORM_DRIVERS=y CONFIG_CEC_PLATFORM_DRIVERS=y +# CONFIG_VIDEO_CROS_EC_CEC is not set CONFIG_CEC_GPIO=m +CONFIG_VIDEO_SECO_CEC=m +# CONFIG_VIDEO_SECO_RC is not set CONFIG_SDR_PLATFORM_DRIVERS=y # @@ -5279,7 +5442,6 @@ CONFIG_VIDEO_ADV7175=m CONFIG_VIDEO_OV2640=m CONFIG_VIDEO_OV7640=m CONFIG_VIDEO_OV7670=m -CONFIG_VIDEO_MT9M111=m CONFIG_VIDEO_MT9V011=m # @@ -5307,24 +5469,6 @@ CONFIG_VIDEO_SAA6752HS=m CONFIG_VIDEO_M52790=m # -# Sensors used on soc_camera driver -# - -# -# soc_camera sensor drivers -# -CONFIG_SOC_CAMERA_MT9M001=m -CONFIG_SOC_CAMERA_MT9M111=m -CONFIG_SOC_CAMERA_MT9T112=m -CONFIG_SOC_CAMERA_MT9V022=m -CONFIG_SOC_CAMERA_OV5642=m -CONFIG_SOC_CAMERA_OV772X=m -CONFIG_SOC_CAMERA_OV9640=m -CONFIG_SOC_CAMERA_OV9740=m -CONFIG_SOC_CAMERA_RJ54N1=m -CONFIG_SOC_CAMERA_TW9910=m - -# # Media SPI Adapters # CONFIG_CXD2880_SPI_DRV=m @@ -5547,6 +5691,7 @@ CONFIG_DRM_KMS_FB_HELPER=y CONFIG_DRM_FBDEV_EMULATION=y CONFIG_DRM_FBDEV_OVERALLOC=100 # CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +# CONFIG_DRM_DP_CEC is not set CONFIG_DRM_TTM=m CONFIG_DRM_GEM_CMA_HELPER=y CONFIG_DRM_KMS_CMA_HELPER=y @@ -5560,6 +5705,10 @@ CONFIG_DRM_I2C_CH7006=m CONFIG_DRM_I2C_SIL164=m CONFIG_DRM_I2C_NXP_TDA998X=m CONFIG_DRM_I2C_NXP_TDA9950=m + +# +# ARM devices +# CONFIG_DRM_RADEON=m # CONFIG_DRM_RADEON_USERPTR 
is not set CONFIG_DRM_AMDGPU=m @@ -5577,9 +5726,10 @@ CONFIG_DRM_AMD_ACP=y # Display Engine Configuration # CONFIG_DRM_AMD_DC=y -# CONFIG_DRM_AMD_DC_FBC is not set CONFIG_DRM_AMD_DC_DCN1_0=y +CONFIG_DRM_AMD_DC_DCN1_01=y # CONFIG_DEBUG_KERNEL_DC is not set +CONFIG_HSA_AMD=y # # AMD Library routines @@ -5588,6 +5738,7 @@ CONFIG_CHASH=m # CONFIG_CHASH_STATS is not set # CONFIG_CHASH_SELFTEST is not set CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y CONFIG_NOUVEAU_DEBUG=5 CONFIG_NOUVEAU_DEBUG_DEFAULT=3 # CONFIG_NOUVEAU_DEBUG_MMU is not set @@ -5600,6 +5751,7 @@ CONFIG_DRM_I915_USERPTR=y CONFIG_DRM_I915_GVT=y CONFIG_DRM_I915_GVT_KVMGT=m CONFIG_DRM_VGEM=m +# CONFIG_DRM_VKMS is not set CONFIG_DRM_VMWGFX=m CONFIG_DRM_VMWGFX_FBCON=y CONFIG_DRM_GMA500=m @@ -5625,11 +5777,14 @@ CONFIG_DRM_PANEL_BRIDGE=y # Display Interface Bridges # CONFIG_DRM_ANALOGIX_ANX78XX=m -CONFIG_HSA_AMD=m +CONFIG_DRM_ETNAVIV=m +CONFIG_DRM_ETNAVIV_THERMAL=y CONFIG_DRM_HISI_HIBMC=m CONFIG_DRM_TINYDRM=m CONFIG_TINYDRM_MIPI_DBI=m +CONFIG_TINYDRM_HX8357D=m CONFIG_TINYDRM_ILI9225=m +CONFIG_TINYDRM_ILI9341=m CONFIG_TINYDRM_MI0283QT=m CONFIG_TINYDRM_REPAPER=m CONFIG_TINYDRM_ST7586=m @@ -5641,10 +5796,10 @@ CONFIG_DRM_LIB_RANDOM=y # # Frame buffer Devices # -CONFIG_FB=y -CONFIG_FIRMWARE_EDID=y CONFIG_FB_CMDLINE=y CONFIG_FB_NOTIFY=y +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y CONFIG_FB_BOOT_VESA_SUPPORT=y CONFIG_FB_CFB_FILLRECT=y CONFIG_FB_CFB_COPYAREA=y @@ -5658,7 +5813,7 @@ CONFIG_FB_BOTH_ENDIAN=y # CONFIG_FB_LITTLE_ENDIAN is not set CONFIG_FB_SYS_FOPS=m CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_BACKLIGHT=y +CONFIG_FB_BACKLIGHT=m CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y @@ -5707,7 +5862,6 @@ CONFIG_FB_EFI=y # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set # CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set # CONFIG_FB_HYPERV is not set CONFIG_FB_SIMPLE=y # CONFIG_FB_SM712 is not set @@ -5721,8 +5875,6 @@ CONFIG_LCD_ILI9320=m CONFIG_LCD_TDO24M=m CONFIG_LCD_VGG2432A4=m CONFIG_LCD_PLATFORM=m -CONFIG_LCD_S6E63M0=m -CONFIG_LCD_LD9040=m CONFIG_LCD_AMS369FG06=m CONFIG_LCD_LMS501KF03=m CONFIG_LCD_HX8357=m @@ -5763,6 +5915,7 @@ CONFIG_DUMMY_CONSOLE_ROWS=25 CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set CONFIG_LOGO=y CONFIG_LOGO_LINUX_MONO=y CONFIG_LOGO_LINUX_VGA16=y @@ -5923,6 +6076,7 @@ CONFIG_SND_HDA_GENERIC=m CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 CONFIG_SND_HDA_CORE=m CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y CONFIG_SND_HDA_I915=y CONFIG_SND_HDA_EXT_CORE=m CONFIG_SND_HDA_PREALLOC_SIZE=64 @@ -5964,6 +6118,7 @@ CONFIG_SND_SOC_ACPI=m CONFIG_SND_SOC_AMD_ACP=m CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH=m CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m +CONFIG_SND_SOC_AMD_ACP3x=m CONFIG_SND_ATMEL_SOC=m CONFIG_SND_DESIGNWARE_I2S=m CONFIG_SND_DESIGNWARE_PCM=y @@ -5980,6 +6135,7 @@ CONFIG_SND_SOC_FSL_SAI=m CONFIG_SND_SOC_FSL_SSI=m CONFIG_SND_SOC_FSL_SPDIF=m CONFIG_SND_SOC_FSL_ESAI=m +CONFIG_SND_SOC_FSL_MICFIL=m CONFIG_SND_SOC_IMX_AUDMUX=m CONFIG_SND_I2S_HI6210_I2S=m CONFIG_SND_SOC_IMG=y @@ -6000,8 +6156,17 @@ CONFIG_SND_SOC_INTEL_HASWELL=m CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI=m CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=m -CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m CONFIG_SND_SOC_INTEL_SKYLAKE=m +CONFIG_SND_SOC_INTEL_SKL=m +CONFIG_SND_SOC_INTEL_APL=m +CONFIG_SND_SOC_INTEL_KBL=m +CONFIG_SND_SOC_INTEL_GLK=m +CONFIG_SND_SOC_INTEL_CNL=m +CONFIG_SND_SOC_INTEL_CFL=m 
+CONFIG_SND_SOC_INTEL_SKYLAKE_FAMILY=m +CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m +# CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC is not set +CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON=m CONFIG_SND_SOC_ACPI_INTEL_MATCH=m CONFIG_SND_SOC_INTEL_MACH=y CONFIG_SND_SOC_INTEL_HASWELL_MACH=m @@ -6024,10 +6189,17 @@ CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH=m +CONFIG_SND_SOC_INTEL_KBL_RT5660_MACH=m +# CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH is not set +CONFIG_SND_SOC_MTK_BTCVSD=m # # STMicroelectronics STM32 SOC audio support # +CONFIG_SND_SOC_XILINX_I2S=m +CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=m +CONFIG_SND_SOC_XILINX_SPDIF=m CONFIG_SND_SOC_XTFPGA_I2S=m CONFIG_ZX_TDM=m CONFIG_SND_SOC_I2C_AND_SPI=m @@ -6044,6 +6216,7 @@ CONFIG_SND_SOC_ADAU1761_I2C=m CONFIG_SND_SOC_ADAU1761_SPI=m CONFIG_SND_SOC_ADAU7002=m CONFIG_SND_SOC_AK4104=m +CONFIG_SND_SOC_AK4118=m CONFIG_SND_SOC_AK4458=m CONFIG_SND_SOC_AK4554=m CONFIG_SND_SOC_AK4613=m @@ -6053,10 +6226,12 @@ CONFIG_SND_SOC_AK5558=m CONFIG_SND_SOC_ALC5623=m CONFIG_SND_SOC_BD28623=m # CONFIG_SND_SOC_BT_SCO is not set +CONFIG_SND_SOC_CROS_EC_CODEC=m CONFIG_SND_SOC_CS35L32=m CONFIG_SND_SOC_CS35L33=m CONFIG_SND_SOC_CS35L34=m CONFIG_SND_SOC_CS35L35=m +CONFIG_SND_SOC_CS35L36=m CONFIG_SND_SOC_CS42L42=m CONFIG_SND_SOC_CS42L51=m CONFIG_SND_SOC_CS42L51_I2C=m @@ -6071,14 +6246,15 @@ CONFIG_SND_SOC_CS4271_SPI=m CONFIG_SND_SOC_CS42XX8=m CONFIG_SND_SOC_CS42XX8_I2C=m CONFIG_SND_SOC_CS43130=m +CONFIG_SND_SOC_CS4341=m CONFIG_SND_SOC_CS4349=m CONFIG_SND_SOC_CS53L30=m CONFIG_SND_SOC_DA7213=m CONFIG_SND_SOC_DA7219=m -CONFIG_SND_SOC_DIO2125=m CONFIG_SND_SOC_DMIC=m CONFIG_SND_SOC_HDMI_CODEC=m CONFIG_SND_SOC_ES7134=m +# CONFIG_SND_SOC_ES7241 is not set CONFIG_SND_SOC_ES8316=m CONFIG_SND_SOC_ES8328=m CONFIG_SND_SOC_ES8328_I2C=m @@ -6086,6 +6262,7 @@ CONFIG_SND_SOC_ES8328_SPI=m CONFIG_SND_SOC_GTM601=m CONFIG_SND_SOC_HDAC_HDMI=m CONFIG_SND_SOC_INNO_RK3036=m +CONFIG_SND_SOC_MAX98088=m CONFIG_SND_SOC_MAX98090=m CONFIG_SND_SOC_MAX98357A=m CONFIG_SND_SOC_MAX98504=m @@ -6104,12 +6281,16 @@ CONFIG_SND_SOC_PCM179X_SPI=m CONFIG_SND_SOC_PCM186X=m CONFIG_SND_SOC_PCM186X_I2C=m CONFIG_SND_SOC_PCM186X_SPI=m +CONFIG_SND_SOC_PCM3060=m +CONFIG_SND_SOC_PCM3060_I2C=m +CONFIG_SND_SOC_PCM3060_SPI=m CONFIG_SND_SOC_PCM3168A=m CONFIG_SND_SOC_PCM3168A_I2C=m CONFIG_SND_SOC_PCM3168A_SPI=m CONFIG_SND_SOC_PCM512x=m CONFIG_SND_SOC_PCM512x_I2C=m CONFIG_SND_SOC_PCM512x_SPI=m +CONFIG_SND_SOC_RK3328=m CONFIG_SND_SOC_RL6231=m CONFIG_SND_SOC_RL6347A=m CONFIG_SND_SOC_RT286=m @@ -6121,6 +6302,7 @@ CONFIG_SND_SOC_RT5631=m CONFIG_SND_SOC_RT5640=m CONFIG_SND_SOC_RT5645=m CONFIG_SND_SOC_RT5651=m +CONFIG_SND_SOC_RT5660=m CONFIG_SND_SOC_RT5663=m CONFIG_SND_SOC_RT5670=m CONFIG_SND_SOC_RT5677=m @@ -6130,6 +6312,7 @@ CONFIG_SND_SOC_SI476X=m CONFIG_SND_SOC_SIGMADSP=m CONFIG_SND_SOC_SIGMADSP_I2C=m CONFIG_SND_SOC_SIGMADSP_REGMAP=m +# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set CONFIG_SND_SOC_SIRF_AUDIO_CODEC=m CONFIG_SND_SOC_SPDIF=m CONFIG_SND_SOC_SSM2305=m @@ -6158,6 +6341,7 @@ CONFIG_SND_SOC_TLV320AIC3X=m CONFIG_SND_SOC_TS3A227E=m CONFIG_SND_SOC_TSCS42XX=m CONFIG_SND_SOC_TSCS454=m +CONFIG_SND_SOC_WCD9335=m CONFIG_SND_SOC_WM8510=m CONFIG_SND_SOC_WM8523=m CONFIG_SND_SOC_WM8524=m @@ -6176,6 +6360,7 @@ CONFIG_SND_SOC_WM8804=m CONFIG_SND_SOC_WM8804_I2C=m CONFIG_SND_SOC_WM8804_SPI=m CONFIG_SND_SOC_WM8903=m +CONFIG_SND_SOC_WM8904=m 
CONFIG_SND_SOC_WM8960=m CONFIG_SND_SOC_WM8962=m CONFIG_SND_SOC_WM8974=m @@ -6184,8 +6369,10 @@ CONFIG_SND_SOC_WM8985=m CONFIG_SND_SOC_ZX_AUD96P22=m CONFIG_SND_SOC_MAX9759=m CONFIG_SND_SOC_MT6351=m +CONFIG_SND_SOC_MT6358=m CONFIG_SND_SOC_NAU8540=m CONFIG_SND_SOC_NAU8810=m +CONFIG_SND_SOC_NAU8822=m CONFIG_SND_SOC_NAU8824=m CONFIG_SND_SOC_NAU8825=m CONFIG_SND_SOC_TPA6130A2=m @@ -6218,9 +6405,11 @@ CONFIG_HID_ASUS=m CONFIG_HID_AUREAL=m CONFIG_HID_BELKIN=m CONFIG_HID_BETOP_FF=m +CONFIG_HID_BIGBEN_FF=m CONFIG_HID_CHERRY=m CONFIG_HID_CHICONY=m CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set CONFIG_HID_PRODIKEYS=m CONFIG_HID_CMEDIA=m CONFIG_HID_CP2112=m @@ -6242,6 +6431,7 @@ CONFIG_HID_KEYTOUCH=m CONFIG_HID_KYE=m CONFIG_HID_UCLOGIC=m CONFIG_HID_WALTOP=m +CONFIG_HID_VIEWSONIC=m CONFIG_HID_GYRATION=m CONFIG_HID_ICADE=m CONFIG_HID_ITE=m @@ -6259,6 +6449,7 @@ CONFIG_LOGIRUMBLEPAD2_FF=y CONFIG_LOGIG940_FF=y CONFIG_LOGIWHEELS_FF=y CONFIG_HID_MAGICMOUSE=m +CONFIG_HID_MALTRON=m CONFIG_HID_MAYFLASH=m CONFIG_HID_REDRAGON=m CONFIG_HID_MICROSOFT=m @@ -6345,6 +6536,7 @@ CONFIG_USB_OTG=y # CONFIG_USB_OTG_BLACKLIST_HUB is not set CONFIG_USB_OTG_FSM=m CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 CONFIG_USB_MON=m CONFIG_USB_WUSB=m CONFIG_USB_WUSB_CBAF=m @@ -6362,6 +6554,7 @@ CONFIG_USB_EHCI_HCD=m CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_TT_NEWSCHED=y CONFIG_USB_EHCI_PCI=m +CONFIG_USB_EHCI_FSL=m CONFIG_USB_EHCI_HCD_PLATFORM=m CONFIG_USB_OXU210HP_HCD=m CONFIG_USB_ISP116X_HCD=m @@ -6451,6 +6644,7 @@ CONFIG_USB_DWC3_DUAL_ROLE=y # Platform Glue Driver Support # CONFIG_USB_DWC3_PCI=m +CONFIG_USB_DWC3_HAPS=m CONFIG_USB_DWC2=m # CONFIG_USB_DWC2_HOST is not set @@ -6685,8 +6879,11 @@ CONFIG_USB_G_HID=m CONFIG_USB_G_WEBCAM=m CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m CONFIG_TYPEC_FUSB302=m CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_CCG=m CONFIG_UCSI_ACPI=m CONFIG_TYPEC_TPS6598X=m @@ -6694,10 +6891,15 @@ CONFIG_TYPEC_TPS6598X=m # USB Type-C Multiplexer/DeMultiplexer Switch support # CONFIG_TYPEC_MUX_PI3USB30532=m + +# +# USB Type-C Alternate Mode drivers +# +# CONFIG_TYPEC_DP_ALTMODE is not set +CONFIG_USB_ROLE_SWITCH=m CONFIG_USB_ROLES_INTEL_XHCI=m CONFIG_USB_LED_TRIG=y CONFIG_USB_ULPI_BUS=m -CONFIG_USB_ROLE_SWITCH=m CONFIG_UWB=m CONFIG_UWB_HWA=m CONFIG_UWB_WHCI=m @@ -6719,6 +6921,7 @@ CONFIG_MMC_SDHCI_ACPI=m CONFIG_MMC_SDHCI_PLTFM=m CONFIG_MMC_SDHCI_F_SDH30=m CONFIG_MMC_WBSD=m +CONFIG_MMC_ALCOR=m CONFIG_MMC_TIFM_SD=m CONFIG_MMC_SPI=m CONFIG_MMC_SDRICOH_CS=m @@ -6825,6 +7028,8 @@ CONFIG_LEDS_TRIGGER_TRANSIENT=m CONFIG_LEDS_TRIGGER_CAMERA=m CONFIG_LEDS_TRIGGER_PANIC=y CONFIG_LEDS_TRIGGER_NETDEV=m +CONFIG_LEDS_TRIGGER_PATTERN=m +CONFIG_LEDS_TRIGGER_AUDIO=m CONFIG_ACCESSIBILITY=y CONFIG_A11Y_BRAILLE_CONSOLE=y CONFIG_INFINIBAND=m @@ -6849,6 +7054,13 @@ CONFIG_INFINIBAND_NES=m CONFIG_INFINIBAND_OCRDMA=m CONFIG_INFINIBAND_VMWARE_PVRDMA=m CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_HFI1=m +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set +# CONFIG_SDMA_VERBOSITY is not set +CONFIG_INFINIBAND_QEDR=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_IPOIB_CM=y CONFIG_INFINIBAND_IPOIB_DEBUG=y @@ -6858,13 +7070,6 @@ CONFIG_INFINIBAND_SRPT=m CONFIG_INFINIBAND_ISER=m CONFIG_INFINIBAND_ISERT=m CONFIG_INFINIBAND_OPA_VNIC=m -CONFIG_INFINIBAND_RDMAVT=m -CONFIG_RDMA_RXE=m -CONFIG_INFINIBAND_HFI1=m -# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set -# CONFIG_SDMA_VERBOSITY is not set -CONFIG_INFINIBAND_QEDR=m 
-CONFIG_INFINIBAND_BNXT_RE=m CONFIG_EDAC_ATOMIC_SCRUB=y CONFIG_EDAC_SUPPORT=y CONFIG_EDAC=y @@ -6887,6 +7092,7 @@ CONFIG_EDAC_I5100=m CONFIG_EDAC_I7300=m CONFIG_EDAC_SBRIDGE=m CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m CONFIG_EDAC_PND2=m CONFIG_RTC_LIB=y CONFIG_RTC_MC146818_LIB=y @@ -6912,9 +7118,9 @@ CONFIG_RTC_INTF_DEV_UIE_EMUL=y # CONFIG_RTC_DRV_88PM80X=m CONFIG_RTC_DRV_ABB5ZES3=m +CONFIG_RTC_DRV_ABEOZ9=m CONFIG_RTC_DRV_ABX80X=m CONFIG_RTC_DRV_DS1307=m -CONFIG_RTC_DRV_DS1307_HWMON=y # CONFIG_RTC_DRV_DS1307_CENTURY is not set CONFIG_RTC_DRV_DS1374=m CONFIG_RTC_DRV_DS1374_WDT=y @@ -6939,7 +7145,9 @@ CONFIG_RTC_DRV_RX8010=m CONFIG_RTC_DRV_RX8581=m CONFIG_RTC_DRV_RX8025=m CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV3028=m CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_SD3078=m # # SPI RTC drivers @@ -6983,8 +7191,6 @@ CONFIG_RTC_DRV_DS1685=y # CONFIG_RTC_DRV_DS17285 is not set # CONFIG_RTC_DRV_DS17485 is not set # CONFIG_RTC_DRV_DS17885 is not set -CONFIG_RTC_DS1685_PROC_REGS=y -CONFIG_RTC_DS1685_SYSFS_REGS=y CONFIG_RTC_DRV_DS1742=m CONFIG_RTC_DRV_DS2404=m CONFIG_RTC_DRV_DA9052=m @@ -7013,6 +7219,7 @@ CONFIG_RTC_DRV_MT6397=m # HID Sensor RTC drivers # CONFIG_RTC_DRV_HID_SENSOR_TIME=m +CONFIG_RTC_DRV_WILCO_EC=m CONFIG_DMADEVICES=y # CONFIG_DMADEVICES_DEBUG is not set @@ -7045,6 +7252,7 @@ CONFIG_DMA_ENGINE_RAID=y # CONFIG_SYNC_FILE=y # CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set CONFIG_DCA=m CONFIG_AUXDISPLAY=y CONFIG_HD44780=m @@ -7054,10 +7262,14 @@ CONFIG_KS0108_DELAY=2 CONFIG_CFAG12864B=m CONFIG_CFAG12864B_RATE=20 CONFIG_IMG_ASCII_LCD=m -CONFIG_PANEL=m +CONFIG_PARPORT_PANEL=m CONFIG_PANEL_PARPORT=0 CONFIG_PANEL_PROFILE=5 # CONFIG_PANEL_CHANGE_MESSAGE is not set +# CONFIG_CHARLCD_BL_OFF is not set +CONFIG_CHARLCD_BL_ON=y +# CONFIG_CHARLCD_BL_FLASH is not set +CONFIG_PANEL=m CONFIG_CHARLCD=m CONFIG_UIO=m CONFIG_UIO_CIF=m @@ -7110,7 +7322,6 @@ CONFIG_COMEDI_MISC_DRIVERS=y CONFIG_COMEDI_BOND=m CONFIG_COMEDI_TEST=m CONFIG_COMEDI_PARPORT=m -CONFIG_COMEDI_SERIAL2002=m CONFIG_COMEDI_ISA_DRIVERS=y CONFIG_COMEDI_PCL711=m CONFIG_COMEDI_PCL724=m @@ -7240,6 +7451,7 @@ CONFIG_COMEDI_ISADMA=m CONFIG_COMEDI_NI_LABPC=m CONFIG_COMEDI_NI_LABPC_ISADMA=m CONFIG_COMEDI_NI_TIO=m +CONFIG_COMEDI_NI_ROUTING=m CONFIG_RTL8192U=m CONFIG_RTLLIB=m CONFIG_RTLLIB_CRYPTO_CCMP=m @@ -7269,9 +7481,6 @@ CONFIG_ADIS16240=m # # Analog to digital converters # -CONFIG_AD7606=m -CONFIG_AD7606_IFACE_PARALLEL=m -CONFIG_AD7606_IFACE_SPI=m CONFIG_AD7780=m CONFIG_AD7816=m CONFIG_AD7192=m @@ -7288,7 +7497,6 @@ CONFIG_ADT7316_I2C=m # Capacitance to digital converters # CONFIG_AD7150=m -CONFIG_AD7152=m CONFIG_AD7746=m # @@ -7298,11 +7506,6 @@ CONFIG_AD9832=m CONFIG_AD9834=m # -# Digital gyroscope sensors -# -CONFIG_ADIS16060=m - -# # Network Analyzer, Impedance Converters # CONFIG_AD5933=m @@ -7317,10 +7520,8 @@ CONFIG_ADE7854_SPI=m # # Resolver to digital converters # -CONFIG_AD2S90=m CONFIG_AD2S1210=m CONFIG_FB_SM750=m -CONFIG_FB_XGI=m # # Speakup console speech @@ -7339,8 +7540,6 @@ CONFIG_SPEAKUP_SYNTH_TXPRT=m # CONFIG_SPEAKUP_SYNTH_DUMMY is not set CONFIG_STAGING_MEDIA=y CONFIG_I2C_BCM2048=m -CONFIG_SOC_CAMERA_IMX074=m -CONFIG_SOC_CAMERA_MT9T031=m CONFIG_VIDEO_ZORAN=m CONFIG_VIDEO_ZORAN_DC30=m CONFIG_VIDEO_ZORAN_ZR36060=m @@ -7349,6 +7548,11 @@ CONFIG_VIDEO_ZORAN_DC10=m CONFIG_VIDEO_ZORAN_LML33=m CONFIG_VIDEO_ZORAN_LML33R10=m CONFIG_VIDEO_ZORAN_AVS6EYES=m +CONFIG_VIDEO_IPU3_IMGU=m + +# +# soc_camera sensor drivers +# # # Android @@ -7357,11 +7561,7 @@ CONFIG_LTE_GDM724X=m CONFIG_FIREWIRE_SERIAL=m CONFIG_FWTTY_MAX_TOTAL_PORTS=64 
CONFIG_FWTTY_MAX_CARD_PORTS=32 -CONFIG_MTD_SPINAND_MT29F=m -CONFIG_MTD_SPINAND_ONDIEECC=y -CONFIG_DGNC=m CONFIG_GS_FPGABOOT=m -CONFIG_CRYPTO_SKEIN=m CONFIG_UNISYSSPAR=y CONFIG_FB_TFT=m CONFIG_FB_TFT_AGM1264K_FL=m @@ -7429,23 +7629,21 @@ CONFIG_GREYBUS_SDIO=m CONFIG_GREYBUS_SPI=m CONFIG_GREYBUS_UART=m CONFIG_GREYBUS_USB=m +CONFIG_DRM_VBOXVIDEO=m +CONFIG_PI433=m # -# USB Power Delivery and Type-C drivers +# Gasket devices # -CONFIG_TYPEC_TCPCI=m -CONFIG_TYPEC_RT1711H=m -# CONFIG_DRM_VBOXVIDEO is not set -CONFIG_PI433=m -CONFIG_MTK_MMC=m -# CONFIG_MTK_AEE_KDUMP is not set -# CONFIG_MTK_MMC_CD_POLL is not set +# CONFIG_STAGING_GASKET_FRAMEWORK is not set +# CONFIG_EROFS_FS is not set CONFIG_X86_PLATFORM_DEVICES=y CONFIG_ACER_WMI=m CONFIG_ACER_WIRELESS=m CONFIG_ACERHDF=m CONFIG_ALIENWARE_WMI=m CONFIG_ASUS_LAPTOP=m +CONFIG_DCDBAS=m CONFIG_DELL_SMBIOS=m CONFIG_DELL_SMBIOS_WMI=y CONFIG_DELL_SMBIOS_SMM=y @@ -7456,6 +7654,7 @@ CONFIG_DELL_WMI_AIO=m CONFIG_DELL_WMI_LED=m CONFIG_DELL_SMO8800=m CONFIG_DELL_RBTN=m +CONFIG_DELL_RBU=m CONFIG_FUJITSU_LAPTOP=m CONFIG_FUJITSU_TABLET=m CONFIG_AMILO_RFKILL=m @@ -7463,6 +7662,7 @@ CONFIG_GPD_POCKET_FAN=m CONFIG_HP_ACCEL=m CONFIG_HP_WIRELESS=m CONFIG_HP_WMI=m +CONFIG_LG_LAPTOP=m CONFIG_MSI_LAPTOP=m CONFIG_PANASONIC_LAPTOP=m CONFIG_COMPAL_LAPTOP=m @@ -7509,7 +7709,6 @@ CONFIG_SAMSUNG_Q10=m CONFIG_APPLE_GMUX=m CONFIG_INTEL_RST=m CONFIG_INTEL_SMARTCONNECT=m -CONFIG_PVPANIC=m CONFIG_INTEL_PMC_IPC=m CONFIG_INTEL_BXTWC_PMIC_TMU=m CONFIG_SURFACE_PRO3_BUTTON=m @@ -7519,17 +7718,26 @@ CONFIG_INTEL_TELEMETRY=m CONFIG_MLX_PLATFORM=m # CONFIG_INTEL_TURBO_MAX_3 is not set CONFIG_INTEL_CHTDC_TI_PWRBTN=m +# CONFIG_I2C_MULTI_INSTANTIATE is not set +CONFIG_INTEL_ATOMISP2_PM=m +CONFIG_HUAWEI_WMI=m +CONFIG_PCENGINES_APU2=m CONFIG_PMC_ATOM=y CONFIG_CHROME_PLATFORMS=y CONFIG_CHROMEOS_LAPTOP=m CONFIG_CHROMEOS_PSTORE=m CONFIG_CHROMEOS_TBMC=m +# CONFIG_CROS_EC_I2C is not set +# CONFIG_CROS_EC_SPI is not set CONFIG_CROS_EC_LPC=m CONFIG_CROS_EC_LPC_MEC=y CONFIG_CROS_EC_PROTO=y CONFIG_CROS_KBD_LED_BACKLIGHT=m +CONFIG_WILCO_EC=m +# CONFIG_WILCO_EC_DEBUGFS is not set CONFIG_MELLANOX_PLATFORM=y CONFIG_MLXREG_HOTPLUG=m +# CONFIG_MLXREG_IO is not set CONFIG_CLKDEV_LOOKUP=y CONFIG_HAVE_CLK_PREPARE=y CONFIG_COMMON_CLK=y @@ -7538,6 +7746,7 @@ CONFIG_COMMON_CLK=y # Common Clock Framework # CONFIG_COMMON_CLK_WM831X=m +# CONFIG_COMMON_CLK_MAX9485 is not set CONFIG_COMMON_CLK_SI5351=m CONFIG_COMMON_CLK_SI544=m CONFIG_COMMON_CLK_CDCE706=m @@ -7554,13 +7763,15 @@ CONFIG_CLKBLD_I8253=y CONFIG_MAILBOX=y CONFIG_PCC=y CONFIG_ALTERA_MBOX=m +CONFIG_IOMMU_IOVA=y CONFIG_IOMMU_API=y CONFIG_IOMMU_SUPPORT=y # # Generic IOMMU Pagetable Support # -CONFIG_IOMMU_IOVA=y +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_AMD_IOMMU=y CONFIG_AMD_IOMMU_V2=m CONFIG_DMAR_TABLE=y @@ -7569,6 +7780,7 @@ CONFIG_INTEL_IOMMU_SVM=y # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set CONFIG_INTEL_IOMMU_FLOPPY_WA=y CONFIG_IRQ_REMAP=y +CONFIG_HYPERV_IOMMU=y # # Remoteproc drivers @@ -7605,6 +7817,10 @@ CONFIG_SOUNDWIRE_INTEL=m # # +# NXP/Freescale QorIQ SoC drivers +# + +# # i.MX SoC drivers # @@ -7645,6 +7861,7 @@ CONFIG_EXTCON_GPIO=m CONFIG_EXTCON_MAX14577=m CONFIG_EXTCON_MAX3355=m CONFIG_EXTCON_MAX77693=m +CONFIG_EXTCON_PTN5150=m CONFIG_EXTCON_RT8973A=m CONFIG_EXTCON_SM5502=m # CONFIG_EXTCON_USB_GPIO is not set @@ -7668,6 +7885,9 @@ CONFIG_IIO_TRIGGERED_EVENT=m # CONFIG_ADIS16201=m CONFIG_ADIS16209=m +CONFIG_ADXL372=m +CONFIG_ADXL372_SPI=m +CONFIG_ADXL372_I2C=m CONFIG_BMA180=m CONFIG_BMA220=m 
CONFIG_BMC150_ACCEL=m @@ -7705,15 +7925,21 @@ CONFIG_STK8BA50=m # Analog to digital converters # CONFIG_AD_SIGMA_DELTA=m +CONFIG_AD7124=m CONFIG_AD7266=m CONFIG_AD7291=m CONFIG_AD7298=m CONFIG_AD7476=m +CONFIG_AD7606=m +CONFIG_AD7606_IFACE_PARALLEL=m +CONFIG_AD7606_IFACE_SPI=m CONFIG_AD7766=m +CONFIG_AD7768_1=m CONFIG_AD7791=m CONFIG_AD7793=m CONFIG_AD7887=m CONFIG_AD7923=m +CONFIG_AD7949=m CONFIG_AD799X=m CONFIG_AXP20X_ADC=m CONFIG_AXP288_ADC=m @@ -7733,11 +7959,13 @@ CONFIG_MAX1363=m CONFIG_MAX9611=m CONFIG_MCP320X=m CONFIG_MCP3422=m +CONFIG_MCP3911=m CONFIG_MEN_Z188_ADC=m CONFIG_NAU7802=m CONFIG_QCOM_VADC_COMMON=m CONFIG_QCOM_SPMI_IADC=m CONFIG_QCOM_SPMI_VADC=m +CONFIG_QCOM_SPMI_ADC5=m CONFIG_TI_ADC081C=m CONFIG_TI_ADC0832=m CONFIG_TI_ADC084S021=m @@ -7764,8 +7992,12 @@ CONFIG_AD8366=m # Chemical Sensors # CONFIG_ATLAS_PH_SENSOR=m +# CONFIG_BME680 is not set CONFIG_CCS811=m CONFIG_IAQCORE=m +CONFIG_PMS7003=m +CONFIG_SENSIRION_SGP30=m +CONFIG_SPS30=m CONFIG_VZ89X=m CONFIG_IIO_CROS_EC_SENSORS_CORE=m CONFIG_IIO_CROS_EC_SENSORS=m @@ -7804,11 +8036,13 @@ CONFIG_AD5592R=m CONFIG_AD5593R=m CONFIG_AD5504=m CONFIG_AD5624R_SPI=m +CONFIG_LTC1660=m CONFIG_LTC2632=m CONFIG_AD5686=m CONFIG_AD5686_SPI=m CONFIG_AD5696_I2C=m CONFIG_AD5755=m +# CONFIG_AD5758 is not set CONFIG_AD5761=m CONFIG_AD5764=m CONFIG_AD5791=m @@ -7821,6 +8055,8 @@ CONFIG_MCP4725=m CONFIG_MCP4922=m CONFIG_TI_DAC082S085=m CONFIG_TI_DAC5571=m +CONFIG_TI_DAC7311=m +CONFIG_TI_DAC7612=m # # IIO dummy driver @@ -7931,8 +8167,10 @@ CONFIG_SENSORS_LM3533=m CONFIG_LTR501=m CONFIG_LV0104CS=m CONFIG_MAX44000=m +CONFIG_MAX44009=m CONFIG_OPT3001=m CONFIG_PA12203001=m +# CONFIG_SI1133 is not set CONFIG_SI1145=m CONFIG_STK3310=m CONFIG_ST_UVIS25=m @@ -7946,6 +8184,7 @@ CONFIG_TSL2772=m CONFIG_TSL4531=m CONFIG_US5182D=m CONFIG_VCNL4000=m +CONFIG_VCNL4035=m CONFIG_VEML6070=m CONFIG_VL6180=m CONFIG_ZOPT2201=m @@ -7967,6 +8206,9 @@ CONFIG_IIO_ST_MAGN_SPI_3AXIS=m CONFIG_SENSORS_HMC5843=m CONFIG_SENSORS_HMC5843_I2C=m CONFIG_SENSORS_HMC5843_SPI=m +CONFIG_SENSORS_RM3100=m +CONFIG_SENSORS_RM3100_I2C=m +CONFIG_SENSORS_RM3100_SPI=m # # Multiplexers @@ -7996,6 +8238,7 @@ CONFIG_MAX5487=m CONFIG_MCP4018=m CONFIG_MCP4131=m CONFIG_MCP4531=m +CONFIG_MCP41010=m CONFIG_TPL0102=m # @@ -8038,15 +8281,18 @@ CONFIG_AS3935=m # # Proximity and distance sensors # +# CONFIG_ISL29501 is not set CONFIG_LIDAR_LITE_V2=m CONFIG_RFD77402=m CONFIG_SRF04=m CONFIG_SX9500=m CONFIG_SRF08=m +CONFIG_VL53L0X_I2C=m # # Resolver to digital converters # +CONFIG_AD2S90=m CONFIG_AD2S1200=m # @@ -8125,6 +8371,7 @@ CONFIG_PHY_SAMSUNG_USB2=m CONFIG_PHY_TUSB1210=m CONFIG_POWERCAP=y CONFIG_INTEL_RAPL=m +# CONFIG_IDLE_INJECT is not set CONFIG_MCB=m CONFIG_MCB_PCI=m CONFIG_MCB_LPC=m @@ -8146,9 +8393,11 @@ CONFIG_ND_BLK=m CONFIG_ND_CLAIM=y CONFIG_ND_BTT=m CONFIG_BTT=y +CONFIG_NVDIMM_KEYS=y CONFIG_DAX_DRIVER=y CONFIG_DAX=y CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_KMEM=m CONFIG_NVMEM=y CONFIG_RAVE_SP_EEPROM=m @@ -8156,6 +8405,8 @@ CONFIG_RAVE_SP_EEPROM=m # HW tracing support # CONFIG_STM=m +CONFIG_STM_PROTO_BASIC=m +CONFIG_STM_PROTO_SYS_T=m # CONFIG_STM_DUMMY is not set CONFIG_STM_SOURCE_CONSOLE=m CONFIG_STM_SOURCE_HEARTBEAT=m @@ -8175,67 +8426,23 @@ CONFIG_FPGA_MGR_ALTERA_CVP=m CONFIG_FPGA_MGR_XILINX_SPI=m CONFIG_FPGA_MGR_MACHXO2_SPI=m CONFIG_FPGA_BRIDGE=m +CONFIG_ALTERA_FREEZE_BRIDGE=m CONFIG_XILINX_PR_DECOUPLER=m CONFIG_FPGA_REGION=m +# CONFIG_FPGA_DFL is not set CONFIG_PM_OPP=y # CONFIG_UNISYS_VISORBUS is not set CONFIG_SIOX=m CONFIG_SIOX_BUS_GPIO=m CONFIG_SLIMBUS=m CONFIG_SLIM_QCOM_CTRL=m - -# -# 
Firmware Drivers -# -CONFIG_EDD=m -# CONFIG_EDD_OFF is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DELL_RBU=m -CONFIG_DCDBAS=m -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=m -CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y -CONFIG_ISCSI_IBFT_FIND=y -CONFIG_ISCSI_IBFT=m -CONFIG_FW_CFG_SYSFS=m -# CONFIG_FW_CFG_SYSFS_CMDLINE is not set -CONFIG_GOOGLE_FIRMWARE=y -CONFIG_GOOGLE_SMI=m -CONFIG_GOOGLE_COREBOOT_TABLE=m -CONFIG_GOOGLE_COREBOOT_TABLE_ACPI=m -CONFIG_GOOGLE_MEMCONSOLE=m -CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY=m -CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT=m -CONFIG_GOOGLE_MEMCONSOLE_COREBOOT=m -CONFIG_GOOGLE_VPD=m - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_VARS=m -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=m -CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y -CONFIG_EFI_RUNTIME_MAP=y -# CONFIG_EFI_FAKE_MEMMAP is not set -CONFIG_EFI_RUNTIME_WRAPPERS=y -CONFIG_EFI_BOOTLOADER_CONTROL=m -CONFIG_EFI_CAPSULE_LOADER=m -CONFIG_EFI_TEST=m -CONFIG_APPLE_PROPERTIES=y -CONFIG_RESET_ATTACK_MITIGATION=y -CONFIG_UEFI_CPER=y -CONFIG_UEFI_CPER_X86=y -CONFIG_EFI_DEV_PATH_PARSER=y - -# -# Tegra firmware driver -# +CONFIG_INTERCONNECT=m # # File systems # CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_VALIDATE_FS_PARSER=y CONFIG_FS_IOMAP=y CONFIG_EXT2_FS=m CONFIG_EXT2_FS_XATTR=y @@ -8247,8 +8454,6 @@ CONFIG_EXT3_FS_SECURITY=y CONFIG_EXT4_FS=m CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y # CONFIG_EXT4_DEBUG is not set CONFIG_JBD2=m # CONFIG_JBD2_DEBUG is not set @@ -8294,7 +8499,6 @@ CONFIG_F2FS_FS_XATTR=y CONFIG_F2FS_FS_POSIX_ACL=y CONFIG_F2FS_FS_SECURITY=y CONFIG_F2FS_CHECK_FS=y -CONFIG_F2FS_FS_ENCRYPTION=y # CONFIG_F2FS_IO_TRACE is not set # CONFIG_F2FS_FAULT_INJECTION is not set CONFIG_FS_DAX=y @@ -8303,7 +8507,7 @@ CONFIG_EXPORTFS=y CONFIG_EXPORTFS_BLOCK_OPS=y CONFIG_FILE_LOCKING=y CONFIG_MANDATORY_FILE_LOCKING=y -CONFIG_FS_ENCRYPTION=m +CONFIG_FS_ENCRYPTION=y CONFIG_FSNOTIFY=y CONFIG_DNOTIFY=y CONFIG_INOTIFY_USER=y @@ -8328,6 +8532,7 @@ CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y CONFIG_OVERLAY_FS_INDEX=y CONFIG_OVERLAY_FS_NFS_EXPORT=y # CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set # # Caches @@ -8390,7 +8595,6 @@ CONFIG_ECRYPT_FS=m CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m -CONFIG_HFSPLUS_FS_POSIX_ACL=y CONFIG_BEFS_FS=m # CONFIG_BEFS_DEBUG is not set CONFIG_BFS_FS=m @@ -8417,8 +8621,9 @@ CONFIG_UBIFS_FS_ADVANCED_COMPR=y CONFIG_UBIFS_FS_LZO=y CONFIG_UBIFS_FS_ZLIB=y CONFIG_UBIFS_ATIME_SUPPORT=y -CONFIG_UBIFS_FS_ENCRYPTION=y +CONFIG_UBIFS_FS_XATTR=y CONFIG_UBIFS_FS_SECURITY=y +# CONFIG_UBIFS_FS_AUTHENTICATION is not set CONFIG_CRAMFS=m CONFIG_CRAMFS_BLOCKDEV=y CONFIG_CRAMFS_MTD=y @@ -8453,15 +8658,18 @@ CONFIG_ROMFS_ON_MTD=y CONFIG_PSTORE=y CONFIG_PSTORE_DEFLATE_COMPRESS=y CONFIG_PSTORE_LZO_COMPRESS=y -# CONFIG_PSTORE_LZ4_COMPRESS is not set -CONFIG_PSTORE_LZ4HC_COMPRESS=m +CONFIG_PSTORE_LZ4_COMPRESS=y +CONFIG_PSTORE_LZ4HC_COMPRESS=y CONFIG_PSTORE_842_COMPRESS=y +CONFIG_PSTORE_ZSTD_COMPRESS=y CONFIG_PSTORE_COMPRESS=y -CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +# CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT is not set # CONFIG_PSTORE_LZO_COMPRESS_DEFAULT is not set +CONFIG_PSTORE_LZ4_COMPRESS_DEFAULT=y # CONFIG_PSTORE_LZ4HC_COMPRESS_DEFAULT is not set # CONFIG_PSTORE_842_COMPRESS_DEFAULT is not set -CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT is not set +CONFIG_PSTORE_COMPRESS_DEFAULT="lz4" # CONFIG_PSTORE_CONSOLE is not set CONFIG_PSTORE_PMSG=y # CONFIG_PSTORE_FTRACE is not set @@ 
-8470,9 +8678,6 @@ CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
# CONFIG_UFS_FS_WRITE is not set
# CONFIG_UFS_DEBUG is not set
-CONFIG_EXOFS_FS=m
-# CONFIG_EXOFS_DEBUG is not set
-CONFIG_ORE=m
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V2=m
@@ -8512,14 +8717,15 @@ CONFIG_SUNRPC_GSS=m
CONFIG_SUNRPC_BACKCHANNEL=y
CONFIG_SUNRPC_SWAP=y
CONFIG_RPCSEC_GSS_KRB5=m
+# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set
# CONFIG_SUNRPC_DEBUG is not set
CONFIG_SUNRPC_XPRT_RDMA=m
CONFIG_CEPH_FS=m
CONFIG_CEPH_FSCACHE=y
CONFIG_CEPH_FS_POSIX_ACL=y
CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
@@ -8527,13 +8733,13 @@ CONFIG_CIFS_POSIX=y
CONFIG_CIFS_ACL=y
# CONFIG_CIFS_DEBUG is not set
CONFIG_CIFS_DFS_UPCALL=y
-CONFIG_CIFS_SMB311=y
# CONFIG_CIFS_SMB_DIRECT is not set
CONFIG_CIFS_FSCACHE=y
CONFIG_CODA_FS=m
CONFIG_AFS_FS=m
# CONFIG_AFS_DEBUG is not set
CONFIG_AFS_FSCACHE=y
+# CONFIG_AFS_DEBUG_CURSOR is not set
CONFIG_9P_FS=m
CONFIG_9P_FSCACHE=y
CONFIG_9P_FS_POSIX_ACL=y
@@ -8593,254 +8799,6 @@ CONFIG_DLM=m
# CONFIG_DLM_DEBUG is not set

#
-# Kernel hacking
-#
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-
-#
-# printk and dmesg options
-#
-# CONFIG_PRINTK_TIME is not set
-CONFIG_CONSOLE_LOGLEVEL_DEFAULT=1
-CONFIG_MESSAGE_LOGLEVEL_DEFAULT=1
-# CONFIG_BOOT_PRINTK_DELAY is not set
-# CONFIG_DYNAMIC_DEBUG is not set
-
-#
-# Compile-time checks and compiler options
-#
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_FRAME_WARN=0
-CONFIG_STRIP_ASM_SYMS=y
-# CONFIG_READABLE_ASM is not set
-# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_PAGE_OWNER is not set
-CONFIG_DEBUG_FS=y
-# CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_SECTION_MISMATCH is not set
-CONFIG_SECTION_MISMATCH_WARN_ONLY=y
-CONFIG_STACK_VALIDATION=y
-# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
-CONFIG_MAGIC_SYSRQ_SERIAL=y
-CONFIG_DEBUG_KERNEL=y
-
-#
-# Memory Debugging
-#
-# CONFIG_PAGE_EXTENSION is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_PAGE_POISONING is not set
-# CONFIG_DEBUG_PAGE_REF is not set
-# CONFIG_DEBUG_RODATA_TEST is not set
-# CONFIG_DEBUG_OBJECTS is not set
-CONFIG_SLUB_DEBUG_ON=y
-# CONFIG_SLUB_STATS is not set
-CONFIG_HAVE_DEBUG_KMEMLEAK=y
-# CONFIG_DEBUG_KMEMLEAK is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_VM is not set
-CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
-# CONFIG_DEBUG_VIRTUAL is not set
-CONFIG_DEBUG_MEMORY_INIT=y
-# CONFIG_DEBUG_PER_CPU_MAPS is not set
-CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-CONFIG_HAVE_ARCH_KASAN=y
-# CONFIG_KASAN is not set
-CONFIG_ARCH_HAS_KCOV=y
-CONFIG_CC_HAS_SANCOV_TRACE_PC=y
-# CONFIG_KCOV is not set
-# CONFIG_DEBUG_SHIRQ is not set
-
-#
-# Debug Lockups and Hangs
-#
-# CONFIG_SOFTLOCKUP_DETECTOR is not set
-CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
-# CONFIG_HARDLOCKUP_DETECTOR is not set
-# CONFIG_DETECT_HUNG_TASK is not set
-# CONFIG_WQ_WATCHDOG is not set
-# CONFIG_PANIC_ON_OOPS is not set
-CONFIG_PANIC_ON_OOPS_VALUE=0
-CONFIG_PANIC_TIMEOUT=0
-CONFIG_SCHED_DEBUG=y
-CONFIG_SCHED_INFO=y
-CONFIG_SCHEDSTATS=y
-CONFIG_SCHED_STACK_END_CHECK=y
-# CONFIG_DEBUG_TIMEKEEPING is not set
-# CONFIG_DEBUG_PREEMPT is not set
-
-#
-# Lock Debugging (spinlocks, mutexes, etc...)
-#
-CONFIG_LOCK_DEBUGGING_SUPPORT=y
-# CONFIG_PROVE_LOCKING is not set
-# CONFIG_LOCK_STAT is not set
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
-# CONFIG_DEBUG_RWSEMS is not set
-# CONFIG_DEBUG_LOCK_ALLOC is not set
-# CONFIG_DEBUG_ATOMIC_SLEEP is not set
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_LOCK_TORTURE_TEST is not set
-# CONFIG_WW_MUTEX_SELFTEST is not set
-CONFIG_STACKTRACE=y
-# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
-# CONFIG_DEBUG_KOBJECT is not set
-CONFIG_DEBUG_BUGVERBOSE=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
-CONFIG_DEBUG_SG=y
-CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_DEBUG_CREDENTIALS=y
-
-#
-# RCU Debugging
-#
-CONFIG_TORTURE_TEST=m
-CONFIG_RCU_PERF_TEST=m
-# CONFIG_RCU_TORTURE_TEST is not set
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-# CONFIG_RCU_TRACE is not set
-# CONFIG_RCU_EQS_DEBUG is not set
-# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
-# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
-# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
-# CONFIG_NOTIFIER_ERROR_INJECTION is not set
-CONFIG_FUNCTION_ERROR_INJECTION=y
-# CONFIG_FAULT_INJECTION is not set
-CONFIG_LATENCYTOP=y
-CONFIG_USER_STACKTRACE_SUPPORT=y
-CONFIG_NOP_TRACER=y
-CONFIG_HAVE_FUNCTION_TRACER=y
-CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
-CONFIG_HAVE_DYNAMIC_FTRACE=y
-CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
-CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
-CONFIG_HAVE_FENTRY=y
-CONFIG_HAVE_C_RECORDMCOUNT=y
-CONFIG_TRACE_CLOCK=y
-CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
-CONFIG_TRACING=y
-CONFIG_GENERIC_TRACER=y
-CONFIG_TRACING_SUPPORT=y
-CONFIG_FTRACE=y
-CONFIG_FUNCTION_TRACER=y
-# CONFIG_FUNCTION_GRAPH_TRACER is not set
-# CONFIG_PREEMPTIRQ_EVENTS is not set
-# CONFIG_IRQSOFF_TRACER is not set
-# CONFIG_PREEMPT_TRACER is not set
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_HWLAT_TRACER is not set
-CONFIG_FTRACE_SYSCALLS=y
-# CONFIG_TRACER_SNAPSHOT is not set
-CONFIG_BRANCH_PROFILE_NONE=y
-# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_KPROBE_EVENTS=y
-# CONFIG_UPROBE_EVENTS is not set
-CONFIG_BPF_EVENTS=y
-CONFIG_PROBE_EVENTS=y
-CONFIG_DYNAMIC_FTRACE=y
-CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
-CONFIG_FUNCTION_PROFILER=y
-# CONFIG_BPF_KPROBE_OVERRIDE is not set
-CONFIG_FTRACE_MCOUNT_RECORD=y
-# CONFIG_FTRACE_STARTUP_TEST is not set
-# CONFIG_MMIOTRACE is not set
-# CONFIG_HIST_TRIGGERS is not set
-# CONFIG_TRACEPOINT_BENCHMARK is not set
-# CONFIG_RING_BUFFER_BENCHMARK is not set
-# CONFIG_RING_BUFFER_STARTUP_TEST is not set
-# CONFIG_TRACE_EVAL_MAP_FILE is not set
-CONFIG_TRACING_EVENTS_GPIO=y
-# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
-# CONFIG_DMA_API_DEBUG is not set
-CONFIG_RUNTIME_TESTING_MENU=y
-CONFIG_LKDTM=m
-# CONFIG_TEST_LIST_SORT is not set
-# CONFIG_TEST_SORT is not set
-# CONFIG_KPROBES_SANITY_TEST is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
-# CONFIG_RBTREE_TEST is not set
-# CONFIG_INTERVAL_TREE_TEST is not set
-# CONFIG_PERCPU_TEST is not set
-# CONFIG_ATOMIC64_SELFTEST is not set
-# CONFIG_ASYNC_RAID6_TEST is not set
-# CONFIG_TEST_HEXDUMP is not set
-# CONFIG_TEST_STRING_HELPERS is not set
-# CONFIG_TEST_KSTRTOX is not set
-# CONFIG_TEST_PRINTF is not set
-# CONFIG_TEST_BITMAP is not set
-# CONFIG_TEST_UUID is not set
-# CONFIG_TEST_OVERFLOW is not set
-# CONFIG_TEST_RHASHTABLE is not set
-# CONFIG_TEST_HASH is not set
-# CONFIG_TEST_PARMAN is not set
-# CONFIG_TEST_LKM is not set
-# CONFIG_TEST_USER_COPY is not set
-# CONFIG_TEST_BPF is not set
-# CONFIG_FIND_BIT_BENCHMARK is not set
-# CONFIG_TEST_FIRMWARE is not set
-# CONFIG_TEST_SYSCTL is not set
-# CONFIG_TEST_UDELAY is not set
-# CONFIG_TEST_STATIC_KEYS is not set
-# CONFIG_TEST_KMOD is not set
-CONFIG_MEMTEST=y
-# CONFIG_BUG_ON_DATA_CORRUPTION is not set
-# CONFIG_SAMPLES is not set
-CONFIG_HAVE_ARCH_KGDB=y
-# CONFIG_KGDB is not set
-CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
-# CONFIG_UBSAN is not set
-CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
-CONFIG_STRICT_DEVMEM=y
-CONFIG_IO_STRICT_DEVMEM=y
-CONFIG_EARLY_PRINTK_USB=y
-CONFIG_X86_VERBOSE_BOOTUP=y
-CONFIG_EARLY_PRINTK=y
-CONFIG_EARLY_PRINTK_DBGP=y
-CONFIG_EARLY_PRINTK_EFI=y
-CONFIG_EARLY_PRINTK_USB_XDBC=y
-CONFIG_X86_PTDUMP_CORE=y
-# CONFIG_X86_PTDUMP is not set
-# CONFIG_EFI_PGT_DUMP is not set
-CONFIG_DEBUG_WX=y
-CONFIG_DOUBLEFAULT=y
-# CONFIG_DEBUG_TLBFLUSH is not set
-# CONFIG_IOMMU_DEBUG is not set
-CONFIG_HAVE_MMIOTRACE_SUPPORT=y
-# CONFIG_X86_DECODER_SELFTEST is not set
-CONFIG_IO_DELAY_TYPE_0X80=0
-CONFIG_IO_DELAY_TYPE_0XED=1
-CONFIG_IO_DELAY_TYPE_UDELAY=2
-CONFIG_IO_DELAY_TYPE_NONE=3
-CONFIG_IO_DELAY_0X80=y
-# CONFIG_IO_DELAY_0XED is not set
-# CONFIG_IO_DELAY_UDELAY is not set
-# CONFIG_IO_DELAY_NONE is not set
-CONFIG_DEFAULT_IO_DELAY_TYPE=0
-# CONFIG_DEBUG_BOOT_PARAMS is not set
-# CONFIG_CPA_DEBUG is not set
-# CONFIG_OPTIMIZE_INLINING is not set
-# CONFIG_DEBUG_ENTRY is not set
-# CONFIG_DEBUG_NMI_SELFTEST is not set
-CONFIG_X86_DEBUG_FPU=y
-# CONFIG_PUNIT_ATOM_DEBUG is not set
-CONFIG_UNWINDER_ORC=y
-# CONFIG_UNWINDER_FRAME_POINTER is not set
-
-#
# Security options
#
CONFIG_KEYS=y
@@ -8872,16 +8830,16 @@ CONFIG_PAGE_SANITIZE_VERIFY=y
# CONFIG_SECURITY_SMACK is not set
# CONFIG_SECURITY_TOMOYO is not set
CONFIG_SECURITY_APPARMOR=y
-CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1
CONFIG_SECURITY_APPARMOR_HASH=y
CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y
# CONFIG_SECURITY_APPARMOR_DEBUG is not set
# CONFIG_SECURITY_LOADPIN is not set
CONFIG_SECURITY_YAMA=y
+# CONFIG_SECURITY_SAFESETID is not set
# CONFIG_INTEGRITY is not set
CONFIG_DEFAULT_SECURITY_APPARMOR=y
# CONFIG_DEFAULT_SECURITY_DAC is not set
-CONFIG_DEFAULT_SECURITY="apparmor"
+CONFIG_LSM="apparmor"
CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
@@ -8922,7 +8880,6 @@ CONFIG_CRYPTO_NULL2=y
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_WORKQUEUE=y
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_SIMD=m
@@ -8954,15 +8911,20 @@ CONFIG_CRYPTO_ECHAINIV=m
#
# Block modes
#
-CONFIG_CRYPTO_CBC=m
+CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTR=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CTS=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XTS=y
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_NHPOLY1305=m
+CONFIG_CRYPTO_NHPOLY1305_SSE2=m
+CONFIG_CRYPTO_NHPOLY1305_AVX2=m
+CONFIG_CRYPTO_ADIANTUM=m

#
# Hash modes
@@ -8995,13 +8957,11 @@ CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA1_SSSE3=m
CONFIG_CRYPTO_SHA256_SSSE3=m
CONFIG_CRYPTO_SHA512_SSSE3=m
-CONFIG_CRYPTO_SHA1_MB=m
-CONFIG_CRYPTO_SHA256_MB=m
-CONFIG_CRYPTO_SHA512_MB=m
-CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
@@ -9040,7 +9000,6 @@ CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_TWOFISH_COMMON=m @@ -9073,6 +9032,7 @@ CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m +# CONFIG_CRYPTO_STATS is not set CONFIG_CRYPTO_HASH_INFO=y CONFIG_CRYPTO_HW=y CONFIG_CRYPTO_DEV_PADLOCK=m @@ -9098,7 +9058,10 @@ CONFIG_CRYPTO_DEV_CHELSIO_TLS=m CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS8_PRIVATE_KEY_PARSER=y +CONFIG_TPM_KEY_PARSER=m CONFIG_PKCS7_MESSAGE_PARSER=y CONFIG_PKCS7_TEST_KEY=m CONFIG_SIGNED_PE_FILE_VERIFICATION=y @@ -9113,36 +9076,13 @@ CONFIG_SYSTEM_TRUSTED_KEYS="" CONFIG_SECONDARY_TRUSTED_KEYRING=y CONFIG_SYSTEM_BLACKLIST_KEYRING=y CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" -CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_KVM_ASYNC_PF=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_KVM_COMPAT=y -CONFIG_HAVE_KVM_IRQ_BYPASS=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m -CONFIG_KVM_INTEL=m -CONFIG_KVM_AMD=m -CONFIG_KVM_AMD_SEV=y -# CONFIG_KVM_MMU_AUDIT is not set -CONFIG_VHOST_NET=m -CONFIG_VHOST_SCSI=m -CONFIG_VHOST_VSOCK=m -CONFIG_VHOST=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set CONFIG_BINARY_PRINTF=y # # Library routines # CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y CONFIG_BITREVERSE=y CONFIG_RATIONAL=y CONFIG_GENERIC_STRNCPY_FROM_USER=y @@ -9163,6 +9103,7 @@ CONFIG_CRC32_SLICEBY8=y # CONFIG_CRC32_SLICEBY4 is not set # CONFIG_CRC32_SARWATE is not set # CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m CONFIG_CRC4=m CONFIG_CRC7=m CONFIG_LIBCRC32C=m @@ -9208,7 +9149,7 @@ CONFIG_TEXTSEARCH_BM=m CONFIG_TEXTSEARCH_FSM=m CONFIG_BTREE=y CONFIG_INTERVAL_TREE=y -CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_XARRAY_MULTI=y CONFIG_ASSOCIATIVE_ARRAY=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT_MAP=y @@ -9216,9 +9157,21 @@ CONFIG_HAS_DMA=y CONFIG_NEED_SG_DMA_LENGTH=y CONFIG_NEED_DMA_MAP_STATE=y CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_DMA_DIRECT_OPS=y +CONFIG_DMA_DECLARE_COHERENT=y CONFIG_DMA_VIRT_OPS=y CONFIG_SWIOTLB=y +CONFIG_DMA_CMA=y + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=0 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set CONFIG_SGL_ALLOC=y CONFIG_IOMMU_HELPER=y CONFIG_CHECK_SIGNATURE=y @@ -9249,8 +9202,8 @@ CONFIG_FONT_8x16=y # CONFIG_FONT_10x18 is not set # CONFIG_FONT_SUN8x16 is not set # CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_TER16x32 is not set CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_SG_CHAIN=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y CONFIG_ARCH_HAS_UACCESS_MCSAFE=y @@ -9258,3 +9211,265 @@ CONFIG_SBITMAP=y CONFIG_PARMAN=m CONFIG_PRIME_NUMBERS=m # CONFIG_STRING_SELFTEST is not set +CONFIG_OBJAGG=m + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +# CONFIG_PRINTK_TIME is not set +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=1 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=1 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +# 
CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=0 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_DEBUG_WRITABLE_FUNCTION_POINTERS_VERBOSE is not set +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +CONFIG_SLUB_DEBUG_ON=y +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_CC_HAS_KASAN_GENERIC=y +# CONFIG_KASAN is not set +CONFIG_KASAN_STACK=1 +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_LIST=y +CONFIG_DEBUG_PI_LIST=y +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +CONFIG_TORTURE_TEST=m +CONFIG_RCU_PERF_TEST=m +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_LATENCYTOP=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_FUNCTION_TRACER=y +# CONFIG_FUNCTION_GRAPH_TRACER is not set +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_STACK_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +# CONFIG_UPROBE_EVENTS is not set +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_MMIOTRACE is not set +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_TRACING_EVENTS_GPIO=y +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set +CONFIG_RUNTIME_TESTING_MENU=y +CONFIG_LKDTM=m +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# 
CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_STACKINIT is not set +CONFIG_MEMTEST=y +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_UBSAN_ALIGNMENT=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +CONFIG_IO_STRICT_DEVMEM=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_EARLY_PRINTK_USB=y +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_USB_XDBC=y +CONFIG_X86_PTDUMP_CORE=y +# CONFIG_X86_PTDUMP is not set +# CONFIG_EFI_PGT_DUMP is not set +CONFIG_DEBUG_WX=y +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +# CONFIG_IOMMU_DEBUG is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +# CONFIG_X86_DECODER_SELFTEST is not set +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_UDELAY=2 +CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEFAULT_IO_DELAY_TYPE=0 +# CONFIG_DEBUG_BOOT_PARAMS is not set +# CONFIG_CPA_DEBUG is not set +# CONFIG_OPTIMIZE_INLINING is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +CONFIG_X86_DEBUG_FPU=y +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set diff --git a/sys-kernel/linux-sources-redcore/files/ata-fix-NCQ-LOG-strings-and-move-to-debug.patch b/sys-kernel/linux-sources-redcore/files/5.1-ata-fix-NCQ-LOG-strings-and-move-to-debug.patch index 344a8c4b..344a8c4b 100644 --- a/sys-kernel/linux-sources-redcore/files/ata-fix-NCQ-LOG-strings-and-move-to-debug.patch +++ b/sys-kernel/linux-sources-redcore/files/5.1-ata-fix-NCQ-LOG-strings-and-move-to-debug.patch diff --git a/sys-kernel/linux-sources-redcore/files/drop_ancient-and-wrong-msg.patch b/sys-kernel/linux-sources-redcore/files/5.1-drop_ancient-and-wrong-msg.patch index f184b08e..f184b08e 100644 --- a/sys-kernel/linux-sources-redcore/files/drop_ancient-and-wrong-msg.patch +++ b/sys-kernel/linux-sources-redcore/files/5.1-drop_ancient-and-wrong-msg.patch diff --git a/sys-kernel/linux-sources-redcore/files/enable_alx_wol.patch b/sys-kernel/linux-sources-redcore/files/5.1-enable_alx_wol.patch index 1b7f6e13..1b7f6e13 100644 --- a/sys-kernel/linux-sources-redcore/files/enable_alx_wol.patch +++ b/sys-kernel/linux-sources-redcore/files/5.1-enable_alx_wol.patch diff --git a/sys-kernel/linux-sources-redcore/files/5.1-fix-acpi_dbg_level.patch b/sys-kernel/linux-sources-redcore/files/5.1-fix-acpi_dbg_level.patch new file mode 100644 index 00000000..d7fb610f --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/5.1-fix-acpi_dbg_level.patch @@ -0,0 +1,12 @@ +diff -Naur linux-5.1/drivers/acpi/bus.c linux-5.1-p/drivers/acpi/bus.c +--- linux-5.1/drivers/acpi/bus.c 2019-05-06 02:42:58.000000000 +0200 ++++ linux-5.1-p/drivers/acpi/bus.c 2019-05-07 
08:50:58.819866218 +0200 +@@ -1043,6 +1043,8 @@ + + acpi_permanent_mmap = true; + ++ acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR; ++ + #ifdef CONFIG_X86 + /* + * If the machine falls into the DMI check table, diff --git a/sys-kernel/linux-sources-redcore/files/linux-hardened.patch b/sys-kernel/linux-sources-redcore/files/5.1-linux-hardened.patch index 7a46a91b..cfb24d72 100644 --- a/sys-kernel/linux-sources-redcore/files/linux-hardened.patch +++ b/sys-kernel/linux-sources-redcore/files/5.1-linux-hardened.patch @@ -1,8 +1,8 @@ diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index 1370b424a453..54d7125d6912 100644 +index c7937f379d22..6a9c38fdd2e9 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt -@@ -496,16 +496,6 @@ +@@ -505,16 +505,6 @@ nosocket -- Disable socket memory accounting. nokmem -- Disable kernel memory accounting. @@ -19,7 +19,7 @@ index 1370b424a453..54d7125d6912 100644 cio_ignore= [S390] See Documentation/s390/CommonIO for details. clk_ignore_unused -@@ -3066,6 +3056,11 @@ +@@ -3241,6 +3231,11 @@ the specified number of seconds. This is to be used if your oopses keep scrolling off the screen. @@ -32,18 +32,18 @@ index 1370b424a453..54d7125d6912 100644 pcd. [PARIDE] diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt -index eded671d55eb..0abfc33f101d 100644 +index aa058aa7bf28..228632fa5f66 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt -@@ -92,6 +92,7 @@ show up in /proc/sys/kernel: +@@ -97,6 +97,7 @@ show up in /proc/sys/kernel: - sysctl_writes_strict - - tainted + - tainted ==> Documentation/admin-guide/tainted-kernels.rst - threads-max +- tiocsti_restrict - unknown_nmi_panic - watchdog - watchdog_thresh -@@ -1016,6 +1017,26 @@ available RAM pages threads-max is reduced accordingly. +@@ -1082,6 +1083,26 @@ available RAM pages threads-max is reduced accordingly. ============================================================== @@ -71,13 +71,13 @@ index eded671d55eb..0abfc33f101d 100644 The value in this file affects behavior of handling NMI. When the diff --git a/Makefile b/Makefile -index a41692c5827a..ce817616d8a9 100644 +index d7b3c8e3ff3e..029b58e4e67f 100644 --- a/Makefile +++ b/Makefile -@@ -688,6 +688,9 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong +@@ -717,6 +717,9 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong KBUILD_CFLAGS += $(stackp-flags-y) - ifeq ($(cc-name),clang) + ifdef CONFIG_CC_IS_CLANG +ifdef CONFIG_LOCAL_INIT +KBUILD_CFLAGS += -fsanitize=local-init +endif @@ -85,22 +85,10 @@ index a41692c5827a..ce817616d8a9 100644 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) diff --git a/arch/Kconfig b/arch/Kconfig -index d1f2ed462ac8..19a595a47dae 100644 +index 9092e0ffe4d3..837a0297b720 100644 --- a/arch/Kconfig +++ b/arch/Kconfig -@@ -470,6 +470,11 @@ config GCC_PLUGIN_LATENT_ENTROPY - is some slowdown of the boot process (about 0.5%) and fork and - irq processing. - -+ When extra_latent_entropy is passed on the kernel command line, -+ entropy will be extracted from up to the first 4GB of RAM while the -+ runtime memory allocator is being initialized. This costs even more -+ slowdown of the boot process. -+ - Note that entropy extracted this way is not cryptographically - secure! 
- -@@ -731,7 +736,7 @@ config ARCH_MMAP_RND_BITS +@@ -631,7 +631,7 @@ config ARCH_MMAP_RND_BITS int "Number of bits to use for ASLR of mmap base address" if EXPERT range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT @@ -109,7 +97,7 @@ index d1f2ed462ac8..19a595a47dae 100644 depends on HAVE_ARCH_MMAP_RND_BITS help This value can be used to select the number of bits to use to -@@ -765,7 +770,7 @@ config ARCH_MMAP_RND_COMPAT_BITS +@@ -665,7 +665,7 @@ config ARCH_MMAP_RND_COMPAT_BITS int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT @@ -118,7 +106,7 @@ index d1f2ed462ac8..19a595a47dae 100644 depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS help This value can be used to select the number of bits to use to -@@ -967,6 +972,7 @@ config ARCH_HAS_REFCOUNT +@@ -873,6 +873,7 @@ config ARCH_HAS_REFCOUNT config REFCOUNT_FULL bool "Perform full reference count validation at the expense of speed" @@ -127,10 +115,10 @@ index d1f2ed462ac8..19a595a47dae 100644 Enabling this switches the refcounting infrastructure from a fast unchecked atomic_t implementation to a fully state checked diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 42c090cf0292..a6e2276009e4 100644 +index d218729ec852..d0e94e76885f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig -@@ -1005,6 +1005,7 @@ endif +@@ -1166,6 +1166,7 @@ endif config ARM64_SW_TTBR0_PAN bool "Emulate Privileged Access Never using TTBR0_EL1 switching" @@ -138,7 +126,7 @@ index 42c090cf0292..a6e2276009e4 100644 help Enabling this option prevents the kernel from accessing user-space memory directly by pointing TTBR0_EL1 to a reserved -@@ -1180,6 +1181,7 @@ config RANDOMIZE_BASE +@@ -1393,6 +1394,7 @@ config RANDOMIZE_BASE bool "Randomize the address of the kernel image" select ARM64_MODULE_PLTS if MODULES select RELOCATABLE @@ -147,10 +135,10 @@ index 42c090cf0292..a6e2276009e4 100644 Randomizes the virtual address at which the kernel image is loaded, as a security feature that deters exploit attempts diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug -index cc6bd559af85..01d5442d4722 100644 +index 69c9170bdd24..a786227db0e3 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug -@@ -45,6 +45,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET +@@ -42,6 +42,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET config DEBUG_WX bool "Warn on W+X mappings at boot" select ARM64_PTDUMP_CORE @@ -159,7 +147,7 @@ index cc6bd559af85..01d5442d4722 100644 Generate a warning if any W+X mappings are found at boot. diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig -index f9a186f6af8a..e628231a5a92 100644 +index 32fb03503b0b..228d3770d924 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1,4 +1,3 @@ @@ -168,10 +156,10 @@ index f9a186f6af8a..e628231a5a92 100644 CONFIG_AUDIT=y CONFIG_NO_HZ_IDLE=y diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h -index 433b9554c6a1..1f4b06317c9f 100644 +index 6adc1a90e7e6..1f4b06317c9f 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h -@@ -114,10 +114,10 @@ +@@ -114,14 +114,10 @@ /* * This is the base location for PIE (ET_DYN with INTERP) loads. 
On @@ -179,12 +167,16 @@ index 433b9554c6a1..1f4b06317c9f 100644 + * 64-bit, this is raised to 4GB to leave the entire 32-bit address * space open for things that want to use the area for 32-bit pointers. */ +-#ifdef CONFIG_ARM64_FORCE_52BIT -#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) +-#else +-#define ELF_ET_DYN_BASE (2 * DEFAULT_MAP_WINDOW_64 / 3) +-#endif /* CONFIG_ARM64_FORCE_52BIT */ +#define ELF_ET_DYN_BASE 0x100000000UL #ifndef __ASSEMBLY__ -@@ -171,10 +171,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, +@@ -175,10 +171,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, /* 1GB of VA */ #ifdef CONFIG_COMPAT #define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \ @@ -199,10 +191,10 @@ index 433b9554c6a1..1f4b06317c9f 100644 #ifdef __AARCH64EB__ diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c -index e10bc363f533..8e5701978b7c 100644 +index 3767fb21a5b8..776cf5d48f7d 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c -@@ -481,9 +481,9 @@ unsigned long arch_align_stack(unsigned long sp) +@@ -538,9 +538,9 @@ unsigned long arch_align_stack(unsigned long sp) unsigned long arch_randomize_brk(struct mm_struct *mm) { if (is_compat_task()) @@ -215,7 +207,7 @@ index e10bc363f533..8e5701978b7c 100644 /* diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 6b8065d718bd..347c8d1c3a45 100644 +index 62fc3fda1a05..fe5f612d7014 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1202,8 +1202,7 @@ config VM86 @@ -228,7 +220,7 @@ index 6b8065d718bd..347c8d1c3a45 100644 depends on MODIFY_LDT_SYSCALL ---help--- This option is required by programs like Wine to run 16-bit -@@ -2295,7 +2294,7 @@ config COMPAT_VDSO +@@ -2291,7 +2290,7 @@ config COMPAT_VDSO choice prompt "vsyscall table for legacy applications" depends on X86_64 @@ -237,7 +229,7 @@ index 6b8065d718bd..347c8d1c3a45 100644 help Legacy user code that does not know how to find the vDSO expects to be able to issue three syscalls by calling fixed addresses in -@@ -2376,8 +2375,7 @@ config CMDLINE_OVERRIDE +@@ -2372,8 +2371,7 @@ config CMDLINE_OVERRIDE be set to 'N' under normal conditions. config MODIFY_LDT_SYSCALL @@ -248,10 +240,10 @@ index 6b8065d718bd..347c8d1c3a45 100644 Linux can allow user programs to install a per-process x86 Local Descriptor Table (LDT) using the modify_ldt(2) system diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug -index c6dd1d980081..0acf0d1d67de 100644 +index 15d0fbe27872..5c32c9818bd4 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug -@@ -104,6 +104,7 @@ config EFI_PGT_DUMP +@@ -91,6 +91,7 @@ config EFI_PGT_DUMP config DEBUG_WX bool "Warn on W+X mappings at boot" select X86_PTDUMP_CORE @@ -260,7 +252,7 @@ index c6dd1d980081..0acf0d1d67de 100644 Generate a warning if any W+X mappings are found at boot. 
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig -index e32fc1f274d8..d08acc76502a 100644 +index 1d3badfda09e..bd67e1778ffa 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -1,5 +1,4 @@ @@ -270,10 +262,10 @@ index e32fc1f274d8..d08acc76502a 100644 CONFIG_BSD_PROCESS_ACCT=y CONFIG_TASKSTATS=y diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c -index 5b8b556dbb12..a569f08b4478 100644 +index babc4e7a519c..197b79a50bf5 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c -@@ -204,55 +204,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr) +@@ -198,55 +198,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr) } #ifdef CONFIG_X86_64 @@ -331,10 +323,10 @@ index 5b8b556dbb12..a569f08b4478 100644 #endif diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h -index 0d157d2a1e2a..770c8ae97f92 100644 +index 69c0f892e310..f9f7a85bb71e 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h -@@ -249,11 +249,11 @@ extern int force_personality32; +@@ -248,11 +248,11 @@ extern int force_personality32; /* * This is the base location for PIE (ET_DYN with INTERP) loads. On @@ -348,7 +340,7 @@ index 0d157d2a1e2a..770c8ae97f92 100644 /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, -@@ -313,8 +313,8 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len); +@@ -312,8 +312,8 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len); #ifdef CONFIG_X86_32 @@ -359,7 +351,7 @@ index 0d157d2a1e2a..770c8ae97f92 100644 #define ARCH_DLINFO ARCH_DLINFO_IA32 -@@ -323,7 +323,11 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len); +@@ -322,7 +322,11 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len); #else /* CONFIG_X86_32 */ /* 1GB for 64bit, 8MB for 32bit */ @@ -372,17 +364,17 @@ index 0d157d2a1e2a..770c8ae97f92 100644 #define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32()) #define ARCH_DLINFO \ -@@ -381,5 +385,4 @@ struct va_alignment { +@@ -380,5 +384,4 @@ struct va_alignment { } ____cacheline_aligned; extern struct va_alignment va_align; -extern unsigned long align_vdso_addr(unsigned long); #endif /* _ASM_X86_ELF_H */ diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h -index 6690cd3fc8b1..300a1c2819fb 100644 +index f4204bf377fc..8ccc7aa0ece0 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h -@@ -266,6 +266,7 @@ static inline void cr4_set_bits(unsigned long mask) +@@ -294,6 +294,7 @@ static inline void cr4_set_bits(unsigned long mask) local_irq_save(flags); cr4 = this_cpu_read(cpu_tlbstate.cr4); @@ -390,7 +382,7 @@ index 6690cd3fc8b1..300a1c2819fb 100644 if ((cr4 | mask) != cr4) __cr4_set(cr4 | mask); local_irq_restore(flags); -@@ -278,6 +279,7 @@ static inline void cr4_clear_bits(unsigned long mask) +@@ -306,6 +307,7 @@ static inline void cr4_clear_bits(unsigned long mask) local_irq_save(flags); cr4 = this_cpu_read(cpu_tlbstate.cr4); @@ -398,7 +390,7 @@ index 6690cd3fc8b1..300a1c2819fb 100644 if ((cr4 & ~mask) != cr4) __cr4_set(cr4 & ~mask); local_irq_restore(flags); -@@ -288,6 +290,7 @@ static inline void cr4_toggle_bits_irqsoff(unsigned long mask) +@@ -316,6 +318,7 @@ static inline void cr4_toggle_bits_irqsoff(unsigned long mask) unsigned long cr4; cr4 = this_cpu_read(cpu_tlbstate.cr4); @@ 
-406,7 +398,7 @@ index 6690cd3fc8b1..300a1c2819fb 100644 __cr4_set(cr4 ^ mask); } -@@ -394,6 +397,7 @@ static inline void __native_flush_tlb_global(void) +@@ -422,6 +425,7 @@ static inline void __native_flush_tlb_global(void) raw_local_irq_save(flags); cr4 = this_cpu_read(cpu_tlbstate.cr4); @@ -415,10 +407,10 @@ index 6690cd3fc8b1..300a1c2819fb 100644 native_write_cr4(cr4 ^ X86_CR4_PGE); /* write old PGE again and flush TLBs */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c -index b41b72bd8bb8..d54a3c30902a 100644 +index 132a63dc5a76..05c8de9b452e 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c -@@ -1726,7 +1726,6 @@ void cpu_init(void) +@@ -1768,7 +1768,6 @@ void cpu_init(void) wrmsrl(MSR_KERNEL_GS_BASE, 0); barrier(); @@ -427,19 +419,19 @@ index b41b72bd8bb8..d54a3c30902a 100644 /* diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c -index 30ca2d1a9231..bde0a18cd9f9 100644 +index 957eae13b370..01b7bb76bdbc 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c -@@ -39,6 +39,8 @@ - #include <asm/desc.h> +@@ -42,6 +42,8 @@ #include <asm/prctl.h> #include <asm/spec-ctrl.h> + #include <asm/proto.h> +#include <asm/elf.h> +#include <linux/sizes.h> - /* - * per-CPU TSS segments. Threads are completely 'soft' on Linux, -@@ -718,7 +720,10 @@ unsigned long arch_align_stack(unsigned long sp) + #include "process.h" + +@@ -798,7 +800,10 @@ unsigned long arch_align_stack(unsigned long sp) unsigned long arch_randomize_brk(struct mm_struct *mm) { @@ -452,7 +444,7 @@ index 30ca2d1a9231..bde0a18cd9f9 100644 /* diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c -index 6a78d4b36a79..715009f7a96c 100644 +index f7476ce23b6e..652169a2b23a 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -54,13 +54,6 @@ static unsigned long get_align_bits(void) @@ -473,7 +465,7 @@ index 6a78d4b36a79..715009f7a96c 100644 } *begin = get_mmap_base(1); -- if (in_compat_syscall()) +- if (in_32bit_syscall()) - *end = task_size_32bit(); - else - *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW); @@ -491,10 +483,10 @@ index 6a78d4b36a79..715009f7a96c 100644 /* diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c -index 979e0a02cbe1..d6ab882a0091 100644 +index 85c94f9a87f8..6b14ddb6c688 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c -@@ -560,9 +560,9 @@ static void __init pagetable_init(void) +@@ -559,9 +559,9 @@ static void __init pagetable_init(void) #define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL) /* Bits supported by the hardware: */ @@ -506,34 +498,11 @@ index 979e0a02cbe1..d6ab882a0091 100644 EXPORT_SYMBOL_GPL(__supported_pte_mask); /* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */ EXPORT_SYMBOL(__default_kernel_pte_mask); -@@ -873,7 +873,7 @@ int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) - #endif - #endif - --int kernel_set_to_readonly __read_mostly; -+int kernel_set_to_readonly __ro_after_init; - - void set_kernel_text_rw(void) - { -@@ -925,12 +925,11 @@ void mark_rodata_ro(void) - unsigned long start = PFN_ALIGN(_text); - unsigned long size = PFN_ALIGN(_etext) - start; - -+ kernel_set_to_readonly = 1; - set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Write protecting the kernel text: %luk\n", - size >> 10); - -- kernel_set_to_readonly = 1; -- - #ifdef CONFIG_CPA_DEBUG - printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", - start, start+size); diff --git a/arch/x86/mm/init_64.c 
b/arch/x86/mm/init_64.c -index 68c292cb1ebf..b81cd1f2d6df 100644 +index bccff68e3267..b4e3a62c2e50 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c -@@ -66,9 +66,9 @@ +@@ -65,9 +65,9 @@ */ /* Bits supported by the hardware: */ @@ -545,28 +514,8 @@ index 68c292cb1ebf..b81cd1f2d6df 100644 EXPORT_SYMBOL_GPL(__supported_pte_mask); /* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */ EXPORT_SYMBOL(__default_kernel_pte_mask); -@@ -1207,7 +1207,7 @@ void __init mem_init(void) - mem_init_print_info(NULL); - } - --int kernel_set_to_readonly; -+int kernel_set_to_readonly __ro_after_init; - - void set_kernel_text_rw(void) - { -@@ -1256,9 +1256,8 @@ void mark_rodata_ro(void) - - printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", - (end - start) >> 10); -- set_memory_ro(start, (end - start) >> PAGE_SHIFT); -- - kernel_set_to_readonly = 1; -+ set_memory_ro(start, (end - start) >> PAGE_SHIFT); - - /* - * The rodata/data/bss/brk section (but not the kernel text!) diff --git a/block/blk-softirq.c b/block/blk-softirq.c -index 15c1f5e12eb8..ff72cccec5b8 100644 +index 457d9ba3eb20..5f987fc1c0a0 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -20,7 +20,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done); @@ -579,10 +528,10 @@ index 15c1f5e12eb8..ff72cccec5b8 100644 struct list_head *cpu_list, local_list; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c -index cc71c63df381..44432ecffbcd 100644 +index 133fed8e4a8b..a04fccec45db 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c -@@ -5154,7 +5154,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) +@@ -5161,7 +5161,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) struct ata_port *ap; unsigned int tag; @@ -591,7 +540,7 @@ index cc71c63df381..44432ecffbcd 100644 ap = qc->ap; qc->flags = 0; -@@ -5171,7 +5171,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) +@@ -5178,7 +5178,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) struct ata_port *ap; struct ata_link *link; @@ -601,7 +550,7 @@ index cc71c63df381..44432ecffbcd 100644 ap = qc->ap; link = qc->dev->link; diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig -index 212f447938ae..0982c7ddd88c 100644 +index 466ebd84ad17..a093e3f158c9 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -9,7 +9,6 @@ source "drivers/tty/Kconfig" @@ -612,7 +561,7 @@ index 212f447938ae..0982c7ddd88c 100644 help Say Y here if you want to support the /dev/mem device. The /dev/mem device is used to access areas of physical -@@ -531,7 +530,6 @@ config TELCLOCK +@@ -536,7 +535,6 @@ config TELCLOCK config DEVPORT bool "/dev/port character device" depends on ISA || PCI @@ -621,7 +570,7 @@ index 212f447938ae..0982c7ddd88c 100644 Say Y here if you want to support the /dev/port device. The /dev/port device is similar to /dev/mem, but for I/O ports. diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig -index 0840d27381ea..ae292fcedaca 100644 +index e0a04bfc873e..ec93f827c599 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -122,7 +122,6 @@ config UNIX98_PTYS @@ -633,10 +582,10 @@ index 0840d27381ea..ae292fcedaca 100644 A pseudo terminal (PTY) is a software device consisting of two halves: a master and a slave. 
The slave device behaves identical to diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c -index aba59521ad48..90d4779683a3 100644 +index 5fa250157025..fabcb1ebd24b 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c -@@ -172,6 +172,7 @@ static void free_tty_struct(struct tty_struct *tty) +@@ -173,6 +173,7 @@ static void free_tty_struct(struct tty_struct *tty) put_device(tty->dev); kfree(tty->write_buf); tty->magic = 0xDEADDEAD; @@ -644,7 +593,7 @@ index aba59521ad48..90d4779683a3 100644 kfree(tty); } -@@ -2164,11 +2165,19 @@ static int tty_fasync(int fd, struct file *filp, int on) +@@ -2178,11 +2179,19 @@ static int tty_fasync(int fd, struct file *filp, int on) * FIXME: may race normal receive processing */ @@ -664,7 +613,7 @@ index aba59521ad48..90d4779683a3 100644 if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(ch, p)) -@@ -2851,6 +2860,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx) +@@ -3008,6 +3017,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx) tty->index = idx; tty_line_name(driver, idx, tty->name); tty->dev = tty_get_device(tty); @@ -672,11 +621,26 @@ index aba59521ad48..90d4779683a3 100644 return tty; } +diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c +index e38f104db174..8570a8514e46 100644 +--- a/drivers/tty/tty_ldisc.c ++++ b/drivers/tty/tty_ldisc.c +@@ -855,8 +855,8 @@ void tty_ldisc_deinit(struct tty_struct *tty) + tty->ldisc = NULL; + } + +-static int zero; +-static int one = 1; ++static int zero __read_only; ++static int one __read_only = 1; + static struct ctl_table tty_table[] = { + { + .procname = "ldisc_autoload", diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c -index 1fb266809966..db145cb734e4 100644 +index 310eef451db8..2c4c1f7045bf 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c -@@ -41,6 +41,8 @@ +@@ -42,6 +42,8 @@ #define USB_TP_TRANSMISSION_DELAY 40 /* ns */ #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */ @@ -685,7 +649,7 @@ index 1fb266809966..db145cb734e4 100644 /* Protect struct usb_device->state and ->children members * Note: Both are also protected by ->dev.sem, except that ->state can * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. 
*/ -@@ -4881,6 +4883,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, +@@ -4981,6 +4983,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, goto done; return; } @@ -699,7 +663,7 @@ index 1fb266809966..db145cb734e4 100644 unit_load = 150; else diff --git a/fs/exec.c b/fs/exec.c -index bdd0eacefdf5..20908a84550c 100644 +index 2e0033348d8e..003933540718 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -62,6 +62,7 @@ @@ -710,7 +674,7 @@ index bdd0eacefdf5..20908a84550c 100644 #include <linux/uaccess.h> #include <asm/mmu_context.h> -@@ -320,6 +321,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm) +@@ -275,6 +276,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm) arch_bprm_mm_init(mm, vma); up_write(&mm->mmap_sem); bprm->p = vma->vm_end - sizeof(void *); @@ -720,17 +684,21 @@ index bdd0eacefdf5..20908a84550c 100644 err: up_write(&mm->mmap_sem); diff --git a/fs/namei.c b/fs/namei.c -index 734cef54fdf8..8e3b3ae0cf30 100644 +index dede0147b3f6..c708a5887276 100644 --- a/fs/namei.c +++ b/fs/namei.c -@@ -885,8 +885,8 @@ static inline void put_link(struct nameidata *nd) +@@ -883,10 +883,10 @@ static inline void put_link(struct nameidata *nd) path_put(&last->link); } -int sysctl_protected_symlinks __read_mostly = 0; -int sysctl_protected_hardlinks __read_mostly = 0; +-int sysctl_protected_fifos __read_mostly; +-int sysctl_protected_regular __read_mostly; +int sysctl_protected_symlinks __read_mostly = 1; +int sysctl_protected_hardlinks __read_mostly = 1; ++int sysctl_protected_fifos __read_mostly = 2; ++int sysctl_protected_regular __read_mostly = 2; /** * may_follow_link - Check symlink following for unsafe situations @@ -744,10 +712,10 @@ index 5f93cfacb3d1..cea0d7d3b23e 100644 select CRC32 - default y diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig -index 0eaeb41453f5..8cd1e66aa408 100644 +index 817c02b13b1d..b8cd62b5cbc3 100644 --- a/fs/proc/Kconfig +++ b/fs/proc/Kconfig -@@ -39,7 +39,6 @@ config PROC_KCORE +@@ -40,7 +40,6 @@ config PROC_KCORE config PROC_VMCORE bool "/proc/vmcore support" depends on PROC_FS && CRASH_DUMP @@ -756,7 +724,7 @@ index 0eaeb41453f5..8cd1e66aa408 100644 Exports the dump image of crashed kernel in ELF format. 
diff --git a/fs/stat.c b/fs/stat.c -index f8e6fb2c3657..240c1432e18f 100644 +index c38e4c2e1221..6135fbaf7298 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -40,8 +40,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat) @@ -775,10 +743,10 @@ index f8e6fb2c3657..240c1432e18f 100644 stat->ctime = inode->i_ctime; stat->blksize = i_blocksize(inode); stat->blocks = inode->i_blocks; -@@ -75,9 +80,14 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat, - stat->result_mask |= STATX_BASIC_STATS; - request_mask &= STATX_ALL; - query_flags &= KSTAT_QUERY_FLAGS; +@@ -77,9 +82,14 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat, + if (IS_AUTOMOUNT(inode)) + stat->attributes |= STATX_ATTR_AUTOMOUNT; + - if (inode->i_op->getattr) - return inode->i_op->getattr(path, stat, request_mask, - query_flags); @@ -807,18 +775,18 @@ index 750621e41d1c..e7157c18c62c 100644 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) #endif diff --git a/include/linux/capability.h b/include/linux/capability.h -index f640dcbc880c..2b4f5d651f19 100644 +index ecce0f43c73a..e46306dd4401 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h -@@ -207,6 +207,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap); +@@ -208,6 +208,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap); extern bool has_ns_capability_noaudit(struct task_struct *t, struct user_namespace *ns, int cap); extern bool capable(int cap); +extern bool capable_noaudit(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); extern bool ns_capable_noaudit(struct user_namespace *ns, int cap); - #else -@@ -232,6 +233,10 @@ static inline bool capable(int cap) + extern bool ns_capable_setid(struct user_namespace *ns, int cap); +@@ -234,6 +235,10 @@ static inline bool capable(int cap) { return true; } @@ -830,12 +798,12 @@ index f640dcbc880c..2b4f5d651f19 100644 { return true; diff --git a/include/linux/fs.h b/include/linux/fs.h -index 805bf22898cf..e3a036f29e69 100644 +index dd28e7679089..cc0030c688d1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h -@@ -3438,4 +3438,15 @@ static inline bool dir_relax_shared(struct inode *inode) - extern bool path_noexec(const struct path *path); - extern void inode_nohighmem(struct inode *inode); +@@ -3534,4 +3534,15 @@ static inline struct sock *io_uring_get_socket(struct file *file) + } + #endif +extern int device_sidechannel_restrict; + @@ -850,11 +818,11 @@ index 805bf22898cf..e3a036f29e69 100644 + #endif /* _LINUX_FS_H */ diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h -index bdaf22582f6e..326ff15d4637 100644 +index e30d6132c633..d62017d489fa 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h -@@ -181,6 +181,9 @@ static inline void fsnotify_access(struct file *file) - struct inode *inode = path->dentry->d_inode; +@@ -207,6 +207,9 @@ static inline void fsnotify_access(struct file *file) + struct inode *inode = file_inode(file); __u32 mask = FS_ACCESS; + if (is_sidechannel_device(inode)) @@ -863,8 +831,8 @@ index bdaf22582f6e..326ff15d4637 100644 if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; -@@ -199,6 +202,9 @@ static inline void fsnotify_modify(struct file *file) - struct inode *inode = path->dentry->d_inode; +@@ -223,6 +226,9 @@ static inline void fsnotify_modify(struct file *file) + struct inode *inode = file_inode(file); __u32 mask = FS_MODIFY; + if (is_sidechannel_device(inode)) @@ -874,10 +842,10 @@ index bdaf22582f6e..326ff15d4637 100644 mask |= 
FS_ISDIR; diff --git a/include/linux/gfp.h b/include/linux/gfp.h -index a6afcec53795..dea3241398bb 100644 +index fdab7de7490d..13755aff72ab 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h -@@ -513,9 +513,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, +@@ -530,9 +530,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); @@ -890,10 +858,10 @@ index a6afcec53795..dea3241398bb 100644 #define __get_free_page(gfp_mask) \ __get_free_pages((gfp_mask), 0) diff --git a/include/linux/highmem.h b/include/linux/highmem.h -index 0690679832d4..b9394bc86fad 100644 +index ea5cdbd8c2c3..805b84d6bbca 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h -@@ -191,6 +191,13 @@ static inline void clear_highpage(struct page *page) +@@ -215,6 +215,13 @@ static inline void clear_highpage(struct page *page) kunmap_atomic(kaddr); } @@ -908,10 +876,10 @@ index 0690679832d4..b9394bc86fad 100644 unsigned start1, unsigned end1, unsigned start2, unsigned end2) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index eeceac3376fc..78ad558bce5f 100644 +index 690b238a44d5..06e831f45016 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h -@@ -490,7 +490,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS]; +@@ -535,7 +535,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS]; struct softirq_action { @@ -920,7 +888,7 @@ index eeceac3376fc..78ad558bce5f 100644 }; asmlinkage void do_softirq(void); -@@ -505,7 +505,7 @@ static inline void do_softirq_own_stack(void) +@@ -550,7 +550,7 @@ static inline void do_softirq_own_stack(void) } #endif @@ -943,10 +911,10 @@ index 069aa2ebef90..cb9e3637a620 100644 const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); diff --git a/include/linux/mm.h b/include/linux/mm.h -index 68a5121694ef..4f6c2e842744 100644 +index 6b10c21630f5..e992a6c03666 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h -@@ -570,7 +570,7 @@ static inline int is_vmalloc_or_module_addr(const void *x) +@@ -599,7 +599,7 @@ static inline int is_vmalloc_or_module_addr(const void *x) } #endif @@ -956,7 +924,7 @@ index 68a5121694ef..4f6c2e842744 100644 { return kvmalloc_node(size, flags, NUMA_NO_NODE); diff --git a/include/linux/percpu.h b/include/linux/percpu.h -index 296bbe49d5d1..b26652c9a98d 100644 +index 70b7123f38c7..09f3019489b2 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -129,7 +129,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, @@ -980,10 +948,10 @@ index 296bbe49d5d1..b26652c9a98d 100644 extern phys_addr_t per_cpu_ptr_to_phys(void *addr); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h -index 87f6db437e4a..bbcd76ec7d6e 100644 +index 1f678f023850..54a9effb6831 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h -@@ -1179,6 +1179,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, +@@ -1205,6 +1205,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, int perf_event_max_stack_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); @@ -996,10 +964,10 @@ index 87f6db437e4a..bbcd76ec7d6e 100644 { return sysctl_perf_event_paranoid > -1; diff --git a/include/linux/slab.h b/include/linux/slab.h -index 
14e3fe4bd6a1..2b1d16e90e75 100644 +index 9449b19c5f10..f7beb5b69b60 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h -@@ -178,8 +178,8 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *); +@@ -180,8 +180,8 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *); /* * Common kmalloc functions provided by all allocators */ @@ -1010,7 +978,7 @@ index 14e3fe4bd6a1..2b1d16e90e75 100644 void kfree(const void *); void kzfree(const void *); size_t ksize(const void *); -@@ -352,7 +352,7 @@ static __always_inline unsigned int kmalloc_index(size_t size) +@@ -385,7 +385,7 @@ static __always_inline unsigned int kmalloc_index(size_t size) } #endif /* !CONFIG_SLOB */ @@ -1019,7 +987,7 @@ index 14e3fe4bd6a1..2b1d16e90e75 100644 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc; void kmem_cache_free(struct kmem_cache *, void *); -@@ -376,7 +376,7 @@ static __always_inline void kfree_bulk(size_t size, void **p) +@@ -409,7 +409,7 @@ static __always_inline void kfree_bulk(size_t size, void **p) } #ifdef CONFIG_NUMA @@ -1028,16 +996,16 @@ index 14e3fe4bd6a1..2b1d16e90e75 100644 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc; #else static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) -@@ -498,7 +498,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) - * for general use, and so are not documented here. For a full list of - * potential flags, always refer to linux/gfp.h. +@@ -530,7 +530,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) + * Try really hard to succeed the allocation but fail + * eventually. */ -static __always_inline void *kmalloc(size_t size, gfp_t flags) +static __always_inline __attribute__((alloc_size(1))) void *kmalloc(size_t size, gfp_t flags) { if (__builtin_constant_p(size)) { - if (size > KMALLOC_MAX_CACHE_SIZE) -@@ -538,7 +538,7 @@ static __always_inline unsigned int kmalloc_size(unsigned int n) + #ifndef CONFIG_SLOB +@@ -572,7 +572,7 @@ static __always_inline unsigned int kmalloc_size(unsigned int n) return 0; } @@ -1047,7 +1015,7 @@ index 14e3fe4bd6a1..2b1d16e90e75 100644 #ifndef CONFIG_SLOB if (__builtin_constant_p(size) && diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h -index 3a1a1dbc6f49..ff38fec9eb76 100644 +index d2153789bd9f..97da977d6060 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -121,6 +121,11 @@ struct kmem_cache { @@ -1063,10 +1031,10 @@ index 3a1a1dbc6f49..ff38fec9eb76 100644 /* * Defragmentation by allocating from a remote node. 
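The random_active/random_inactive words added to struct kmem_cache above drive the SLAB_CANARY checks that appear later in this diff (set_canary()/check_canary() in mm/slub.c): a per-cache secret is mixed with the object's address to derive a guard word stored just past the object's data, and the word is re-verified when the object is freed. A minimal userspace sketch of the idea — the mixing function and names here are illustrative, not the kernel's exact code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the per-cache secret (kmem_cache.random_active etc.). */
static const uintptr_t cache_secret = 0x5eed5eed;

/* Derive a canary from the secret and the object address, so a value
 * leaked from one slot is useless for forging another slot's canary. */
static uintptr_t canary_for(const void *obj)
{
        return cache_secret ^ (uintptr_t)obj;
}

static void *alloc_with_canary(size_t size)
{
        /* Reserve one extra word past the object for the guard value. */
        char *obj = malloc(size + sizeof(uintptr_t));

        if (!obj)
                abort();
        *(uintptr_t *)(obj + size) = canary_for(obj);
        return obj;
}

static void free_with_canary(void *obj, size_t size)
{
        /* A linear overflow past 'size' clobbers the guard word first. */
        if (*(uintptr_t *)((char *)obj + size) != canary_for(obj)) {
                fprintf(stderr, "slab canary corrupted\n");
                abort();
        }
        free(obj);
}

int main(void)
{
        char *p = alloc_with_canary(16);

        p[16] = 'X';              /* one-byte overflow into the canary */
        free_with_canary(p, 16);  /* detected here; aborts */
        return 0;
}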
diff --git a/include/linux/string.h b/include/linux/string.h -index 4a5a0eb7df51..be86cf21d0ce 100644 +index 6ab0a6fa512e..d3c5b10a4102 100644 --- a/include/linux/string.h +++ b/include/linux/string.h -@@ -235,10 +235,16 @@ void __read_overflow2(void) __compiletime_error("detected read beyond size of ob +@@ -245,10 +245,16 @@ void __read_overflow2(void) __compiletime_error("detected read beyond size of ob void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter"); void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); @@ -1084,7 +1052,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 if (__builtin_constant_p(size) && p_size < size) __write_overflow(); if (p_size < size) -@@ -248,7 +254,7 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) +@@ -258,7 +264,7 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) __FORTIFY_INLINE char *strcat(char *p, const char *q) { @@ -1093,7 +1061,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 if (p_size == (size_t)-1) return __builtin_strcat(p, q); if (strlcat(p, q, p_size) >= p_size) -@@ -259,7 +265,7 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q) +@@ -269,7 +275,7 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q) __FORTIFY_INLINE __kernel_size_t strlen(const char *p) { __kernel_size_t ret; @@ -1102,7 +1070,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 /* Work around gcc excess stack consumption issue */ if (p_size == (size_t)-1 || -@@ -274,7 +280,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) +@@ -284,7 +290,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); __FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen) { @@ -1111,7 +1079,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? 
maxlen : p_size); if (p_size <= ret && maxlen != ret) fortify_panic(__func__); -@@ -286,8 +292,8 @@ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); +@@ -296,8 +302,8 @@ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) { size_t ret; @@ -1122,7 +1090,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 if (p_size == (size_t)-1 && q_size == (size_t)-1) return __real_strlcpy(p, q, size); ret = strlen(q); -@@ -307,8 +313,8 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) +@@ -317,8 +323,8 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) { size_t p_len, copy_len; @@ -1133,7 +1101,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 if (p_size == (size_t)-1 && q_size == (size_t)-1) return __builtin_strncat(p, q, count); p_len = strlen(p); -@@ -421,8 +427,8 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) +@@ -431,8 +437,8 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) /* defined after fortified strlen and memcpy to reuse them */ __FORTIFY_INLINE char *strcpy(char *p, const char *q) { @@ -1145,7 +1113,7 @@ index 4a5a0eb7df51..be86cf21d0ce 100644 return __builtin_strcpy(p, q); memcpy(p, q, strlen(q) + 1); diff --git a/include/linux/tty.h b/include/linux/tty.h -index c56e3978b00f..1625c85f31f0 100644 +index bfa4e2ee94a9..3e18d583fc8d 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -14,6 +14,7 @@ @@ -1208,10 +1176,10 @@ index 398e9c95cd61..baab7195306a 100644 extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags); static inline void *__vmalloc_node_flags_caller(unsigned long size, int node, diff --git a/init/Kconfig b/init/Kconfig -index 041f3a022122..0ddcf8b083d4 100644 +index 4592bf7997c0..2a5dfc8ed04f 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -294,6 +294,7 @@ config USELIB +@@ -342,6 +342,7 @@ config USELIB config AUDIT bool "Auditing support" depends on NET @@ -1219,7 +1187,7 @@ index 041f3a022122..0ddcf8b083d4 100644 help Enable auditing infrastructure that can be used with another kernel subsystem, such as SELinux (which requires this for -@@ -1036,6 +1037,12 @@ config CC_OPTIMIZE_FOR_SIZE +@@ -1139,6 +1140,12 @@ config CC_OPTIMIZE_FOR_SIZE endchoice @@ -1232,7 +1200,7 @@ index 041f3a022122..0ddcf8b083d4 100644 config HAVE_LD_DEAD_CODE_DATA_ELIMINATION bool help -@@ -1319,8 +1326,7 @@ config SHMEM +@@ -1425,8 +1432,7 @@ config SHMEM which may be appropriate on small systems without swap. config AIO @@ -1242,7 +1210,7 @@ index 041f3a022122..0ddcf8b083d4 100644 help This option enables POSIX asynchronous I/O which may by used by some high performance threaded applications. Disabling -@@ -1549,7 +1555,7 @@ config VM_EVENT_COUNTERS +@@ -1652,7 +1658,7 @@ config VM_EVENT_COUNTERS config SLUB_DEBUG default y @@ -1251,7 +1219,7 @@ index 041f3a022122..0ddcf8b083d4 100644 depends on SLUB && SYSFS help SLUB has extensive debug support features. Disabling these can -@@ -1573,7 +1579,6 @@ config SLUB_MEMCG_SYSFS_ON +@@ -1676,7 +1682,6 @@ config SLUB_MEMCG_SYSFS_ON config COMPAT_BRK bool "Disable heap randomization" @@ -1259,7 +1227,7 @@ index 041f3a022122..0ddcf8b083d4 100644 help Randomizing heap placement makes heap exploits harder, but it also breaks ancient binaries (including anything libc5 based). 
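The COMPAT_BRK option above is the opt-out for brk randomization: with randomization active, the initial program break lands at a different address on each execution, which is exactly what libc5-era binaries that assumed a fixed heap base cannot tolerate. A quick probe, assuming Linux and a libc that exposes sbrk():

#include <stdio.h>
#include <unistd.h>

/* Run this several times: with heap randomization on, the reported
 * break differs per run; with CONFIG_COMPAT_BRK (or norandmaps on the
 * kernel command line) it stays stable. */
int main(void)
{
        printf("initial program break: %p\n", sbrk(0));
        return 0;
}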
-@@ -1620,7 +1625,6 @@ endchoice +@@ -1723,7 +1728,6 @@ endchoice config SLAB_MERGE_DEFAULT bool "Allow slab caches to be merged" @@ -1267,7 +1235,7 @@ index 041f3a022122..0ddcf8b083d4 100644 help For reduced kernel memory fragmentation, slab caches can be merged when they share the same size and other characteristics. -@@ -1633,9 +1637,9 @@ config SLAB_MERGE_DEFAULT +@@ -1736,9 +1740,9 @@ config SLAB_MERGE_DEFAULT command line. config SLAB_FREELIST_RANDOM @@ -1278,7 +1246,7 @@ index 041f3a022122..0ddcf8b083d4 100644 help Randomizes the freelist order used on creating new pages. This security feature reduces the predictability of the kernel slab -@@ -1644,12 +1648,56 @@ config SLAB_FREELIST_RANDOM +@@ -1747,12 +1751,56 @@ config SLAB_FREELIST_RANDOM config SLAB_FREELIST_HARDENED bool "Harden slab freelist metadata" depends on SLUB @@ -1336,10 +1304,10 @@ index 041f3a022122..0ddcf8b083d4 100644 default y depends on SLUB && SMP diff --git a/kernel/audit.c b/kernel/audit.c -index e7478cb58079..69be132956df 100644 +index c89ea48c70a6..eeac2e76739d 100644 --- a/kernel/audit.c +++ b/kernel/audit.c -@@ -1631,6 +1631,9 @@ static int __init audit_enable(char *str) +@@ -1641,6 +1641,9 @@ static int __init audit_enable(char *str) if (audit_default == AUDIT_OFF) audit_initialized = AUDIT_DISABLED; @@ -1350,23 +1318,23 @@ index e7478cb58079..69be132956df 100644 pr_err("audit: error setting audit state (%d)\n", audit_default); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c -index 1e5625d46414..71cac92b6629 100644 +index 06ba9c5f156b..135eaaeff0b5 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c -@@ -367,7 +367,7 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) +@@ -521,7 +521,7 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) #ifdef CONFIG_BPF_JIT /* All BPF JIT sysctl knobs here. 
*/ int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON); -int bpf_jit_harden __read_mostly; +int bpf_jit_harden __read_mostly = 2; int bpf_jit_kallsyms __read_mostly; + long bpf_jit_limit __read_mostly; - static __always_inline void diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c -index a31a1ba0f8ea..1d5093b51c63 100644 +index db6e825e2958..9b8d4b5368b1 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c -@@ -48,7 +48,7 @@ static DEFINE_SPINLOCK(prog_idr_lock); +@@ -47,7 +47,7 @@ static DEFINE_SPINLOCK(prog_idr_lock); static DEFINE_IDR(map_idr); static DEFINE_SPINLOCK(map_idr_lock); @@ -1376,10 +1344,10 @@ index a31a1ba0f8ea..1d5093b51c63 100644 static const struct bpf_map_ops * const bpf_map_types[] = { #define BPF_PROG_TYPE(_id, _ops) diff --git a/kernel/capability.c b/kernel/capability.c -index 1e1c0236f55b..452062fe45ce 100644 +index 1444f3954d75..8cc9dd7992f2 100644 --- a/kernel/capability.c +++ b/kernel/capability.c -@@ -431,6 +431,12 @@ bool capable(int cap) +@@ -449,6 +449,12 @@ bool capable(int cap) return ns_capable(&init_user_ns, cap); } EXPORT_SYMBOL(capable); @@ -1393,10 +1361,10 @@ index 1e1c0236f55b..452062fe45ce 100644 /** diff --git a/kernel/events/core.c b/kernel/events/core.c -index eec2d5fb676b..9040756bbb0a 100644 +index dc7dead2d2cc..e078b1fec819 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c -@@ -397,8 +397,13 @@ static cpumask_var_t perf_online_mask; +@@ -398,8 +398,13 @@ static cpumask_var_t perf_online_mask; * 0 - disallow raw tracepoint access for unpriv * 1 - disallow cpu events for unpriv * 2 - disallow kernel profiling for unpriv @@ -1410,7 +1378,7 @@ index eec2d5fb676b..9040756bbb0a 100644 /* Minimum for 512 kiB + 1 user control page */ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ -@@ -10408,6 +10413,9 @@ SYSCALL_DEFINE5(perf_event_open, +@@ -10748,6 +10753,9 @@ SYSCALL_DEFINE5(perf_event_open, if (flags & ~PERF_FLAG_ALL) return -EINVAL; @@ -1421,7 +1389,7 @@ index eec2d5fb676b..9040756bbb0a 100644 if (err) return err; diff --git a/kernel/fork.c b/kernel/fork.c -index 1b27babc4c78..a88dd3ccd31c 100644 +index 2628f3773ca8..a2da35b446a6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -103,6 +103,11 @@ @@ -1436,7 +1404,7 @@ index 1b27babc4c78..a88dd3ccd31c 100644 /* * Minimum number of threads to boot the kernel -@@ -1624,6 +1629,10 @@ static __latent_entropy struct task_struct *copy_process( +@@ -1719,6 +1724,10 @@ static __latent_entropy struct task_struct *copy_process( if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); @@ -1447,7 +1415,7 @@ index 1b27babc4c78..a88dd3ccd31c 100644 /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. 
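The copy_process() hunk above is the enforcement half of the kernel.unprivileged_userns_clone sysctl this patch wires up in kernel/sysctl.c: with the knob at 0, CLONE_NEWUSER from a task lacking CAP_SYS_ADMIN fails with EPERM. A Linux-specific userspace probe of that behaviour:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

/* Attempt to enter a fresh user namespace as an unprivileged user.
 * Expected: EPERM when unprivileged_userns_clone=0, success when the
 * knob is 1 or the caller holds CAP_SYS_ADMIN. */
int main(void)
{
        if (unshare(CLONE_NEWUSER) == -1) {
                printf("unshare(CLONE_NEWUSER): %s\n", strerror(errno));
                return 1;
        }
        puts("new user namespace created");
        return 0;
}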
-@@ -2420,6 +2429,12 @@ int ksys_unshare(unsigned long unshare_flags) +@@ -2554,6 +2563,12 @@ int ksys_unshare(unsigned long unshare_flags) if (unshare_flags & CLONE_NEWNS) unshare_flags |= CLONE_FS; @@ -1461,10 +1429,10 @@ index 1b27babc4c78..a88dd3ccd31c 100644 if (err) goto bad_unshare_out; diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c -index 3d37c279c090..0789ca413f09 100644 +index f08a1e4ee1d4..ece99ca24ed0 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c -@@ -1138,7 +1138,7 @@ void free_basic_memory_bitmaps(void) +@@ -1142,7 +1142,7 @@ void free_basic_memory_bitmaps(void) void clear_free_pages(void) { @@ -1473,7 +1441,7 @@ index 3d37c279c090..0789ca413f09 100644 struct memory_bitmap *bm = free_pages_map; unsigned long pfn; -@@ -1155,7 +1155,7 @@ void clear_free_pages(void) +@@ -1159,7 +1159,7 @@ void clear_free_pages(void) } memory_bm_position_reset(bm); pr_info("free pages cleared after restore\n"); @@ -1483,36 +1451,36 @@ index 3d37c279c090..0789ca413f09 100644 /** diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c -index a64eee0db39e..4d7de378fe4c 100644 +index 911bd9076d43..b65e2ee716c4 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c -@@ -164,7 +164,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) - } +@@ -74,7 +74,7 @@ void rcu_sched_clock_irq(int user) } + /* Invoke the RCU callbacks whose grace period has elapsed. */ -static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) +static __latent_entropy void rcu_process_callbacks(void) { - __rcu_process_callbacks(&rcu_sched_ctrlblk); - __rcu_process_callbacks(&rcu_bh_ctrlblk); + struct rcu_head *next, *list; + unsigned long flags; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c -index aa7cade1b9f3..d6be47e1c86f 100644 +index acd6ccf56faf..17db4829d2c0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -2791,7 +2791,7 @@ __rcu_process_callbacks(struct rcu_state *rsp) - /* - * Do RCU core processing for the current CPU. - */ --static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) -+static __latent_entropy void rcu_process_callbacks(void) - { - struct rcu_state *rsp; +@@ -2731,7 +2731,7 @@ void rcu_fwd_progress_check(unsigned long j) + EXPORT_SYMBOL_GPL(rcu_fwd_progress_check); + /* Perform RCU core processing work for the current CPU. */ +-static __latent_entropy void rcu_core(struct softirq_action *unused) ++static __latent_entropy void rcu_core(void) + { + unsigned long flags; + struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 9c219f7b0970..963a68e64593 100644 +index 232491e3ed0d..194d10702841 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -9862,7 +9862,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) +@@ -10117,7 +10117,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) * run_rebalance_domains is triggered when needed from the scheduler tick. * Also triggered for nohz idle balancing (with nohz_balancing_kick set). */ @@ -1522,7 +1490,7 @@ index 9c219f7b0970..963a68e64593 100644 struct rq *this_rq = this_rq(); enum cpu_idle_type idle = this_rq->idle_balance ? 
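The signature churn in the RCU, scheduler, timer and softirq hunks around this point all implements one change: struct softirq_action's handler goes from void (*action)(struct softirq_action *) to void (*action)(void), dropping a parameter no handler ever used. A minimal sketch of the resulting dispatch-table shape (illustrative userspace code, not the kernel's):

#include <stdio.h>

/* With the argument gone, every registered handler shares one exact
 * prototype, which makes indirect calls through ->action easier for
 * type-based control-flow checking to police. */
struct softirq_action {
        void (*action)(void);
};

static void run_timer_softirq(void) { puts("TIMER_SOFTIRQ"); }
static void tasklet_action(void)    { puts("TASKLET_SOFTIRQ"); }

static struct softirq_action softirq_vec[2];

static void open_softirq(int nr, void (*action)(void))
{
        softirq_vec[nr].action = action;
}

int main(void)
{
        open_softirq(0, run_timer_softirq);
        open_softirq(1, tasklet_action);
        for (int nr = 0; nr < 2; nr++)
                softirq_vec[nr].action();  /* was: action(softirq_vec + nr) */
        return 0;
}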
diff --git a/kernel/softirq.c b/kernel/softirq.c -index 6f584861d329..1943fe60f3b9 100644 +index 10277429ed84..d1323bdfc20c 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -53,7 +53,7 @@ DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); @@ -1534,7 +1502,7 @@ index 6f584861d329..1943fe60f3b9 100644 DEFINE_PER_CPU(struct task_struct *, ksoftirqd); -@@ -289,7 +289,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) +@@ -290,7 +290,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) kstat_incr_softirqs_this_cpu(vec_nr); trace_softirq_entry(vec_nr); @@ -1543,7 +1511,7 @@ index 6f584861d329..1943fe60f3b9 100644 trace_softirq_exit(vec_nr); if (unlikely(prev_count != preempt_count())) { pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", -@@ -451,7 +451,7 @@ void __raise_softirq_irqoff(unsigned int nr) +@@ -453,7 +453,7 @@ void __raise_softirq_irqoff(unsigned int nr) or_softirq_pending(1UL << nr); } @@ -1552,7 +1520,7 @@ index 6f584861d329..1943fe60f3b9 100644 { softirq_vec[nr].action = action; } -@@ -497,8 +497,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) +@@ -499,8 +499,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) } EXPORT_SYMBOL(__tasklet_hi_schedule); @@ -1562,7 +1530,7 @@ index 6f584861d329..1943fe60f3b9 100644 unsigned int softirq_nr) { struct tasklet_struct *list; -@@ -535,14 +534,14 @@ static void tasklet_action_common(struct softirq_action *a, +@@ -537,14 +536,14 @@ static void tasklet_action_common(struct softirq_action *a, } } @@ -1582,18 +1550,18 @@ index 6f584861d329..1943fe60f3b9 100644 void tasklet_init(struct tasklet_struct *t, diff --git a/kernel/sysctl.c b/kernel/sysctl.c -index 2d9837c0aff4..852fea68d574 100644 +index 387efbaf464a..238a6d0c25dc 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c -@@ -67,6 +67,7 @@ +@@ -66,6 +66,7 @@ + #include <linux/kexec.h> #include <linux/bpf.h> #include <linux/mount.h> - #include <linux/pipe_fs_i.h> +#include <linux/tty.h> - #include <linux/uaccess.h> - #include <asm/processor.h> -@@ -99,12 +100,19 @@ + #include "../lib/kstrtox.h" + +@@ -102,12 +103,19 @@ #if defined(CONFIG_SYSCTL) /* External variables not in a header file. 
*/ @@ -1613,7 +1581,7 @@ index 2d9837c0aff4..852fea68d574 100644 extern int pid_max; extern int pid_max_min, pid_max_max; extern int percpu_pagelist_fraction; -@@ -116,40 +124,43 @@ extern int sysctl_nr_trim_pages; +@@ -119,35 +127,35 @@ extern int sysctl_nr_trim_pages; /* Constants used for minimum and maximum */ #ifdef CONFIG_LOCKUP_DETECTOR @@ -1624,17 +1592,22 @@ index 2d9837c0aff4..852fea68d574 100644 -static int __maybe_unused neg_one = -1; +static int __maybe_unused neg_one __read_only = -1; - static int zero; +-static int zero; -static int __maybe_unused one = 1; -static int __maybe_unused two = 2; -static int __maybe_unused four = 4; +-static unsigned long zero_ul; -static unsigned long one_ul = 1; +-static unsigned long long_max = LONG_MAX; -static int one_hundred = 100; -static int one_thousand = 1000; ++static int zero __read_only; +static int __maybe_unused one __read_only = 1; +static int __maybe_unused two __read_only = 2; +static int __maybe_unused four __read_only = 4; ++static unsigned long zero_ul __read_only; +static unsigned long one_ul __read_only = 1; ++static unsigned long long_max __read_only = LONG_MAX; +static int one_hundred __read_only = 100; +static int one_thousand __read_only = 1000; #ifdef CONFIG_PRINTK @@ -1660,7 +1633,10 @@ index 2d9837c0aff4..852fea68d574 100644 +static int ngroups_max __read_only = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; - /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */ + /* +@@ -155,9 +163,12 @@ static const int cap_last_cap = CAP_LAST_CAP; + * and hung_task_check_interval_secs + */ #ifdef CONFIG_DETECT_HUNG_TASK -static unsigned long hung_task_timeout_max = (LONG_MAX/HZ); +static unsigned long hung_task_timeout_max __read_only = (LONG_MAX/HZ); @@ -1672,7 +1648,7 @@ index 2d9837c0aff4..852fea68d574 100644 #ifdef CONFIG_INOTIFY_USER #include <linux/inotify.h> #endif -@@ -293,19 +304,19 @@ static struct ctl_table sysctl_base_table[] = { +@@ -306,19 +317,19 @@ static struct ctl_table sysctl_base_table[] = { }; #ifdef CONFIG_SCHED_DEBUG @@ -1700,7 +1676,7 @@ index 2d9837c0aff4..852fea68d574 100644 #endif static struct ctl_table kern_table[] = { -@@ -519,6 +530,15 @@ static struct ctl_table kern_table[] = { +@@ -535,6 +546,15 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif @@ -1716,13 +1692,13 @@ index 2d9837c0aff4..852fea68d574 100644 #ifdef CONFIG_PROC_SYSCTL { .procname = "tainted", -@@ -867,6 +887,37 @@ static struct ctl_table kern_table[] = { +@@ -890,6 +910,37 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &two, }, +#endif +#if defined CONFIG_TTY -+ { ++ { + .procname = "tiocsti_restrict", + .data = &tiocsti_restrict, + .maxlen = sizeof(int), @@ -1730,7 +1706,7 @@ index 2d9837c0aff4..852fea68d574 100644 + .proc_handler = proc_dointvec_minmax_sysadmin, + .extra1 = &zero, + .extra2 = &one, -+ }, ++ }, +#endif + { + .procname = "device_sidechannel_restrict", @@ -1755,10 +1731,10 @@ index 2d9837c0aff4..852fea68d574 100644 { .procname = "ngroups_max", diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 3e93c54bd3a1..0fed811ed850 100644 +index 41dfff23c1f9..298a1554c3e4 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c -@@ -1462,7 +1462,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, +@@ -1453,7 +1453,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, } } @@ -1768,10 +1744,10 @@ index 3e93c54bd3a1..0fed811ed850 100644 struct 
hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); unsigned long flags; diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index cc2d23e6ff61..438be7646454 100644 +index 2fce056f8a49..acac240068c1 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c -@@ -1672,7 +1672,7 @@ static inline void __run_timers(struct timer_base *base) +@@ -1687,7 +1687,7 @@ static inline void __run_timers(struct timer_base *base) /* * This function runs timers and the timer-tq in bottom half context. */ @@ -1781,7 +1757,7 @@ index cc2d23e6ff61..438be7646454 100644 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c -index c3d7583fcd21..6ee37e516869 100644 +index 923414a246e9..6b9dbc257e34 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -26,6 +26,9 @@ @@ -1795,10 +1771,20 @@ index c3d7583fcd21..6ee37e516869 100644 static DEFINE_MUTEX(userns_state_mutex); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug -index 8838d1158d19..a208770a0e9e 100644 +index d5a4a4036d2f..b16d39c4c407 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug -@@ -945,6 +945,7 @@ endmenu # "Debug lockups and hangs" +@@ -350,6 +350,9 @@ config SECTION_MISMATCH_WARN_ONLY + + If unsure, say Y. + ++config DEBUG_WRITABLE_FUNCTION_POINTERS_VERBOSE ++ bool "Enable verbose reporting of writable function pointers" ++ + # + # Select this config option from the architecture Kconfig, if it + # is preferred to always offer frame pointers as a config +@@ -966,6 +969,7 @@ endmenu # "Debug lockups and hangs" config PANIC_ON_OOPS bool "Panic on Oops" @@ -1806,7 +1792,7 @@ index 8838d1158d19..a208770a0e9e 100644 help Say Y here to enable the kernel to panic when it oopses. This has the same effect as setting oops=panic on the kernel command -@@ -954,7 +955,7 @@ config PANIC_ON_OOPS +@@ -975,7 +979,7 @@ config PANIC_ON_OOPS anything erroneous after an oops which could result in data corruption or other issues. @@ -1815,7 +1801,7 @@ index 8838d1158d19..a208770a0e9e 100644 config PANIC_ON_OOPS_VALUE int -@@ -1323,6 +1324,7 @@ config DEBUG_BUGVERBOSE +@@ -1344,6 +1348,7 @@ config DEBUG_BUGVERBOSE config DEBUG_LIST bool "Debug linked list manipulation" depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION @@ -1823,7 +1809,7 @@ index 8838d1158d19..a208770a0e9e 100644 help Enable this to turn on extended checks in the linked-list walking routines. 
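DEBUG_LIST, which the Kconfig.debug hunk above makes unconditional for hardened builds, validates a list's neighbouring pointers before every insertion; BUG_ON_DATA_CORRUPTION (next hunk) turns a failed check fatal. The core test is tiny — a simplified userspace sketch in the spirit of the kernel's __list_add_valid():

#include <stdio.h>
#include <stdlib.h>

struct list_head {
        struct list_head *next, *prev;
};

/* Before splicing an entry between prev and next, confirm they still
 * point at each other; a stray write or use-after-free shows up as a
 * mismatch before it can propagate further through the list. */
static int list_add_valid(struct list_head *prev, struct list_head *next)
{
        if (next->prev != prev || prev->next != next) {
                fprintf(stderr, "list corruption between %p and %p\n",
                        (void *)prev, (void *)next);
                return 0;
        }
        return 1;
}

static void list_add(struct list_head *entry, struct list_head *prev,
                     struct list_head *next)
{
        if (!list_add_valid(prev, next))
                abort();        /* kernel: BUG() under BUG_ON_DATA_CORRUPTION */
        next->prev = entry;
        entry->next = next;
        entry->prev = prev;
        prev->next = entry;
}

int main(void)
{
        struct list_head head = { &head, &head };
        struct list_head a, b;

        list_add(&a, &head, head.next);  /* ok: head <-> a */
        head.next = &b;                  /* simulate corruption */
        list_add(&b, &head, &a);         /* caught; aborts */
        return 0;
}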
-@@ -1983,6 +1985,7 @@ config MEMTEST +@@ -2026,6 +2031,7 @@ config MEMTEST config BUG_ON_DATA_CORRUPTION bool "Trigger a BUG when data corruption is detected" select DEBUG_LIST @@ -1831,7 +1817,7 @@ index 8838d1158d19..a208770a0e9e 100644 help Select this option if the kernel should BUG when it encounters data corruption in kernel memory structures when they get checked -@@ -2022,6 +2025,7 @@ config STRICT_DEVMEM +@@ -2065,6 +2071,7 @@ config STRICT_DEVMEM config IO_STRICT_DEVMEM bool "Filter I/O access to /dev/mem" depends on STRICT_DEVMEM @@ -1840,7 +1826,7 @@ index 8838d1158d19..a208770a0e9e 100644 If this option is disabled, you allow userspace (root) access to all io-memory regardless of whether a driver is actively using that diff --git a/lib/irq_poll.c b/lib/irq_poll.c -index 86a709954f5a..6f15787fcb1b 100644 +index 2f17b488d58e..b6e7996a0058 100644 --- a/lib/irq_poll.c +++ b/lib/irq_poll.c @@ -75,7 +75,7 @@ void irq_poll_complete(struct irq_poll *iop) @@ -1853,10 +1839,10 @@ index 86a709954f5a..6f15787fcb1b 100644 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll); int rearm = 0, budget = irq_poll_budget; diff --git a/lib/kobject.c b/lib/kobject.c -index 18989b5b3b56..bd46da8243a6 100644 +index aa89edcd2b63..c505d13ba323 100644 --- a/lib/kobject.c +++ b/lib/kobject.c -@@ -952,9 +952,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add); +@@ -978,9 +978,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add); static DEFINE_SPINLOCK(kobj_ns_type_lock); @@ -1869,10 +1855,10 @@ index 18989b5b3b56..bd46da8243a6 100644 enum kobj_ns_type type = ops->type; int error; diff --git a/lib/nlattr.c b/lib/nlattr.c -index dfa55c873c13..c6b0436f473d 100644 +index d26de6156b97..ed11787fcfe7 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c -@@ -364,6 +364,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count) +@@ -539,6 +539,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count) { int minlen = min_t(int, count, nla_len(src)); @@ -1882,11 +1868,11 @@ index dfa55c873c13..c6b0436f473d 100644 if (count > minlen) memset(dest + minlen, 0, count - minlen); diff --git a/lib/vsprintf.c b/lib/vsprintf.c -index a48aaa79d352..a57213b70cad 100644 +index 791b6fa36905..6d6a3ad3bf0f 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c -@@ -1371,7 +1371,7 @@ char *pointer_string(char *buf, char *end, const void *ptr, - return number(buf, end, (unsigned long int)ptr, spec); +@@ -1476,7 +1476,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr, + return string(buf, end, uuid, spec); } -int kptr_restrict __read_mostly; @@ -1895,10 +1881,10 @@ index a48aaa79d352..a57213b70cad 100644 static noinline_for_stack char *restricted_pointer(char *buf, char *end, const void *ptr, diff --git a/mm/Kconfig b/mm/Kconfig -index ce95491abd6a..19b62893da3d 100644 +index 2e6d24d783f7..c378add17049 100644 --- a/mm/Kconfig +++ b/mm/Kconfig -@@ -312,7 +312,8 @@ config KSM +@@ -306,7 +306,8 @@ config KSM config DEFAULT_MMAP_MIN_ADDR int "Low address space to protect from user allocation" depends on MMU @@ -1909,10 +1895,10 @@ index ce95491abd6a..19b62893da3d 100644 This is the portion of low virtual memory which should be protected from userspace allocation. 
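The mm/Kconfig hunk above raises DEFAULT_MMAP_MIN_ADDR from 4096 to 65536, so unprivileged processes cannot map the first 64 KiB of virtual memory and a kernel NULL (or small-offset) pointer dereference cannot land in attacker-controlled data. A quick Linux-specific probe:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Request a fixed mapping below vm.mmap_min_addr. Without the relevant
 * capability this is refused, which is the behaviour the raised default
 * guarantees out of the box. */
int main(void)
{
        void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

        if (p == MAP_FAILED)
                printf("mmap at 0x1000 refused: %s\n", strerror(errno));
        else
                puts("low mapping allowed (mmap_min_addr permits it)");
        return 0;
}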
Keeping a user from writing to low pages diff --git a/mm/mmap.c b/mm/mmap.c -index 17bbf4d3e24f..40ac799db35b 100644 +index 2d6a6662edb9..e154e7fc1d8a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c -@@ -229,6 +229,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) +@@ -233,6 +233,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) newbrk = PAGE_ALIGN(brk); oldbrk = PAGE_ALIGN(mm->brk); @@ -1923,24 +1909,24 @@ index 17bbf4d3e24f..40ac799db35b 100644 + if (mm->brk == min_brk) + oldbrk -= PAGE_SIZE; + } - if (oldbrk == newbrk) - goto set_brk; - + if (oldbrk == newbrk) { + mm->brk = brk; + goto success; diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index 3222193c46c6..b8e36bed196d 100644 +index 475ca5b1a824..87371ac3ad1e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -67,6 +67,7 @@ - #include <linux/ftrace.h> #include <linux/lockdep.h> #include <linux/nmi.h> + #include <linux/psi.h> +#include <linux/random.h> #include <asm/sections.h> #include <asm/tlbflush.h> -@@ -100,6 +101,15 @@ int _node_numa_mem_[MAX_NUMNODES]; +@@ -104,6 +105,15 @@ struct pcpu_drain { DEFINE_MUTEX(pcpu_drain_mutex); - DEFINE_PER_CPU(struct work_struct, pcpu_drain); + DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain); +bool __meminitdata extra_latent_entropy; + @@ -1954,7 +1940,7 @@ index 3222193c46c6..b8e36bed196d 100644 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY volatile unsigned long latent_entropy __latent_entropy; EXPORT_SYMBOL(latent_entropy); -@@ -1027,6 +1037,13 @@ static __always_inline bool free_pages_prepare(struct page *page, +@@ -1142,6 +1152,13 @@ static __always_inline bool free_pages_prepare(struct page *page, debug_check_no_obj_freed(page_address(page), PAGE_SIZE << order); } @@ -1968,10 +1954,13 @@ index 3222193c46c6..b8e36bed196d 100644 arch_free_page(page, order); kernel_poison_pages(page, 1 << order, 0); kernel_map_pages(page, 1 << order, 0); -@@ -1267,6 +1284,21 @@ static void __init __free_pages_boot_core(struct page *page, unsigned int order) - __ClearPageReserved(p); - set_page_count(p, 0); +@@ -1373,6 +1390,25 @@ static void __free_pages_ok(struct page *page, unsigned int order) + local_irq_restore(flags); + } ++static void __init __gather_extra_latent_entropy(struct page *page, ++ unsigned int nr_pages) ++{ + if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) { + unsigned long hash = 0; + size_t index, end = PAGE_SIZE * nr_pages / sizeof hash; @@ -1986,11 +1975,44 @@ index 3222193c46c6..b8e36bed196d 100644 + add_device_randomness((const void *)&hash, sizeof(hash)); +#endif + } ++} + - page_zone(page)->managed_pages += nr_pages; + void __free_pages_core(struct page *page, unsigned int order) + { + unsigned int nr_pages = 1 << order; +@@ -1387,7 +1423,6 @@ void __free_pages_core(struct page *page, unsigned int order) + } + __ClearPageReserved(p); + set_page_count(p, 0); +- + atomic_long_add(nr_pages, &page_zone(page)->managed_pages); set_page_refcounted(page); __free_pages(page, order); -@@ -1855,8 +1887,8 @@ static inline int check_new_page(struct page *page) +@@ -1452,6 +1487,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn, + { + if (early_page_uninitialised(pfn)) + return; ++ __gather_extra_latent_entropy(page, 1 << order); + __free_pages_core(page, order); + } + +@@ -1542,6 +1578,7 @@ static void __init deferred_free_range(unsigned long pfn, + if (nr_pages == pageblock_nr_pages && + (pfn & (pageblock_nr_pages - 1)) == 0) { + set_pageblock_migratetype(page, MIGRATE_MOVABLE); ++ __gather_extra_latent_entropy(page, 1 << pageblock_order); + 
__free_pages_core(page, pageblock_order); + return; + } +@@ -1549,6 +1586,7 @@ static void __init deferred_free_range(unsigned long pfn, + for (i = 0; i < nr_pages; i++, page++, pfn++) { + if ((pfn & (pageblock_nr_pages - 1)) == 0) + set_pageblock_migratetype(page, MIGRATE_MOVABLE); ++ __gather_extra_latent_entropy(page, 1); + __free_pages_core(page, 0); + } + } +@@ -1969,8 +2007,8 @@ static inline int check_new_page(struct page *page) static inline bool free_pages_prezeroed(void) { @@ -2001,7 +2023,7 @@ index 3222193c46c6..b8e36bed196d 100644 } #ifdef CONFIG_DEBUG_VM -@@ -1913,6 +1945,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags +@@ -2027,6 +2065,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags post_alloc_hook(page, order, gfp_flags); @@ -2014,10 +2036,10 @@ index 3222193c46c6..b8e36bed196d 100644 for (i = 0; i < (1 << order); i++) clear_highpage(page + i); diff --git a/mm/slab.h b/mm/slab.h -index 68bdf498da3b..079ff5df3b16 100644 +index 43ac818b8592..0f4fc66322c4 100644 --- a/mm/slab.h +++ b/mm/slab.h -@@ -313,7 +313,11 @@ static inline bool is_root_cache(struct kmem_cache *s) +@@ -310,7 +310,11 @@ static inline bool is_root_cache(struct kmem_cache *s) static inline bool slab_equal_or_root(struct kmem_cache *s, struct kmem_cache *p) { @@ -2029,7 +2051,7 @@ index 68bdf498da3b..079ff5df3b16 100644 } static inline const char *cache_name(struct kmem_cache *s) -@@ -365,18 +369,26 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) +@@ -362,18 +366,26 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) * to not do even the assignment. In that case, slab_equal_or_root * will also be a constant. */ @@ -2057,7 +2079,7 @@ index 68bdf498da3b..079ff5df3b16 100644 return s; } -@@ -401,7 +413,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s) +@@ -398,7 +410,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s) * back there or track user information then we can * only use the space before that information. 
*/ @@ -2067,7 +2089,7 @@ index 68bdf498da3b..079ff5df3b16 100644 /* * Else we can use all the padding etc for the allocation diff --git a/mm/slab_common.c b/mm/slab_common.c -index 2296caf87bfb..7abd5a11e12d 100644 +index 58251ba63e4a..fbaacef2acaf 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -27,10 +27,10 @@ @@ -2093,10 +2115,10 @@ index 2296caf87bfb..7abd5a11e12d 100644 static int __init setup_slab_nomerge(char *str) { diff --git a/mm/slub.c b/mm/slub.c -index 51258eff4178..075266bfbccf 100644 +index d30ede89f4a6..37db8891a099 100644 --- a/mm/slub.c +++ b/mm/slub.c -@@ -125,6 +125,16 @@ static inline int kmem_cache_debug(struct kmem_cache *s) +@@ -124,6 +124,16 @@ static inline int kmem_cache_debug(struct kmem_cache *s) #endif } @@ -2113,7 +2135,7 @@ index 51258eff4178..075266bfbccf 100644 void *fixup_red_left(struct kmem_cache *s, void *p) { if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) -@@ -299,6 +309,35 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) +@@ -308,6 +318,35 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr); } @@ -2149,7 +2171,7 @@ index 51258eff4178..075266bfbccf 100644 /* Loop over all objects in a slab */ #define for_each_object(__p, __s, __addr, __objects) \ for (__p = fixup_red_left(__s, __addr); \ -@@ -471,13 +510,13 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p) +@@ -475,13 +514,13 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p) * Debug settings: */ #if defined(CONFIG_SLUB_DEBUG_ON) @@ -2167,7 +2189,7 @@ index 51258eff4178..075266bfbccf 100644 /* * slub is about to manipulate internal object metadata. This memory lies -@@ -537,6 +576,9 @@ static struct track *get_track(struct kmem_cache *s, void *object, +@@ -542,6 +581,9 @@ static struct track *get_track(struct kmem_cache *s, void *object, else p = object + s->inuse; @@ -2177,7 +2199,7 @@ index 51258eff4178..075266bfbccf 100644 return p + alloc; } -@@ -676,6 +718,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) +@@ -681,6 +723,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) else off = s->inuse; @@ -2187,7 +2209,7 @@ index 51258eff4178..075266bfbccf 100644 if (s->flags & SLAB_STORE_USER) off += 2 * sizeof(struct track); -@@ -805,6 +850,9 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) +@@ -810,6 +855,9 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) /* Freepointer is placed after the object. 
*/ off += sizeof(void *); @@ -2197,18 +2219,18 @@ index 51258eff4178..075266bfbccf 100644 if (s->flags & SLAB_STORE_USER) /* We also have user information there */ off += 2 * sizeof(struct track); -@@ -1419,8 +1467,9 @@ static void setup_object(struct kmem_cache *s, struct page *page, +@@ -1475,8 +1523,9 @@ static void *setup_object(struct kmem_cache *s, struct page *page, void *object) { setup_object_debug(s, page, object); + set_canary(s, object, s->random_inactive); - kasan_init_slab_obj(s, object); + object = kasan_init_slab_obj(s, object); - if (unlikely(s->ctor)) { + if (unlikely(s->ctor) && !has_sanitize_verify(s)) { kasan_unpoison_object_data(s, object); s->ctor(object); kasan_poison_object_data(s, object); -@@ -2702,9 +2751,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, +@@ -2750,9 +2799,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, stat(s, ALLOC_FASTPATH); } @@ -2231,7 +2253,7 @@ index 51258eff4178..075266bfbccf 100644 slab_post_alloc_hook(s, gfpflags, 1, &object); return object; -@@ -2911,6 +2972,27 @@ static __always_inline void do_slab_free(struct kmem_cache *s, +@@ -2959,6 +3020,27 @@ static __always_inline void do_slab_free(struct kmem_cache *s, void *tail_obj = tail ? : head; struct kmem_cache_cpu *c; unsigned long tid; @@ -2259,7 +2281,7 @@ index 51258eff4178..075266bfbccf 100644 redo: /* * Determine the currently cpus per cpu slab. -@@ -3087,7 +3169,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, +@@ -3135,7 +3217,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p) { struct kmem_cache_cpu *c; @@ -2268,7 +2290,7 @@ index 51258eff4178..075266bfbccf 100644 /* memcg and kmem_cache debug support */ s = slab_pre_alloc_hook(s, flags); -@@ -3124,13 +3206,29 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, +@@ -3172,13 +3254,29 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, local_irq_enable(); /* Clear memory outside IRQ disabled fastpath loop */ @@ -2299,7 +2321,7 @@ index 51258eff4178..075266bfbccf 100644 /* memcg and kmem_cache debug support */ slab_post_alloc_hook(s, flags, size, p); return i; -@@ -3162,9 +3260,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk); +@@ -3210,9 +3308,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk); * and increases the number of allocations possible without having to * take the list_lock. */ @@ -2312,15 +2334,15 @@ index 51258eff4178..075266bfbccf 100644 /* * Calculate the order of allocation given an slab object size. 
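Alongside the canary code, this part of mm/slub.c leans on SLAB_FREELIST_HARDENED's per-cache secret (the s->random seeded from get_random_long() in the next hunk): free-list links are stored XORed with the secret and the slot's own address, so a leaked heap pointer does not expose the freelist and a forged link decodes to garbage without the secret. A compact userspace sketch of the 5.1-era encode/decode — one XOR serves both directions:

#include <stdint.h>
#include <stdio.h>

static const uintptr_t cache_random = 0xc0ffee; /* kernel: get_random_long() */

/* Encode a next-free link before storing it inside a free object; the
 * identical operation decodes it on the way back out. */
static void *freelist_ptr(void *ptr, uintptr_t ptr_addr)
{
        return (void *)((uintptr_t)ptr ^ cache_random ^ ptr_addr);
}

int main(void)
{
        char slot[64];                      /* pretend free slab object */
        void *next = &slot[32];             /* pretend next-free link */
        uintptr_t addr = (uintptr_t)slot;

        void *stored  = freelist_ptr(next, addr);   /* what memory holds */
        void *decoded = freelist_ptr(stored, addr); /* what the allocator uses */

        printf("next=%p stored=%p decoded=%p\n", next, stored, decoded);
        return 0;
}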
-@@ -3336,6 +3434,7 @@ static void early_kmem_cache_node_alloc(int node) +@@ -3380,6 +3478,7 @@ static void early_kmem_cache_node_alloc(int node) init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); init_tracking(kmem_cache_node, n); #endif + set_canary(kmem_cache_node, n, kmem_cache_node->random_active); - kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node), + n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node), GFP_KERNEL); - init_kmem_cache_node(n); -@@ -3492,6 +3591,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) + page->freelist = get_freepointer(kmem_cache_node, n); +@@ -3540,6 +3639,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) size += sizeof(void *); } @@ -2330,7 +2352,7 @@ index 51258eff4178..075266bfbccf 100644 #ifdef CONFIG_SLUB_DEBUG if (flags & SLAB_STORE_USER) /* -@@ -3561,6 +3663,10 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) +@@ -3612,6 +3714,10 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) #ifdef CONFIG_SLAB_FREELIST_HARDENED s->random = get_random_long(); #endif @@ -2341,7 +2363,7 @@ index 51258eff4178..075266bfbccf 100644 if (!calculate_sizes(s, -1)) goto error; -@@ -3837,6 +3943,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, +@@ -3887,6 +3993,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, offset -= s->red_left_pad; } @@ -2350,7 +2372,7 @@ index 51258eff4178..075266bfbccf 100644 /* Allow address range falling entirely within usercopy region. */ if (offset >= s->useroffset && offset - s->useroffset <= s->usersize && -@@ -3870,7 +3978,11 @@ static size_t __ksize(const void *object) +@@ -3920,7 +4028,11 @@ static size_t __ksize(const void *object) page = virt_to_head_page(object); if (unlikely(!PageSlab(page))) { @@ -2362,7 +2384,7 @@ index 51258eff4178..075266bfbccf 100644 return PAGE_SIZE << compound_order(page); } -@@ -4730,7 +4842,7 @@ enum slab_stat_type { +@@ -4777,7 +4889,7 @@ enum slab_stat_type { #define SO_TOTAL (1 << SL_TOTAL) #ifdef CONFIG_MEMCG @@ -2372,10 +2394,10 @@ index 51258eff4178..075266bfbccf 100644 static int __init setup_slub_memcg_sysfs(char *str) { diff --git a/mm/swap.c b/mm/swap.c -index 26fc9b5f1b6c..7c9312ca8982 100644 +index 301ed4e04320..cff1e4d6d04f 100644 --- a/mm/swap.c +++ b/mm/swap.c -@@ -93,6 +93,13 @@ static void __put_compound_page(struct page *page) +@@ -92,6 +92,13 @@ static void __put_compound_page(struct page *page) if (!PageHuge(page)) __page_cache_release(page); dtor = get_compound_page_dtor(page); @@ -2390,10 +2412,10 @@ index 26fc9b5f1b6c..7c9312ca8982 100644 } diff --git a/net/core/dev.c b/net/core/dev.c -index 559a91271f82..1e3b8deea1a7 100644 +index c6b2f6db0a9b..89b55db262a0 100644 --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -4259,7 +4259,7 @@ int netif_rx_ni(struct sk_buff *skb) +@@ -4588,7 +4588,7 @@ int netif_rx_ni(struct sk_buff *skb) } EXPORT_SYMBOL(netif_rx_ni); @@ -2402,7 +2424,7 @@ index 559a91271f82..1e3b8deea1a7 100644 { struct softnet_data *sd = this_cpu_ptr(&softnet_data); -@@ -5811,7 +5811,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) +@@ -6402,7 +6402,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) return work; } @@ -2412,7 +2434,7 @@ index 559a91271f82..1e3b8deea1a7 100644 struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig -index 80dad301361d..0c85c2a29ac1 100644 
+index 32cae39cdff6..9141d7ae99b2 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -266,6 +266,7 @@ config IP_PIMSM_V2 @@ -2423,19 +2445,48 @@ index 80dad301361d..0c85c2a29ac1 100644 ---help--- Normal TCP/IP networking is open to an attack known as "SYN flooding". This denial-of-service attack prevents legitimate remote +diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost +index 6b7f354f189a..360b015678a7 100644 +--- a/scripts/Makefile.modpost ++++ b/scripts/Makefile.modpost +@@ -78,6 +78,7 @@ modpost = scripts/mod/modpost \ + $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \ + $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \ + $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \ ++ $(if $(CONFIG_DEBUG_WRITABLE_FUNCTION_POINTERS_VERBOSE),-f) \ + $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) + + MODPOST_OPT=$(subst -i,-n,$(filter -i,$(MAKEFLAGS))) +diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig +index 74271dba4f94..21c842d26556 100644 +--- a/scripts/gcc-plugins/Kconfig ++++ b/scripts/gcc-plugins/Kconfig +@@ -59,6 +59,11 @@ config GCC_PLUGIN_LATENT_ENTROPY + is some slowdown of the boot process (about 0.5%) and fork and + irq processing. + ++ When extra_latent_entropy is passed on the kernel command line, ++ entropy will be extracted from up to the first 4GB of RAM while the ++ runtime memory allocator is being initialized. This costs even more ++ slowdown of the boot process. ++ + Note that entropy extracted this way is not cryptographically + secure! + diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c -index 1663fb19343a..4b44744fc1be 100644 +index f277e116e0eb..f93c582acc69 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c -@@ -35,6 +35,7 @@ static int vmlinux_section_warnings = 1; - static int warn_unresolved = 0; +@@ -36,6 +36,8 @@ static int warn_unresolved = 0; /* How a symbol is exported */ static int sec_mismatch_count = 0; -+static int writable_fptr_count = 0; - static int sec_mismatch_verbose = 1; static int sec_mismatch_fatal = 0; ++static int writable_fptr_count = 0; ++static int writable_fptr_verbose = 0; /* ignore missing files */ -@@ -954,6 +955,7 @@ enum mismatch { + static int ignore_missing_files; + +@@ -953,6 +955,7 @@ enum mismatch { ANY_EXIT_TO_ANY_INIT, EXPORT_TO_INIT_EXIT, EXTABLE_TO_NON_TEXT, @@ -2443,7 +2494,7 @@ index 1663fb19343a..4b44744fc1be 100644 }; /** -@@ -1080,6 +1082,12 @@ static const struct sectioncheck sectioncheck[] = { +@@ -1079,6 +1082,12 @@ static const struct sectioncheck sectioncheck[] = { .good_tosec = {ALL_TEXT_SECTIONS , NULL}, .mismatch = EXTABLE_TO_NON_TEXT, .handler = extable_mismatch_handler, @@ -2456,9 +2507,9 @@ index 1663fb19343a..4b44744fc1be 100644 } }; -@@ -1229,10 +1237,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr, +@@ -1266,10 +1275,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr, continue; - if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) + if (!is_valid_name(elf, sym)) continue; - if (sym->st_value == addr) - return sym; @@ -2469,54 +2520,70 @@ index 1663fb19343a..4b44744fc1be 100644 if (d < 0) d = addr - sym->st_value; if (d < distance) { -@@ -1391,7 +1399,11 @@ static void report_sec_mismatch(const char *modname, +@@ -1404,7 +1413,13 @@ static void report_sec_mismatch(const char *modname, char *prl_from; char *prl_to; - sec_mismatch_count++; -+ if (mismatch->mismatch == DATA_TO_TEXT) ++ if (mismatch->mismatch == DATA_TO_TEXT) { + writable_fptr_count++; -+ else ++ if 
(!writable_fptr_verbose) ++ return; ++ } else { + sec_mismatch_count++; -+ - if (!sec_mismatch_verbose) - return; ++ } -@@ -1515,6 +1527,14 @@ static void report_sec_mismatch(const char *modname, + get_pretty_name(from_is_func, &from, &from_p); + get_pretty_name(to_is_func, &to, &to_p); +@@ -1526,6 +1541,12 @@ static void report_sec_mismatch(const char *modname, fatal("There's a special handler for this mismatch type, " "we should never get here."); break; + case DATA_TO_TEXT: -+#if 0 + fprintf(stderr, + "The %s %s:%s references\n" + "the %s %s:%s%s\n", + from, fromsec, fromsym, to, tosec, tosym, to_p); -+#endif + break; } fprintf(stderr, "\n"); } -@@ -2523,6 +2543,14 @@ int main(int argc, char **argv) - } - } +@@ -2428,7 +2449,7 @@ int main(int argc, char **argv) + struct ext_sym_list *extsym_iter; + struct ext_sym_list *extsym_start = NULL; + +- while ((opt = getopt(argc, argv, "i:I:e:mnsT:o:awE")) != -1) { ++ while ((opt = getopt(argc, argv, "i:I:e:fmnsT:o:awE")) != -1) { + switch (opt) { + case 'i': + kernel_read = optarg; +@@ -2445,6 +2466,9 @@ int main(int argc, char **argv) + extsym_iter->file = optarg; + extsym_start = extsym_iter; + break; ++ case 'f': ++ writable_fptr_verbose = 1; ++ break; + case 'm': + modversions = 1; + break; +@@ -2521,6 +2545,11 @@ int main(int argc, char **argv) + fatal("modpost: Section mismatches detected.\n" + "Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.\n"); free(buf.p); -+ if (writable_fptr_count) { -+ if (!sec_mismatch_verbose) { -+ warn("modpost: Found %d writable function pointer(s).\n" -+ "To see full details build your kernel with:\n" -+ "'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n", -+ writable_fptr_count); -+ } -+ } ++ if (writable_fptr_count && !writable_fptr_verbose) ++ warn("modpost: Found %d writable function pointer%s.\n" ++ "To see full details build your kernel with:\n" ++ "'make CONFIG_DEBUG_WRITABLE_FUNCTION_POINTERS_VERBOSE=y'\n", ++ writable_fptr_count, (writable_fptr_count == 1 ? "" : "s")); return err; } diff --git a/security/Kconfig b/security/Kconfig -index c4302067a3ad..f5f5ff98e6b8 100644 +index 353cfef71d4e..d6b7f1d89985 100644 --- a/security/Kconfig +++ b/security/Kconfig -@@ -8,7 +8,7 @@ source security/keys/Kconfig +@@ -8,7 +8,7 @@ source "security/keys/Kconfig" config SECURITY_DMESG_RESTRICT bool "Restrict unprivileged access to the kernel syslog" @@ -2560,7 +2627,7 @@ index c4302067a3ad..f5f5ff98e6b8 100644 help This allows you to choose different security modules to be configured into your kernel. -@@ -48,6 +72,7 @@ config SECURITYFS +@@ -47,6 +71,7 @@ config SECURITYFS config SECURITY_NETWORK bool "Socket and Networking Security Hooks" depends on SECURITY @@ -2568,15 +2635,15 @@ index c4302067a3ad..f5f5ff98e6b8 100644 help This enables the socket and networking security hooks. 
If enabled, a security module can use these hooks to -@@ -155,6 +180,7 @@ config HARDENED_USERCOPY +@@ -153,6 +178,7 @@ config HARDENED_USERCOPY + bool "Harden memory copies between kernel and userspace" depends on HAVE_HARDENED_USERCOPY_ALLOCATOR - select BUG imply STRICT_DEVMEM + default y help This option checks for obviously wrong memory regions when copying memory to/from the kernel (via copy_to_user() and -@@ -167,7 +193,6 @@ config HARDENED_USERCOPY +@@ -165,7 +191,6 @@ config HARDENED_USERCOPY config HARDENED_USERCOPY_FALLBACK bool "Allow usercopy whitelist violations to fallback to object size" depends on HARDENED_USERCOPY @@ -2584,7 +2651,7 @@ index c4302067a3ad..f5f5ff98e6b8 100644 help This is a temporary option that allows missing usercopy whitelists to be discovered via a WARN() to the kernel log, instead of -@@ -192,10 +217,36 @@ config HARDENED_USERCOPY_PAGESPAN +@@ -190,10 +215,36 @@ config HARDENED_USERCOPY_PAGESPAN config FORTIFY_SOURCE bool "Harden common str/mem functions against buffer overflows" depends on ARCH_HAS_FORTIFY_SOURCE @@ -2622,7 +2689,7 @@ index c4302067a3ad..f5f5ff98e6b8 100644 bool "Force all usermode helper calls through a single binary" help diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig -index 8af7a690eb40..6539694b0fd3 100644 +index 55f032f1fc2d..7275fde1218d 100644 --- a/security/selinux/Kconfig +++ b/security/selinux/Kconfig @@ -2,7 +2,7 @@ config SECURITY_SELINUX @@ -2634,7 +2701,7 @@ index 8af7a690eb40..6539694b0fd3 100644 help This selects NSA Security-Enhanced Linux (SELinux). You will also need a policy configuration and a labeled filesystem. -@@ -79,23 +79,3 @@ config SECURITY_SELINUX_AVC_STATS +@@ -64,23 +64,3 @@ config SECURITY_SELINUX_AVC_STATS This option collects access vector cache statistics to /selinux/avc/cache_stats, which may be monitored via tools such as avcstat. @@ -2659,11 +2726,11 @@ index 8af7a690eb40..6539694b0fd3 100644 - - If you are unsure how to answer this question, answer 0. 
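The modpost extension earlier in this diff (the DATA_TO_TEXT mismatch class behind writable_fptr_count) flags function pointers that end up in writable sections; the fix it nudges developers toward is declaring the containing object const so the pointer lands in .rodata. A minimal userspace illustration of the difference (section names are the usual ELF ones):

#include <stdio.h>

struct ops {
        void (*handler)(void);
};

static void handler(void) { puts("called"); }

/* Writable: this object sits in .data, so the pointer can be redirected
 * by a stray or malicious write — what the DATA_TO_TEXT check reports. */
static struct ops writable_ops = { .handler = handler };

/* Read-only: const moves the object to .rodata, taking the pointer out
 * of reach of runtime writes. */
static const struct ops readonly_ops = { .handler = handler };

int main(void)
{
        writable_ops.handler();
        readonly_ops.handler();
        return 0;
}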
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c -index 2b5ee5fbd652..99c7ed953d4e 100644 +index 614bc753822c..8c68df47483e 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c -@@ -135,18 +135,7 @@ __setup("selinux=", selinux_enabled_setup); - int selinux_enabled = 1; +@@ -134,18 +134,7 @@ static int __init selinux_enabled_setup(char *str) + __setup("selinux=", selinux_enabled_setup); #endif -static unsigned int selinux_checkreqprot_boot = @@ -2680,13 +2747,21 @@ index 2b5ee5fbd652..99c7ed953d4e 100644 -__setup("checkreqprot=", checkreqprot_setup); +static const unsigned int selinux_checkreqprot_boot; - static struct kmem_cache *sel_inode_cache; - static struct kmem_cache *file_security_cache; + /** + * selinux_secmark_enabled - Check to see if SECMARK is currently enabled diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c -index 79d3709b0671..4db06a12d48c 100644 +index 145ee62f205a..8a42d6a531a8 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c -@@ -664,10 +664,9 @@ static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf, +@@ -640,7 +640,6 @@ static ssize_t sel_read_checkreqprot(struct file *filp, char __user *buf, + static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) + { +- struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info; + char *page; + ssize_t length; + unsigned int new_value; +@@ -664,10 +663,9 @@ static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf, return PTR_ERR(page); length = -EINVAL; diff --git a/sys-kernel/linux-sources-redcore/files/mute-pps_state_mismatch.patch b/sys-kernel/linux-sources-redcore/files/5.1-mute-pps_state_mismatch.patch index 5bc1eff7..5bc1eff7 100644 --- a/sys-kernel/linux-sources-redcore/files/mute-pps_state_mismatch.patch +++ b/sys-kernel/linux-sources-redcore/files/5.1-mute-pps_state_mismatch.patch diff --git a/sys-kernel/linux-sources-redcore/files/radeon_dp_aux_transfer_native-no-ratelimited_debug.patch b/sys-kernel/linux-sources-redcore/files/5.1-radeon_dp_aux_transfer_native-no-ratelimited_debug.patch index 6ffcb42c..6ffcb42c 100644 --- a/sys-kernel/linux-sources-redcore/files/radeon_dp_aux_transfer_native-no-ratelimited_debug.patch +++ b/sys-kernel/linux-sources-redcore/files/5.1-radeon_dp_aux_transfer_native-no-ratelimited_debug.patch diff --git a/sys-kernel/linux-sources-redcore/files/uksm-for-linux-hardened.patch b/sys-kernel/linux-sources-redcore/files/5.1-uksm-linux-hardened.patch index d973274a..a79cb3d5 100644 --- a/sys-kernel/linux-sources-redcore/files/uksm-for-linux-hardened.patch +++ b/sys-kernel/linux-sources-redcore/files/5.1-uksm-linux-hardened.patch @@ -1,20 +1,6 @@ -diff -Nur a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX ---- a/Documentation/vm/00-INDEX 2018-08-24 12:04:51.000000000 +0100 -+++ b/Documentation/vm/00-INDEX 2018-08-27 10:44:36.340467187 +0100 -@@ -18,7 +18,9 @@ - - explains what hwpoison is - ksm.rst - - how to use the Kernel Samepage Merging feature. --mmu_notifier.rst -+uksm.txt -+ - Introduction to Ultra KSM -+mmu_notifier.txt - - a note about clearing pte/pmd and mmu notifications - numa.rst - - information about NUMA specific code in the Linux vm. 
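The UKSM patch beginning below replaces stock KSM's opt-in model: as the documentation hunks note, legacy KSM merges only regions an application has flagged with madvise(MADV_MERGEABLE), and only while /sys/kernel/mm/ksm/run is 1, whereas UKSM's stated goal is to scan anonymous memory without that opt-in (note the unconditional uksm_vma_add_new() hook added to fork below). For contrast, the legacy opt-in looks like this (Linux-specific sketch):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (16 * 4096)

/* Map an anonymous region, fill it with identical pages, and mark it
 * mergeable; with CONFIG_KSM and ksm/run=1, ksmd may later collapse
 * the duplicates into a single shared copy-on-write page. */
int main(void)
{
        char *buf = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;
        memset(buf, 0x41, LEN);  /* every page now has the same contents */
        if (madvise(buf, LEN, MADV_MERGEABLE) != 0)
                perror("madvise(MADV_MERGEABLE)");
        return 0;
}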
diff -Nur a/Documentation/vm/uksm.txt b/Documentation/vm/uksm.txt --- a/Documentation/vm/uksm.txt 1970-01-01 01:00:00.000000000 +0100 -+++ b/Documentation/vm/uksm.txt 2018-08-27 10:44:36.340467187 +0100 ++++ b/Documentation/vm/uksm.txt 2019-07-07 09:32:50.451840235 +0100 @@ -0,0 +1,61 @@ +The Ultra Kernel Samepage Merging feature +---------------------------------------------- @@ -78,8 +64,8 @@ diff -Nur a/Documentation/vm/uksm.txt b/Documentation/vm/uksm.txt +2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation. +2017-02-26 UKSM 0.1.2.6 Fix a bug in hugetlbpage handling and a race bug with page migration. diff -Nur a/fs/exec.c b/fs/exec.c ---- a/fs/exec.c 2018-08-27 10:42:48.184976507 +0100 -+++ b/fs/exec.c 2018-08-27 10:47:39.413380371 +0100 +--- a/fs/exec.c 2019-07-07 09:08:19.132347946 +0100 ++++ b/fs/exec.c 2019-07-07 09:33:47.653770486 +0100 @@ -63,6 +63,7 @@ #include <linux/compat.h> #include <linux/vmalloc.h> @@ -88,7 +74,7 @@ diff -Nur a/fs/exec.c b/fs/exec.c #include <linux/uaccess.h> #include <asm/mmu_context.h> -@@ -1381,6 +1382,7 @@ +@@ -1385,6 +1386,7 @@ /* An exec changes our domain. We are no longer part of the thread group */ current->self_exec_id++; @@ -97,9 +83,9 @@ diff -Nur a/fs/exec.c b/fs/exec.c } EXPORT_SYMBOL(setup_new_exec); diff -Nur a/fs/proc/meminfo.c b/fs/proc/meminfo.c ---- a/fs/proc/meminfo.c 2018-08-24 12:04:51.000000000 +0100 -+++ b/fs/proc/meminfo.c 2018-08-27 10:44:36.341467220 +0100 -@@ -105,6 +105,10 @@ +--- a/fs/proc/meminfo.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/fs/proc/meminfo.c 2019-07-07 09:32:50.451840235 +0100 +@@ -106,6 +106,10 @@ global_zone_page_state(NR_KERNEL_STACK_KB)); show_val_kb(m, "PageTables: ", global_zone_page_state(NR_PAGETABLE)); @@ -111,9 +97,9 @@ diff -Nur a/fs/proc/meminfo.c b/fs/proc/meminfo.c show_val_kb(m, "Quicklists: ", quicklist_total_size()); #endif diff -Nur a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h ---- a/include/asm-generic/pgtable.h 2018-08-24 12:04:51.000000000 +0100 -+++ b/include/asm-generic/pgtable.h 2018-08-27 10:44:36.341467220 +0100 -@@ -817,12 +817,25 @@ +--- a/include/asm-generic/pgtable.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/asm-generic/pgtable.h 2019-07-07 09:32:50.451840235 +0100 +@@ -855,12 +855,25 @@ extern void untrack_pfn_moved(struct vm_area_struct *vma); #endif @@ -140,7 +126,7 @@ diff -Nur a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h } #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) -@@ -831,7 +844,7 @@ +@@ -869,7 +882,7 @@ static inline int is_zero_pfn(unsigned long pfn) { extern unsigned long zero_pfn; @@ -150,33 +136,43 @@ diff -Nur a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h static inline unsigned long my_zero_pfn(unsigned long addr) diff -Nur a/include/linux/ksm.h b/include/linux/ksm.h ---- a/include/linux/ksm.h 2018-08-24 12:04:51.000000000 +0100 -+++ b/include/linux/ksm.h 2018-08-27 10:44:36.341467220 +0100 -@@ -21,21 +21,6 @@ +--- a/include/linux/ksm.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/ksm.h 2019-07-07 09:32:50.451840235 +0100 +@@ -1,4 +1,4 @@ +-/* SPDX-License-Identifier: GPL-2.0 */ ++/* SPDX-License-Identifier: GPL-3.0 */ + #ifndef __LINUX_KSM_H + #define __LINUX_KSM_H + /* +@@ -21,20 +21,16 @@ #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); -int __ksm_enter(struct mm_struct *mm); -void __ksm_exit(struct mm_struct *mm); -- + -static inline int ksm_fork(struct mm_struct *mm, struct 
mm_struct *oldmm) --{ ++static inline struct stable_node *page_stable_node(struct page *page) + { - if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) - return __ksm_enter(mm); - return 0; --} -- ++ return PageKsm(page) ? page_rmapping(page) : NULL; + } + -static inline void ksm_exit(struct mm_struct *mm) --{ ++static inline void set_page_stable_node(struct page *page, ++ struct stable_node *stable_node) + { - if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) - __ksm_exit(mm); --} ++ page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); + } /* - * When do_swap_page() first faults in from swap what used to be a KSM page, -@@ -54,6 +39,46 @@ - void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); - void ksm_migrate_page(struct page *newpage, struct page *oldpage); +@@ -56,6 +52,33 @@ + bool reuse_ksm_page(struct page *page, + struct vm_area_struct *vma, unsigned long address); +#ifdef CONFIG_KSM_LEGACY +int __ksm_enter(struct mm_struct *mm); @@ -203,25 +199,12 @@ diff -Nur a/include/linux/ksm.h b/include/linux/ksm.h +static inline void ksm_exit(struct mm_struct *mm) +{ +} -+ -+static inline void set_page_stable_node(struct page *page, -+ struct stable_node *stable_node) -+{ -+ page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); -+} -+ -+static inline struct stable_node *page_stable_node(struct page *page) -+{ -+ return PageKsm(page) ? page_rmapping(page) : NULL; -+} -+ -+ +#endif /* !CONFIG_UKSM */ + #else /* !CONFIG_KSM */ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) -@@ -89,4 +114,6 @@ +@@ -96,4 +119,6 @@ #endif /* CONFIG_MMU */ #endif /* !CONFIG_KSM */ @@ -229,9 +212,9 @@ diff -Nur a/include/linux/ksm.h b/include/linux/ksm.h + #endif /* __LINUX_KSM_H */ diff -Nur a/include/linux/mm_types.h b/include/linux/mm_types.h ---- a/include/linux/mm_types.h 2018-08-24 12:04:51.000000000 +0100 -+++ b/include/linux/mm_types.h 2018-08-27 10:44:36.342467252 +0100 -@@ -320,6 +320,9 @@ +--- a/include/linux/mm_types.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/mm_types.h 2019-07-07 09:32:50.451840235 +0100 +@@ -334,6 +334,9 @@ struct mempolicy *vm_policy; /* NUMA policy for the VMA */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; @@ -242,8 +225,8 @@ diff -Nur a/include/linux/mm_types.h b/include/linux/mm_types.h struct core_thread { diff -Nur a/include/linux/mmzone.h b/include/linux/mmzone.h ---- a/include/linux/mmzone.h 2018-08-24 12:04:51.000000000 +0100 -+++ b/include/linux/mmzone.h 2018-08-27 10:44:36.342467252 +0100 +--- a/include/linux/mmzone.h 2019-06-25 04:34:56.000000000 +0100 ++++ b/include/linux/mmzone.h 2019-07-07 09:32:50.451840235 +0100 @@ -148,6 +148,9 @@ NR_ZSPAGES, /* allocated in zsmalloc */ #endif @@ -254,18 +237,9 @@ diff -Nur a/include/linux/mmzone.h b/include/linux/mmzone.h NR_VM_ZONE_STAT_ITEMS }; enum node_stat_item { -@@ -865,7 +868,7 @@ - } - - /** -- * is_highmem - helper function to quickly check if a struct zone is a -+ * is_highmem - helper function to quickly check if a struct zone is a - * highmem zone or not. This is an attempt to keep references - * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. 
- * @zone - pointer to struct zone variable diff -Nur a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h --- a/include/linux/sradix-tree.h 1970-01-01 01:00:00.000000000 +0100 -+++ b/include/linux/sradix-tree.h 2018-08-27 10:44:36.343467284 +0100 ++++ b/include/linux/sradix-tree.h 2019-07-07 09:32:50.451840235 +0100 @@ -0,0 +1,77 @@ +#ifndef _LINUX_SRADIX_TREE_H +#define _LINUX_SRADIX_TREE_H @@ -346,7 +320,7 @@ diff -Nur a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h +#endif /* _LINUX_SRADIX_TREE_H */ diff -Nur a/include/linux/uksm.h b/include/linux/uksm.h --- a/include/linux/uksm.h 1970-01-01 01:00:00.000000000 +0100 -+++ b/include/linux/uksm.h 2018-08-27 10:44:36.343467284 +0100 ++++ b/include/linux/uksm.h 2019-07-07 09:32:50.451840235 +0100 @@ -0,0 +1,149 @@ +#ifndef __LINUX_UKSM_H +#define __LINUX_UKSM_H @@ -498,9 +472,9 @@ diff -Nur a/include/linux/uksm.h b/include/linux/uksm.h +#endif /* !CONFIG_UKSM */ +#endif /* __LINUX_UKSM_H */ diff -Nur a/kernel/fork.c b/kernel/fork.c ---- a/kernel/fork.c 2018-08-27 10:42:48.208977282 +0100 -+++ b/kernel/fork.c 2018-08-27 10:44:36.344467317 +0100 -@@ -542,7 +542,7 @@ +--- a/kernel/fork.c 2019-07-07 09:08:19.152348621 +0100 ++++ b/kernel/fork.c 2019-07-07 09:32:50.451840235 +0100 +@@ -584,7 +584,7 @@ __vma_link_rb(mm, tmp, rb_link, rb_parent); rb_link = &tmp->vm_rb.rb_right; rb_parent = &tmp->vm_rb; @@ -510,20 +484,20 @@ diff -Nur a/kernel/fork.c b/kernel/fork.c if (!(tmp->vm_flags & VM_WIPEONFORK)) retval = copy_page_range(mm, oldmm, mpnt); diff -Nur a/lib/Makefile b/lib/Makefile ---- a/lib/Makefile 2018-08-24 12:04:51.000000000 +0100 -+++ b/lib/Makefile 2018-08-27 10:44:36.344467317 +0100 -@@ -18,7 +18,7 @@ - KCOV_INSTRUMENT_dynamic_debug.o := n +--- a/lib/Makefile 2019-06-25 04:34:56.000000000 +0100 ++++ b/lib/Makefile 2019-07-07 09:32:50.451840235 +0100 +@@ -29,7 +29,7 @@ + endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ -- rbtree.o radix-tree.o timerqueue.o\ -+ rbtree.o radix-tree.o sradix-tree.o timerqueue.o\ +- rbtree.o radix-tree.o timerqueue.o xarray.o \ ++ rbtree.o radix-tree.o sradix-tree.o timerqueue.o xarray.o \ idr.o int_sqrt.o extable.o \ - sha1.o chacha20.o irq_regs.o argv_split.o \ + sha1.o chacha.o irq_regs.o argv_split.o \ flex_proportions.o ratelimit.o show_mem.o \ diff -Nur a/lib/sradix-tree.c b/lib/sradix-tree.c --- a/lib/sradix-tree.c 1970-01-01 01:00:00.000000000 +0100 -+++ b/lib/sradix-tree.c 2018-08-27 10:44:36.344467317 +0100 ++++ b/lib/sradix-tree.c 2019-07-07 09:32:50.451840235 +0100 @@ -0,0 +1,476 @@ +#include <linux/errno.h> +#include <linux/mm.h> @@ -1002,9 +976,9 @@ diff -Nur a/lib/sradix-tree.c b/lib/sradix-tree.c + return 0; +} diff -Nur a/mm/Kconfig b/mm/Kconfig ---- a/mm/Kconfig 2018-08-27 10:42:48.219977637 +0100 -+++ b/mm/Kconfig 2018-08-27 10:44:36.345467349 +0100 -@@ -308,6 +308,32 @@ +--- a/mm/Kconfig 2019-07-07 09:08:19.162348955 +0100 ++++ b/mm/Kconfig 2019-07-07 09:32:50.451840235 +0100 +@@ -302,6 +302,32 @@ See Documentation/vm/ksm.rst for more information: KSM is inactive until a program has madvised that an area is MADV_MERGEABLE, and root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). 
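The KSM help text above describes the opt-in model that the UKSM option introduced below does away with: stock KSM merges nothing until an application marks a region with madvise(MADV_MERGEABLE) and root writes 1 to /sys/kernel/mm/ksm/run, whereas UKSM scans all anonymous VMAs on its own. A minimal shell sketch of how the two are exercised at run time; the /proc/vmstat counter name is taken from the mm/vmstat.c hunk later in this patch, everything else is ordinary procfs/sysfs usage:

    # Stock KSM: root arms the scanner, but it only touches regions that
    # applications have opted in with madvise(MADV_MERGEABLE).
    echo 1 > /sys/kernel/mm/ksm/run

    # UKSM: scanning needs no per-application opt-in once the option below
    # is enabled; the zero-page counter this patch wires into mm/vmstat.c
    # reports its effect.
    grep nr_uksm_zero_pages /proc/vmstat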
@@ -1037,10 +1011,31 @@ diff -Nur a/mm/Kconfig b/mm/Kconfig config DEFAULT_MMAP_MIN_ADDR int "Low address space to protect from user allocation" +diff -Nur a/mm/ksm.c b/mm/ksm.c +--- a/mm/ksm.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/mm/ksm.c 2019-07-07 09:32:50.461840572 +0100 +@@ -858,17 +858,6 @@ + return err; + } + +-static inline struct stable_node *page_stable_node(struct page *page) +-{ +- return PageKsm(page) ? page_rmapping(page) : NULL; +-} +- +-static inline void set_page_stable_node(struct page *page, +- struct stable_node *stable_node) +-{ +- page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); +-} +- + #ifdef CONFIG_SYSFS + /* + * Only called through the sysfs control interface: diff -Nur a/mm/Makefile b/mm/Makefile ---- a/mm/Makefile 2018-08-24 12:04:51.000000000 +0100 -+++ b/mm/Makefile 2018-08-27 10:44:36.345467349 +0100 -@@ -65,7 +65,8 @@ +--- a/mm/Makefile 2019-06-25 04:34:56.000000000 +0100 ++++ b/mm/Makefile 2019-07-07 09:32:50.451840235 +0100 +@@ -58,7 +58,8 @@ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o obj-$(CONFIG_SLOB) += slob.o obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o @@ -1051,9 +1046,9 @@ diff -Nur a/mm/Makefile b/mm/Makefile obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_SLUB) += slub.o diff -Nur a/mm/memory.c b/mm/memory.c ---- a/mm/memory.c 2018-08-24 12:04:51.000000000 +0100 -+++ b/mm/memory.c 2018-08-27 10:44:36.346467381 +0100 -@@ -128,6 +128,25 @@ +--- a/mm/memory.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/mm/memory.c 2019-07-07 09:32:50.461840572 +0100 +@@ -129,6 +129,25 @@ unsigned long highest_memmap_pfn __read_mostly; @@ -1079,7 +1074,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c /* * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() */ -@@ -139,6 +158,7 @@ +@@ -140,6 +159,7 @@ core_initcall(init_zero_pfn); @@ -1087,7 +1082,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c #if defined(SPLIT_RSS_COUNTING) void sync_mm_rss(struct mm_struct *mm) -@@ -1035,6 +1055,9 @@ +@@ -794,6 +814,9 @@ get_page(page); page_dup_rmap(page, false); rss[mm_counter(page)]++; @@ -1097,7 +1092,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c } else if (pte_devmap(pte)) { page = pte_page(pte); -@@ -1048,6 +1071,8 @@ +@@ -807,6 +830,8 @@ page_dup_rmap(page, false); rss[mm_counter(page)]++; } @@ -1106,7 +1101,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c } out_set_pte: -@@ -1317,8 +1342,10 @@ +@@ -1075,8 +1100,10 @@ ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); tlb_remove_tlb_entry(tlb, pte, addr); @@ -1118,7 +1113,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c if (!PageAnon(page)) { if (pte_dirty(ptent)) { -@@ -2360,8 +2387,10 @@ +@@ -2117,8 +2144,10 @@ clear_page(kaddr); kunmap_atomic(kaddr); flush_dcache_page(dst); @@ -1130,7 +1125,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c } static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) -@@ -2510,6 +2539,7 @@ +@@ -2266,6 +2295,7 @@ vmf->address); if (!new_page) goto oom; @@ -1138,7 +1133,7 @@ diff -Nur a/mm/memory.c b/mm/memory.c } else { new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); -@@ -2536,7 +2566,9 @@ +@@ -2294,7 +2324,9 @@ mm_counter_file(old_page)); inc_mm_counter_fast(mm, MM_ANONPAGES); } @@ -1149,25 +1144,25 @@ diff -Nur a/mm/memory.c b/mm/memory.c } flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); diff -Nur a/mm/mmap.c b/mm/mmap.c ---- a/mm/mmap.c 2018-08-27 10:42:48.220977669 +0100 -+++ b/mm/mmap.c 2018-08-27 10:44:36.348467446 +0100 -@@ -45,6 +45,7 @@ - #include <linux/moduleparam.h> +--- a/mm/mmap.c 2019-07-07 09:08:19.162348955 +0100 ++++ 
b/mm/mmap.c 2019-07-07 09:34:22.544947675 +0100 +@@ -46,6 +46,7 @@ #include <linux/pkeys.h> #include <linux/oom.h> + #include <linux/sched/mm.h> +#include <linux/ksm.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> -@@ -182,6 +183,7 @@ +@@ -183,6 +184,7 @@ if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); -+ uksm_remove_vma(vma); ++ uksm_remove_vma(vma); vm_area_free(vma); return next; } -@@ -708,9 +710,16 @@ +@@ -733,9 +735,16 @@ long adjust_next = 0; int remove_next = 0; @@ -1184,7 +1179,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c if (end >= next->vm_end) { /* * vma expands, overlapping all the next, and -@@ -843,6 +852,7 @@ +@@ -868,6 +877,7 @@ end_changed = true; } vma->vm_pgoff = pgoff; @@ -1192,7 +1187,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c if (adjust_next) { next->vm_start += adjust_next << PAGE_SHIFT; next->vm_pgoff += adjust_next; -@@ -948,6 +958,7 @@ +@@ -973,6 +983,7 @@ if (remove_next == 2) { remove_next = 1; end = next->vm_end; @@ -1200,7 +1195,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c goto again; } else if (next) -@@ -974,10 +985,14 @@ +@@ -999,10 +1010,14 @@ */ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } @@ -1215,7 +1210,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c validate_mm(mm); return 0; -@@ -1434,6 +1449,9 @@ +@@ -1459,6 +1474,9 @@ vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; @@ -1225,7 +1220,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; -@@ -1798,6 +1816,7 @@ +@@ -1823,6 +1841,7 @@ allow_write_access(file); } file = vma->vm_file; @@ -1233,7 +1228,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c out: perf_event_mmap(vma); -@@ -1839,6 +1858,7 @@ +@@ -1865,6 +1884,7 @@ if (vm_flags & VM_DENYWRITE) allow_write_access(file); free_vma: @@ -1241,7 +1236,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c vm_area_free(vma); unacct_error: if (charged) -@@ -2658,6 +2678,8 @@ +@@ -2697,6 +2717,8 @@ else err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); @@ -1250,7 +1245,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c /* Success. 
*/ if (!err) return 0; -@@ -2943,6 +2965,7 @@ +@@ -3001,6 +3023,7 @@ if ((flags & (~VM_EXEC)) != 0) return -EINVAL; flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; @@ -1258,7 +1253,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); if (offset_in_page(error)) -@@ -2999,6 +3022,7 @@ +@@ -3051,6 +3074,7 @@ vma->vm_flags = flags; vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); @@ -1266,7 +1261,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; -@@ -3078,6 +3102,12 @@ +@@ -3128,6 +3152,12 @@ up_write(&mm->mmap_sem); } @@ -1279,7 +1274,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c if (mm->locked_vm) { vma = mm->mmap; while (vma) { -@@ -3112,6 +3142,11 @@ +@@ -3162,6 +3192,11 @@ vma = remove_vma(vma); } vm_unacct_memory(nr_accounted); @@ -1291,7 +1286,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c } /* Insert vm structure into process list sorted by address -@@ -3219,6 +3254,7 @@ +@@ -3269,6 +3304,7 @@ new_vma->vm_ops->open(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); *need_rmap_locks = false; @@ -1299,7 +1294,7 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c } return new_vma; -@@ -3369,6 +3405,7 @@ +@@ -3419,6 +3455,7 @@ vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); perf_event_mmap(vma); @@ -1307,25 +1302,10 @@ diff -Nur a/mm/mmap.c b/mm/mmap.c return vma; -diff -Nur a/mm/rmap.c b/mm/rmap.c ---- a/mm/rmap.c 2018-08-24 12:04:51.000000000 +0100 -+++ b/mm/rmap.c 2018-08-27 10:44:36.348467446 +0100 -@@ -1017,9 +1017,9 @@ - - /** - * __page_set_anon_rmap - set up new anonymous rmap -- * @page: Page to add to rmap -+ * @page: Page to add to rmap - * @vma: VM area to add page to. -- * @address: User virtual address of the mapping -+ * @address: User virtual address of the mapping - * @exclusive: the page is exclusively owned by the current process - */ - static void __page_set_anon_rmap(struct page *page, diff -Nur a/mm/uksm.c b/mm/uksm.c --- a/mm/uksm.c 1970-01-01 01:00:00.000000000 +0100 -+++ b/mm/uksm.c 2018-08-27 10:44:36.351467543 +0100 -@@ -0,0 +1,5584 @@ ++++ b/mm/uksm.c 2019-07-07 09:32:50.461840572 +0100 +@@ -0,0 +1,5580 @@ +/* + * Ultra KSM. 
Copyright (C) 2011-2012 Nai Xia + * @@ -2821,10 +2801,9 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + .page = page, + .vma = vma, + }; ++ struct mmu_notifier_range range; + int swapped; + int err = -EFAULT; -+ unsigned long mmun_start; /* For mmu_notifiers */ -+ unsigned long mmun_end; /* For mmu_notifiers */ + + pvmw.address = page_address_in_vma(page, vma); + if (pvmw.address == -EFAULT) @@ -2832,9 +2811,8 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + + BUG_ON(PageTransCompound(page)); + -+ mmun_start = pvmw.address; -+ mmun_end = pvmw.address + PAGE_SIZE; -+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); ++ mmu_notifier_range_init(&range, mm, pvmw.address, pvmw.address + PAGE_SIZE); ++ mmu_notifier_invalidate_range_start(&range); + + if (!page_vma_mapped_walk(&pvmw)) + goto out_mn; @@ -2884,7 +2862,7 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c +out_unlock: + page_vma_mapped_walk_done(&pvmw); +out_mn: -+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); ++ mmu_notifier_invalidate_range_end(&range); +out: + return err; +} @@ -2908,6 +2886,7 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + struct page *kpage, pte_t orig_pte) +{ + struct mm_struct *mm = vma->vm_mm; ++ struct mmu_notifier_range range; + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; @@ -2918,8 +2897,6 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + + unsigned long addr; + int err = MERGE_ERR_PGERR; -+ unsigned long mmun_start; /* For mmu_notifiers */ -+ unsigned long mmun_end; /* For mmu_notifiers */ + + addr = page_address_in_vma(page, vma); + if (addr == -EFAULT) @@ -2939,9 +2916,8 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + if (!pmd_present(*pmd)) + goto out; + -+ mmun_start = addr; -+ mmun_end = addr + PAGE_SIZE; -+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); ++ mmu_notifier_range_init(&range, mm, addr, addr + PAGE_SIZE); ++ mmu_notifier_invalidate_range_start(&range); + + ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!pte_same(*ptep, orig_pte)) { @@ -2974,7 +2950,7 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c + pte_unmap_unlock(ptep, ptl); + err = 0; +out_mn: -+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); ++ mmu_notifier_invalidate_range_end(&range); +out: + return err; +} @@ -6911,11 +6887,11 @@ diff -Nur a/mm/uksm.c b/mm/uksm.c +#endif + diff -Nur a/mm/vmstat.c b/mm/vmstat.c ---- a/mm/vmstat.c 2018-08-24 12:04:51.000000000 +0100 -+++ b/mm/vmstat.c 2018-08-27 10:44:36.352467575 +0100 -@@ -1163,6 +1163,9 @@ +--- a/mm/vmstat.c 2019-06-25 04:34:56.000000000 +0100 ++++ b/mm/vmstat.c 2019-07-07 09:32:50.461840572 +0100 +@@ -1165,6 +1165,9 @@ "nr_written", - "", /* nr_indirectly_reclaimable */ + "nr_kernel_misc_reclaimable", +#ifdef CONFIG_UKSM + "nr_uksm_zero_pages", diff --git a/sys-kernel/linux-sources-redcore/files/nouveau-pascal-backlight.patch b/sys-kernel/linux-sources-redcore/files/nouveau-pascal-backlight.patch deleted file mode 100644 index 754d982a..00000000 --- a/sys-kernel/linux-sources-redcore/files/nouveau-pascal-backlight.patch +++ /dev/null @@ -1,11 +0,0 @@ -diff -up linux-4.16/drivers/gpu/drm/nouveau/nouveau_backlight.c.omv~ linux-4.16/drivers/gpu/drm/nouveau/nouveau_backlight.c ---- linux-4.16/drivers/gpu/drm/nouveau/nouveau_backlight.c.omv~ 2018-04-06 01:04:34.573357055 +0200 -+++ linux-4.16/drivers/gpu/drm/nouveau/nouveau_backlight.c 2018-04-06 01:05:46.985579248 +0200 -@@ -287,6 +287,7 @@ nouveau_backlight_init(struct drm_device - case NV_DEVICE_INFO_V0_FERMI: - case NV_DEVICE_INFO_V0_KEPLER: - case NV_DEVICE_INFO_V0_MAXWELL: -+ case NV_DEVICE_INFO_V0_PASCAL: - return 
nv50_backlight_init(connector); - default: - break; diff --git a/sys-kernel/linux-sources-redcore/files/revert-patches-causing-instant-reboot.patch b/sys-kernel/linux-sources-redcore/files/revert-patches-causing-instant-reboot.patch deleted file mode 100644 index a2127cff..00000000 --- a/sys-kernel/linux-sources-redcore/files/revert-patches-causing-instant-reboot.patch +++ /dev/null @@ -1,314 +0,0 @@ -diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S -index 8169e8b7a4dc..12915511be61 100644 ---- a/arch/x86/boot/compressed/head_64.S -+++ b/arch/x86/boot/compressed/head_64.S -@@ -305,48 +305,13 @@ ENTRY(startup_64) - /* Set up the stack */ - leaq boot_stack_end(%rbx), %rsp - -- /* -- * paging_prepare() and cleanup_trampoline() below can have GOT -- * references. Adjust the table with address we are running at. -- * -- * Zero RAX for adjust_got: the GOT was not adjusted before; -- * there's no adjustment to undo. -- */ -- xorq %rax, %rax -- -- /* -- * Calculate the address the binary is loaded at and use it as -- * a GOT adjustment. -- */ -- call 1f --1: popq %rdi -- subq $1b, %rdi -- -- call adjust_got -- - /* - * At this point we are in long mode with 4-level paging enabled, -- * but we might want to enable 5-level paging or vice versa. -- * -- * The problem is that we cannot do it directly. Setting or clearing -- * CR4.LA57 in long mode would trigger #GP. So we need to switch off -- * long mode and paging first. -- * -- * We also need a trampoline in lower memory to switch over from -- * 4- to 5-level paging for cases when the bootloader puts the kernel -- * above 4G, but didn't enable 5-level paging for us. -- * -- * The same trampoline can be used to switch from 5- to 4-level paging -- * mode, like when starting 4-level paging kernel via kexec() when -- * original kernel worked in 5-level paging mode. -- * -- * For the trampoline, we need the top page table to reside in lower -- * memory as we don't have a way to load 64-bit values into CR3 in -- * 32-bit mode. -+ * but we want to enable 5-level paging. - * -- * We go though the trampoline even if we don't have to: if we're -- * already in a desired paging mode. This way the trampoline code gets -- * tested on every boot. -+ * The problem is that we cannot do it directly. Setting LA57 in -+ * long mode would trigger #GP. So we need to switch off long mode -+ * first. - */ - - /* Make sure we have GDT with 32-bit code segment */ -@@ -371,32 +336,40 @@ ENTRY(startup_64) - /* Save the trampoline address in RCX */ - movq %rax, %rcx - -+ /* Check if we need to enable 5-level paging */ -+ cmpq $0, %rdx -+ jz lvl5 -+ -+ /* Clear additional page table */ -+ leaq lvl5_pgtable(%rbx), %rdi -+ xorq %rax, %rax -+ movq $(PAGE_SIZE/8), %rcx -+ rep stosq -+ - /* -- * Load the address of trampoline_return() into RDI. -- * It will be used by the trampoline to return to the main code. -+ * Setup current CR3 as the first and only entry in a new top level -+ * page table. - */ -- leaq trampoline_return(%rip), %rdi -+ movq %cr3, %rdi -+ leaq 0x7 (%rdi), %rax -+ movq %rax, lvl5_pgtable(%rbx) - - /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */ - pushq $__KERNEL32_CS -- leaq TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax -+ leaq compatible_mode(%rip), %rax - pushq %rax - lretq --trampoline_return: -+lvl5: - /* Restore the stack, the 32-bit trampoline uses its own stack */ - leaq boot_stack_end(%rbx), %rsp - - /* - * cleanup_trampoline() would restore trampoline memory. 
- * -- * RDI is address of the page table to use instead of page table -- * in trampoline memory (if required). -- * - * RSI holds real mode data and needs to be preserved across - * this function call. - */ - pushq %rsi -- leaq top_pgtable(%rbx), %rdi - call cleanup_trampoline - popq %rsi - -@@ -404,21 +377,6 @@ trampoline_return: - pushq $0 - popfq - -- /* -- * Previously we've adjusted the GOT with address the binary was -- * loaded at. Now we need to re-adjust for relocation address. -- * -- * Calculate the address the binary is loaded at, so that we can -- * undo the previous GOT adjustment. -- */ -- call 1f --1: popq %rax -- subq $1b, %rax -- -- /* The new adjustment is the relocation address */ -- movq %rbx, %rdi -- call adjust_got -- - /* - * Copy the compressed kernel to the end of our buffer - * where decompression in place becomes safe. -@@ -519,6 +477,19 @@ relocated: - shrq $3, %rcx - rep stosq - -+/* -+ * Adjust our own GOT -+ */ -+ leaq _got(%rip), %rdx -+ leaq _egot(%rip), %rcx -+1: -+ cmpq %rcx, %rdx -+ jae 2f -+ addq %rbx, (%rdx) -+ addq $8, %rdx -+ jmp 1b -+2: -+ - /* - * Do the extraction, and jump to the new kernel.. - */ -@@ -537,36 +508,9 @@ relocated: - */ - jmp *%rax - --/* -- * Adjust the global offset table -- * -- * RAX is the previous adjustment of the table to undo (use 0 if it's the -- * first time we touch GOT). -- * RDI is the new adjustment to apply. -- */ --adjust_got: -- /* Walk through the GOT adding the address to the entries */ -- leaq _got(%rip), %rdx -- leaq _egot(%rip), %rcx --1: -- cmpq %rcx, %rdx -- jae 2f -- subq %rax, (%rdx) /* Undo previous adjustment */ -- addq %rdi, (%rdx) /* Apply the new adjustment */ -- addq $8, %rdx -- jmp 1b --2: -- ret -- - .code32 --/* -- * This is the 32-bit trampoline that will be copied over to low memory. -- * -- * RDI contains the return address (might be above 4G). -- * ECX contains the base address of the trampoline memory. -- * Non zero RDX on return means we need to enable 5-level paging. 
-- */ - ENTRY(trampoline_32bit_src) -+compatible_mode: - /* Set up data and stack segments */ - movl $__KERNEL_DS, %eax - movl %eax, %ds -@@ -580,61 +524,33 @@ ENTRY(trampoline_32bit_src) - btrl $X86_CR0_PG_BIT, %eax - movl %eax, %cr0 - -- /* Check what paging mode we want to be in after the trampoline */ -- cmpl $0, %edx -- jz 1f -+ /* Point CR3 to 5-level paging */ -+ leal lvl5_pgtable(%ebx), %eax -+ movl %eax, %cr3 - -- /* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */ -+ /* Enable PAE and LA57 mode */ - movl %cr4, %eax -- testl $X86_CR4_LA57, %eax -- jnz 3f -- jmp 2f --1: -- /* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */ -- movl %cr4, %eax -- testl $X86_CR4_LA57, %eax -- jz 3f --2: -- /* Point CR3 to the trampoline's new top level page table */ -- leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax -- movl %eax, %cr3 --3: -- /* Enable PAE and LA57 (if required) paging modes */ -- movl $X86_CR4_PAE, %eax -- cmpl $0, %edx -- jz 1f -- orl $X86_CR4_LA57, %eax --1: -+ orl $(X86_CR4_PAE | X86_CR4_LA57), %eax - movl %eax, %cr4 - -- /* Calculate address of paging_enabled() once we are executing in the trampoline */ -- leal paging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax -+ /* Calculate address we are running at */ -+ call 1f -+1: popl %edi -+ subl $1b, %edi - -- /* Prepare the stack for far return to Long Mode */ -+ /* Prepare stack for far return to Long Mode */ - pushl $__KERNEL_CS -- pushl %eax -+ leal lvl5(%edi), %eax -+ push %eax - -- /* Enable paging again */ -+ /* Enable paging back */ - movl $(X86_CR0_PG | X86_CR0_PE), %eax - movl %eax, %cr0 - - lret - -- .code64 --paging_enabled: -- /* Return from the trampoline */ -- jmp *%rdi -- -- /* -- * The trampoline code has a size limit. -- * Make sure we fail to compile if the trampoline code grows -- * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes. -- */ -- .org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE -- -- .code32 - no_longmode: -- /* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */ -+ /* This isn't an x86-64 CPU so hang */ - 1: - hlt - jmp 1b -@@ -695,10 +611,5 @@ boot_stack_end: - .balign 4096 - pgtable: - .fill BOOT_PGT_SIZE, 1, 0 -- --/* -- * The page table is going to be used instead of page table in the trampoline -- * memory. -- */ --top_pgtable: -+lvl5_pgtable: - .fill PAGE_SIZE, 1, 0 -diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c -index a362fa0b849c..32af1cbcd903 100644 ---- a/arch/x86/boot/compressed/pgtable_64.c -+++ b/arch/x86/boot/compressed/pgtable_64.c -@@ -22,6 +22,14 @@ struct paging_config { - /* Buffer to preserve trampoline memory */ - static char trampoline_save[TRAMPOLINE_32BIT_SIZE]; - -+/* -+ * The page table is going to be used instead of page table in the trampoline -+ * memory. -+ * -+ * It must not be in BSS as BSS is cleared after cleanup_trampoline(). -+ */ -+static char top_pgtable[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data); -+ - /* - * Trampoline address will be printed by extract_kernel() for debugging - * purposes. -@@ -126,7 +134,7 @@ struct paging_config paging_prepare(void) - return paging_config; - } - --void cleanup_trampoline(void *pgtable) -+void cleanup_trampoline(void) - { - void *trampoline_pgtable; - -@@ -137,8 +145,8 @@ void cleanup_trampoline(void *pgtable) - * if it's there. 
- */ - if ((void *)__native_read_cr3() == trampoline_pgtable) { -- memcpy(pgtable, trampoline_pgtable, PAGE_SIZE); -- native_write_cr3((unsigned long)pgtable); -+ memcpy(top_pgtable, trampoline_pgtable, PAGE_SIZE); -+ native_write_cr3((unsigned long)top_pgtable); - } - - /* Restore trampoline memory */ diff --git a/sys-kernel/linux-sources-redcore/files/workaround-BIOS-bugs-on-CCP-SVE-ryzen-TR.patch b/sys-kernel/linux-sources-redcore/files/workaround-BIOS-bugs-on-CCP-SVE-ryzen-TR.patch deleted file mode 100644 index 1d1ae0e3..00000000 --- a/sys-kernel/linux-sources-redcore/files/workaround-BIOS-bugs-on-CCP-SVE-ryzen-TR.patch +++ /dev/null @@ -1,85 +0,0 @@ -diff -up linux-4.18/drivers/crypto/ccp/psp-dev.c.0333~ linux-4.18/drivers/crypto/ccp/psp-dev.c ---- linux-4.18/drivers/crypto/ccp/psp-dev.c.0333~ 2018-08-21 19:32:36.457890957 +0200 -+++ linux-4.18/drivers/crypto/ccp/psp-dev.c 2018-08-21 19:35:43.283028769 +0200 -@@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex); - static struct sev_misc_dev *misc_dev; - static struct psp_device *psp_master; - -+static int psp_cmd_timeout = 100; -+module_param(psp_cmd_timeout, int, 0644); -+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands"); -+ -+static int psp_probe_timeout = 5; -+module_param(psp_probe_timeout, int, 0644); -+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe"); -+ -+static bool psp_dead; -+static int psp_timeout; -+ - static struct psp_device *psp_alloc_struct(struct sp_device *sp) - { - struct device *dev = sp->dev; -@@ -82,10 +93,15 @@ done: - return IRQ_HANDLED; - } - --static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg) -+static int sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg, unsigned int timeout) - { -- wait_event(psp->sev_int_queue, psp->sev_int_rcvd); -+ int ret; -+ ret = wait_event_timeout(psp->sev_int_queue, psp->sev_int_rcvd, -+ timeout * HZ); -+ if (!ret) -+ return -ETIMEDOUT; - *reg = ioread32(psp->io_regs + PSP_CMDRESP); -+ return 0; - } - - static int sev_cmd_buffer_len(int cmd) -@@ -133,12 +149,15 @@ static int __sev_do_cmd_locked(int cmd, - if (!psp) - return -ENODEV; - -+ if (psp_dead) -+ return -EBUSY; -+ - /* Get the physical address of the command buffer */ - phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; - phys_msb = data ? 
upper_32_bits(__psp_pa(data)) : 0; - -- dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n", -- cmd, phys_msb, phys_lsb); -+ dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", -+ cmd, phys_msb, phys_lsb, psp_timeout); - - print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, - sev_cmd_buffer_len(cmd), false); -@@ -154,7 +173,17 @@ static int __sev_do_cmd_locked(int cmd, - iowrite32(reg, psp->io_regs + PSP_CMDRESP); - - /* wait for command completion */ -- sev_wait_cmd_ioc(psp, ®); -+ ret = sev_wait_cmd_ioc(psp, ®, psp_timeout); -+ if (ret) { -+ if (psp_ret) -+ *psp_ret = 0; -+ -+ dev_err(psp->dev, "sev command %#x timed out, disabling PSP\n", cmd); -+ psp_dead = true; -+ return ret; -+ } -+ -+ psp_timeout = psp_cmd_timeout; - - if (psp_ret) - *psp_ret = reg & PSP_CMDRESP_ERR_MASK; -@@ -886,6 +915,8 @@ void psp_pci_init(void) - - psp_master = sp->psp_data; - -+ psp_timeout = psp_probe_timeout; -+ - if (sev_get_api_version()) - goto err; - diff --git a/sys-kernel/linux-sources-redcore/linux-sources-redcore-4.18.5.ebuild b/sys-kernel/linux-sources-redcore/linux-sources-redcore-4.18.5.ebuild deleted file mode 100644 index 2aa366b1..00000000 --- a/sys-kernel/linux-sources-redcore/linux-sources-redcore-4.18.5.ebuild +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 1999-2017 Gentoo Foundation -# Distributed under the terms of the GNU General Public License v2 - -EAPI=6 - -inherit eutils - -EXTRAVERSION="redcore" -KV_FULL="${PV}-${EXTRAVERSION}" - -DESCRIPTION="Official Redcore Linux Kernel Sources" -HOMEPAGE="https://redcorelinux.org" -SRC_URI="https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-${PV}.tar.xz" - -KEYWORDS="amd64" -LICENSE="GPL-2" -SLOT="${PV}" -IUSE="" - -RESTRICT="strip mirror" -DEPEND=" - app-arch/xz-utils - sys-devel/autoconf - sys-devel/bc - sys-devel/make" -RDEPEND="${DEPEND}" - -PATCHES=( "${FILESDIR}"/0001-Revert-x86-ACPI-cstate-Allow-ACPI-C1-FFH-MWAIT-use-o.patch - "${FILESDIR}"/ata-fix-NCQ-LOG-strings-and-move-to-debug.patch - "${FILESDIR}"/drop_ancient-and-wrong-msg.patch - "${FILESDIR}"/enable_alx_wol.patch - "${FILESDIR}"/mute-pps_state_mismatch.patch - "${FILESDIR}"/nouveau-pascal-backlight.patch - "${FILESDIR}"/radeon_dp_aux_transfer_native-no-ratelimited_debug.patch - "${FILESDIR}"/revert-patches-causing-instant-reboot.patch - "${FILESDIR}"/workaround-BIOS-bugs-on-CCP-SVE-ryzen-TR.patch - "${FILESDIR}"/linux-hardened.patch - "${FILESDIR}"/uksm-for-linux-hardened.patch ) - -S="${WORKDIR}"/linux-"${PV}" - -pkg_setup() { - export KBUILD_BUILD_USER="nexus" - export KBUILD_BUILD_HOST="nexus.redcorelinux.org" - - export REAL_ARCH="$ARCH" - unset ARCH ; unset LDFLAGS #will interfere with Makefile if set -} - -src_prepare() { - default - emake mrproper - sed -ri "s|^(EXTRAVERSION =).*|\1 -${EXTRAVERSION}|" Makefile - cp "${FILESDIR}"/"${EXTRAVERSION}"-amd64.config .config -} - -src_compile() { - emake prepare modules_prepare -} - -src_install() { - dodir usr/src/linux-"${KV_FULL}" - cp -ax "${S}"/* "${D}"usr/src/linux-"${KV_FULL}" -} - -_kernel_sources_delete() { - rm -rf "${ROOT}"usr/src/linux-"${KV_FULL}" -} - -pkg_postrm() { - _kernel_sources_delete -} diff --git a/sys-kernel/linux-sources-redcore/linux-sources-redcore-5.1.15.ebuild b/sys-kernel/linux-sources-redcore/linux-sources-redcore-5.1.15.ebuild new file mode 100644 index 00000000..585459a0 --- /dev/null +++ b/sys-kernel/linux-sources-redcore/linux-sources-redcore-5.1.15.ebuild @@ -0,0 +1,90 @@ +# Copyright 1999-2017 Gentoo Foundation +# Distributed under the terms of the GNU 
General Public License v2 + +EAPI=6 + +inherit eutils + +EXTRAVERSION="redcore" +KV_FULL="${PV}-${EXTRAVERSION}" +KV_MAJOR="5.1" + +DESCRIPTION="Official Redcore Linux Kernel Sources" +HOMEPAGE="https://redcorelinux.org" +SRC_URI="https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-${PV}.tar.xz" + +KEYWORDS="amd64" +LICENSE="GPL-2" +SLOT="${PVR}" +IUSE="" + +RESTRICT="strip mirror" +DEPEND=" + app-arch/lz4 + app-arch/xz-utils + sys-devel/autoconf + sys-devel/bc + sys-devel/make" +RDEPEND="${DEPEND}" + +PATCHES=( + "${FILESDIR}"/"${KV_MAJOR}"-acpi-use-kern_warning_even_when_error.patch + "${FILESDIR}"/"${KV_MAJOR}"-ata-fix-NCQ-LOG-strings-and-move-to-debug.patch + "${FILESDIR}"/"${KV_MAJOR}"-drop_ancient-and-wrong-msg.patch + "${FILESDIR}"/"${KV_MAJOR}"-enable_alx_wol.patch + "${FILESDIR}"/"${KV_MAJOR}"-mute-pps_state_mismatch.patch + "${FILESDIR}"/"${KV_MAJOR}"-fix-acpi_dbg_level.patch + "${FILESDIR}"/"${KV_MAJOR}"-radeon_dp_aux_transfer_native-no-ratelimited_debug.patch + "${FILESDIR}"/"${KV_MAJOR}"-Unknow-SSD-HFM128GDHTNG-8310B-QUIRK_NO_APST.patch + "${FILESDIR}"/"${KV_MAJOR}"-linux-hardened.patch + "${FILESDIR}"/"${KV_MAJOR}"-uksm-linux-hardened.patch + "${FILESDIR}"/"${KV_MAJOR}"-0001-MultiQueue-Skiplist-Scheduler-version-0.192-linux-hardened.patch + "${FILESDIR}"/"${KV_MAJOR}"-0002-Fix-Werror-build-failure-in-tools.patch + "${FILESDIR}"/"${KV_MAJOR}"-0003-Make-preemptible-kernel-default.patch + "${FILESDIR}"/"${KV_MAJOR}"-0004-Expose-vmsplit-for-our-poor-32-bit-users.patch + "${FILESDIR}"/"${KV_MAJOR}"-0005-Create-highres-timeout-variants-of-schedule_timeout-.patch + "${FILESDIR}"/"${KV_MAJOR}"-0006-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch + "${FILESDIR}"/"${KV_MAJOR}"-0007-Convert-msleep-to-use-hrtimers-when-active.patch + "${FILESDIR}"/"${KV_MAJOR}"-0008-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch + "${FILESDIR}"/"${KV_MAJOR}"-0009-Replace-all-calls-to-schedule_timeout_interruptible-.patch + "${FILESDIR}"/"${KV_MAJOR}"-0010-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch + "${FILESDIR}"/"${KV_MAJOR}"-0011-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch + "${FILESDIR}"/"${KV_MAJOR}"-0012-Make-threaded-IRQs-optionally-the-default-which-can-.patch + "${FILESDIR}"/"${KV_MAJOR}"-0013-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch + "${FILESDIR}"/"${KV_MAJOR}"-0014-Swap-sucks.patch +) + +S="${WORKDIR}"/linux-"${PV}" + +pkg_setup() { + export KBUILD_BUILD_USER="nexus" + export KBUILD_BUILD_HOST="nexus.redcorelinux.org" + + export REAL_ARCH="$ARCH" + unset ARCH ; unset LDFLAGS #will interfere with Makefile if set +} + +src_prepare() { + default + emake mrproper + sed -ri "s|^(EXTRAVERSION =).*|\1 -${EXTRAVERSION}|" Makefile + cp "${FILESDIR}"/"${KV_MAJOR}"-amd64.config .config + rm -rf $(find . -type f|grep -F \.orig) +} + +src_compile() { + emake prepare modules_prepare +} + +src_install() { + dodir usr/src/linux-"${KV_FULL}" + cp -ax "${S}"/* "${D}"usr/src/linux-"${KV_FULL}" +} + +_kernel_sources_delete() { + rm -rf "${ROOT}"usr/src/linux-"${KV_FULL}" +} + +pkg_postrm() { + _kernel_sources_delete +} |
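The 5.1.15 sources ebuild above funnels the whole 5.1-* queue through src_prepare()'s default phase, so one stale patch aborts every build of the package. A pre-commit smoke test, sketched under assumptions (the repository checkout path and the scratch tree location are illustrative, and the loop must keep the PATCHES array order, since the hardened, UKSM and MuQSS patches stack on each other):

    FILESDIR=/var/db/repos/redcore/sys-kernel/linux-sources-redcore/files  # assumed path
    cd /var/tmp/linux-5.1.15                                               # pristine unpacked tree
    for p in 5.1-acpi-use-kern_warning_even_when_error.patch \
             5.1-ata-fix-NCQ-LOG-strings-and-move-to-debug.patch; do
        # ...continue with the remaining PATCHES entries, in ebuild order
        patch -p1 --silent < "${FILESDIR}/${p}" || { echo "does not apply: ${p}"; break; }
    done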