author	V3n3RiX <venerix@redcorelinux.org>	2018-04-12 16:09:15 +0100
committer	V3n3RiX <venerix@redcorelinux.org>	2018-04-12 16:09:15 +0100
commit	8d7700344360c308ad67f4a7343a099cf6a9ab01 (patch)
tree	c1662d711f9cc233621f729f64ce916cd0113f69 /sys-kernel
parent	d93832cc9b421b5bb503c0b547598bc4b59976b9 (diff)
git is acting weird on me lately
Diffstat (limited to 'sys-kernel')
-rw-r--r--	sys-kernel/linux-image-redcore/files/linux-hardened-v2.patch	| 2725
-rw-r--r--	sys-kernel/linux-sources-redcore/files/linux-hardened-v2.patch	| 2725
2 files changed, 5450 insertions, 0 deletions
diff --git a/sys-kernel/linux-image-redcore/files/linux-hardened-v2.patch b/sys-kernel/linux-image-redcore/files/linux-hardened-v2.patch
new file mode 100644
index 00000000..8ec7b812
--- /dev/null
+++ b/sys-kernel/linux-image-redcore/files/linux-hardened-v2.patch
@@ -0,0 +1,2725 @@
+diff -Naur linux-4.16/arch/arm64/configs/defconfig linux-4.16-p/arch/arm64/configs/defconfig
+--- linux-4.16/arch/arm64/configs/defconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/arm64/configs/defconfig 2018-04-12 15:57:20.805694357 +0200
+@@ -1,4 +1,3 @@
+-CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_AUDIT=y
+ CONFIG_NO_HZ_IDLE=y
+diff -Naur linux-4.16/arch/arm64/include/asm/elf.h linux-4.16-p/arch/arm64/include/asm/elf.h
+--- linux-4.16/arch/arm64/include/asm/elf.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/arm64/include/asm/elf.h 2018-04-12 15:57:20.806694357 +0200
+@@ -114,10 +114,10 @@
+
+ /*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is above 4GB to leave the entire 32-bit address
++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
++#define ELF_ET_DYN_BASE 0x100000000UL
+
+ #ifndef __ASSEMBLY__
+
+@@ -158,10 +158,10 @@
+ /* 1GB of VA */
+ #ifdef CONFIG_COMPAT
+ #define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
+- 0x7ff >> (PAGE_SHIFT - 12) : \
+- 0x3ffff >> (PAGE_SHIFT - 12))
++ ((1UL << mmap_rnd_compat_bits) - 1) >> (PAGE_SHIFT - 12) : \
++ ((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
+ #else
+-#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
++#define STACK_RND_MASK (((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
+ #endif
+
+ #ifdef __AARCH64EB__
+diff -Naur linux-4.16/arch/arm64/Kconfig linux-4.16-p/arch/arm64/Kconfig
+--- linux-4.16/arch/arm64/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/arm64/Kconfig 2018-04-12 15:57:20.806694357 +0200
+@@ -974,6 +974,7 @@
+
+ config ARM64_SW_TTBR0_PAN
+ bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
++ default y
+ help
+ Enabling this option prevents the kernel from accessing
+ user-space memory directly by pointing TTBR0_EL1 to a reserved
+@@ -1127,6 +1128,7 @@
+ bool "Randomize the address of the kernel image"
+ select ARM64_MODULE_PLTS if MODULES
+ select RELOCATABLE
++ default y
+ help
+ Randomizes the virtual address at which the kernel image is
+ loaded, as a security feature that deters exploit attempts
+diff -Naur linux-4.16/arch/arm64/Kconfig.debug linux-4.16-p/arch/arm64/Kconfig.debug
+--- linux-4.16/arch/arm64/Kconfig.debug 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/arm64/Kconfig.debug 2018-04-12 15:57:20.807694356 +0200
+@@ -45,6 +45,7 @@
+ config DEBUG_WX
+ bool "Warn on W+X mappings at boot"
+ select ARM64_PTDUMP_CORE
++ default y
+ ---help---
+ Generate a warning if any W+X mappings are found at boot.
+
+diff -Naur linux-4.16/arch/arm64/kernel/process.c linux-4.16-p/arch/arm64/kernel/process.c
+--- linux-4.16/arch/arm64/kernel/process.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/arm64/kernel/process.c 2018-04-12 15:57:20.807694356 +0200
+@@ -481,9 +481,9 @@
+ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ {
+ if (is_compat_task())
+- return randomize_page(mm->brk, SZ_32M);
++ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
+ else
+- return randomize_page(mm->brk, SZ_1G);
++ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
+ }
+
+ /*
+diff -Naur linux-4.16/arch/Kconfig linux-4.16-p/arch/Kconfig
+--- linux-4.16/arch/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/Kconfig 2018-04-12 15:57:20.808694356 +0200
+@@ -454,6 +454,11 @@
+ is some slowdown of the boot process (about 0.5%) and fork and
+ irq processing.
+
++ When extra_latent_entropy is passed on the kernel command line,
++ entropy will be extracted from up to the first 4GB of RAM while the
++ runtime memory allocator is being initialized. This slows the boot
++ process down even further.
++
+ Note that entropy extracted this way is not cryptographically
+ secure!
+
+@@ -541,7 +546,7 @@
+ choice
+ prompt "Stack Protector buffer overflow detection"
+ depends on HAVE_CC_STACKPROTECTOR
+- default CC_STACKPROTECTOR_AUTO
++ default CC_STACKPROTECTOR_STRONG
+ help
+ This option turns on the "stack-protector" GCC feature. This
+ feature puts, at the beginning of functions, a canary value on
+@@ -747,7 +752,7 @@
+ int "Number of bits to use for ASLR of mmap base address" if EXPERT
+ range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
+ default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
+- default ARCH_MMAP_RND_BITS_MIN
++ default ARCH_MMAP_RND_BITS_MAX
+ depends on HAVE_ARCH_MMAP_RND_BITS
+ help
+ This value can be used to select the number of bits to use to
+@@ -781,7 +786,7 @@
+ int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
+ range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
+ default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
+- default ARCH_MMAP_RND_COMPAT_BITS_MIN
++ default ARCH_MMAP_RND_COMPAT_BITS_MAX
+ depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
+ help
+ This value can be used to select the number of bits to use to
+@@ -968,6 +973,7 @@
+
+ config REFCOUNT_FULL
+ bool "Perform full reference count validation at the expense of speed"
++ default y
+ help
+ Enabling this switches the refcounting infrastructure from a fast
+ unchecked atomic_t implementation to a fully state checked
+diff -Naur linux-4.16/arch/x86/configs/x86_64_defconfig linux-4.16-p/arch/x86/configs/x86_64_defconfig
+--- linux-4.16/arch/x86/configs/x86_64_defconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/configs/x86_64_defconfig 2018-04-12 15:57:20.808694356 +0200
+@@ -1,5 +1,4 @@
+ # CONFIG_LOCALVERSION_AUTO is not set
+-CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_BSD_PROCESS_ACCT=y
+ CONFIG_TASKSTATS=y
+diff -Naur linux-4.16/arch/x86/entry/vdso/vma.c linux-4.16-p/arch/x86/entry/vdso/vma.c
+--- linux-4.16/arch/x86/entry/vdso/vma.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/entry/vdso/vma.c 2018-04-12 15:57:20.808694356 +0200
+@@ -204,55 +204,9 @@
+ }
+
+ #ifdef CONFIG_X86_64
+-/*
+- * Put the vdso above the (randomized) stack with another randomized
+- * offset. This way there is no hole in the middle of address space.
+- * To save memory make sure it is still in the same PTE as the stack
+- * top. This doesn't give that many random bits.
+- *
+- * Note that this algorithm is imperfect: the distribution of the vdso
+- * start address within a PMD is biased toward the end.
+- *
+- * Only used for the 64-bit and x32 vdsos.
+- */
+-static unsigned long vdso_addr(unsigned long start, unsigned len)
+-{
+- unsigned long addr, end;
+- unsigned offset;
+-
+- /*
+- * Round up the start address. It can start out unaligned as a result
+- * of stack start randomization.
+- */
+- start = PAGE_ALIGN(start);
+-
+- /* Round the lowest possible end address up to a PMD boundary. */
+- end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+- if (end >= TASK_SIZE_MAX)
+- end = TASK_SIZE_MAX;
+- end -= len;
+-
+- if (end > start) {
+- offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+- addr = start + (offset << PAGE_SHIFT);
+- } else {
+- addr = start;
+- }
+-
+- /*
+- * Forcibly align the final address in case we have a hardware
+- * issue that requires alignment for performance reasons.
+- */
+- addr = align_vdso_addr(addr);
+-
+- return addr;
+-}
+-
+ static int map_vdso_randomized(const struct vdso_image *image)
+ {
+- unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
+-
+- return map_vdso(image, addr);
++ return map_vdso(image, 0);
+ }
+ #endif
+
+diff -Naur linux-4.16/arch/x86/include/asm/elf.h linux-4.16-p/arch/x86/include/asm/elf.h
+--- linux-4.16/arch/x86/include/asm/elf.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/include/asm/elf.h 2018-04-12 15:57:20.809694356 +0200
+@@ -249,11 +249,11 @@
+
+ /*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is above 4GB to leave the entire 32-bit address
++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+ #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
+- (DEFAULT_MAP_WINDOW / 3 * 2))
++ 0x100000000UL)
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+@@ -313,8 +313,8 @@
+
+ #ifdef CONFIG_X86_32
+
+-#define __STACK_RND_MASK(is32bit) (0x7ff)
+-#define STACK_RND_MASK (0x7ff)
++#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
++#define STACK_RND_MASK ((1UL << mmap_rnd_bits) - 1)
+
+ #define ARCH_DLINFO ARCH_DLINFO_IA32
+
+@@ -323,7 +323,11 @@
+ #else /* CONFIG_X86_32 */
+
+ /* 1GB for 64bit, 8MB for 32bit */
+-#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
++#ifdef CONFIG_COMPAT
++#define __STACK_RND_MASK(is32bit) ((is32bit) ? (1UL << mmap_rnd_compat_bits) - 1 : (1UL << mmap_rnd_bits) - 1)
++#else
++#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
++#endif
+ #define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())
+
+ #define ARCH_DLINFO \
+@@ -381,5 +385,4 @@
+ } ____cacheline_aligned;
+
+ extern struct va_alignment va_align;
+-extern unsigned long align_vdso_addr(unsigned long);
+ #endif /* _ASM_X86_ELF_H */
+diff -Naur linux-4.16/arch/x86/include/asm/tlbflush.h linux-4.16-p/arch/x86/include/asm/tlbflush.h
+--- linux-4.16/arch/x86/include/asm/tlbflush.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/include/asm/tlbflush.h 2018-04-12 15:57:20.809694356 +0200
+@@ -261,6 +261,7 @@
+
+ local_irq_save(flags);
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
++ BUG_ON(cr4 != __read_cr4());
+ if ((cr4 | mask) != cr4)
+ __cr4_set(cr4 | mask);
+ local_irq_restore(flags);
+@@ -273,6 +274,7 @@
+
+ local_irq_save(flags);
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
++ BUG_ON(cr4 != __read_cr4());
+ if ((cr4 & ~mask) != cr4)
+ __cr4_set(cr4 & ~mask);
+ local_irq_restore(flags);
+@@ -283,6 +285,7 @@
+ unsigned long cr4;
+
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
++ BUG_ON(cr4 != __read_cr4());
+ __cr4_set(cr4 ^ mask);
+ }
+
+@@ -389,6 +392,7 @@
+ raw_local_irq_save(flags);
+
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
++ BUG_ON(cr4 != __read_cr4());
+ /* toggle PGE */
+ native_write_cr4(cr4 ^ X86_CR4_PGE);
+ /* write old PGE again and flush TLBs */
+diff -Naur linux-4.16/arch/x86/Kconfig linux-4.16-p/arch/x86/Kconfig
+--- linux-4.16/arch/x86/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/Kconfig 2018-04-12 15:57:20.810694356 +0200
+@@ -1208,8 +1208,7 @@
+ default X86_LEGACY_VM86
+
+ config X86_16BIT
+- bool "Enable support for 16-bit segments" if EXPERT
+- default y
++ bool "Enable support for 16-bit segments"
+ depends on MODIFY_LDT_SYSCALL
+ ---help---
+ This option is required by programs like Wine to run 16-bit
+@@ -2299,7 +2298,7 @@
+ choice
+ prompt "vsyscall table for legacy applications"
+ depends on X86_64
+- default LEGACY_VSYSCALL_EMULATE
++ default LEGACY_VSYSCALL_NONE
+ help
+ Legacy user code that does not know how to find the vDSO expects
+ to be able to issue three syscalls by calling fixed addresses in
+@@ -2380,8 +2379,7 @@
+ be set to 'N' under normal conditions.
+
+ config MODIFY_LDT_SYSCALL
+- bool "Enable the LDT (local descriptor table)" if EXPERT
+- default y
++ bool "Enable the LDT (local descriptor table)"
+ ---help---
+ Linux can allow user programs to install a per-process x86
+ Local Descriptor Table (LDT) using the modify_ldt(2) system
+diff -Naur linux-4.16/arch/x86/Kconfig.debug linux-4.16-p/arch/x86/Kconfig.debug
+--- linux-4.16/arch/x86/Kconfig.debug 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/Kconfig.debug 2018-04-12 15:57:20.810694356 +0200
+@@ -101,6 +101,7 @@
+ config DEBUG_WX
+ bool "Warn on W+X mappings at boot"
+ select X86_PTDUMP_CORE
++ default y
+ ---help---
+ Generate a warning if any W+X mappings are found at boot.
+
+diff -Naur linux-4.16/arch/x86/kernel/cpu/common.c linux-4.16-p/arch/x86/kernel/cpu/common.c
+--- linux-4.16/arch/x86/kernel/cpu/common.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/kernel/cpu/common.c 2018-04-12 15:57:20.811694355 +0200
+@@ -1617,7 +1617,6 @@
+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
+ barrier();
+
+- x86_configure_nx();
+ x2apic_setup();
+
+ /*
+diff -Naur linux-4.16/arch/x86/kernel/process.c linux-4.16-p/arch/x86/kernel/process.c
+--- linux-4.16/arch/x86/kernel/process.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/kernel/process.c 2018-04-12 15:57:20.812694355 +0200
+@@ -38,6 +38,8 @@
+ #include <asm/switch_to.h>
+ #include <asm/desc.h>
+ #include <asm/prctl.h>
++#include <asm/elf.h>
++#include <linux/sizes.h>
+
+ /*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+@@ -572,7 +574,10 @@
+
+ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ {
+- return randomize_page(mm->brk, 0x02000000);
++ if (mmap_is_ia32())
++ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
++ else
++ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
+ }
+
+ /*
+diff -Naur linux-4.16/arch/x86/kernel/sys_x86_64.c linux-4.16-p/arch/x86/kernel/sys_x86_64.c
+--- linux-4.16/arch/x86/kernel/sys_x86_64.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/kernel/sys_x86_64.c 2018-04-12 15:57:20.812694355 +0200
+@@ -54,13 +54,6 @@
+ return va_align.bits & get_align_mask();
+ }
+
+-unsigned long align_vdso_addr(unsigned long addr)
+-{
+- unsigned long align_mask = get_align_mask();
+- addr = (addr + align_mask) & ~align_mask;
+- return addr | get_align_bits();
+-}
+-
+ static int __init control_va_addr_alignment(char *str)
+ {
+ /* guard against enabling this on other CPU families */
+@@ -122,10 +115,7 @@
+ }
+
+ *begin = get_mmap_base(1);
+- if (in_compat_syscall())
+- *end = task_size_32bit();
+- else
+- *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
++ *end = get_mmap_base(0);
+ }
+
+ unsigned long
+@@ -210,7 +200,7 @@
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+- info.low_limit = PAGE_SIZE;
++ info.low_limit = get_mmap_base(1);
+ info.high_limit = get_mmap_base(0);
+
+ /*
+diff -Naur linux-4.16/arch/x86/mm/init_32.c linux-4.16-p/arch/x86/mm/init_32.c
+--- linux-4.16/arch/x86/mm/init_32.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/mm/init_32.c 2018-04-12 15:57:20.812694355 +0200
+@@ -558,7 +558,7 @@
+ permanent_kmaps_init(pgd_base);
+ }
+
+-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
++pteval_t __supported_pte_mask __ro_after_init = ~(_PAGE_NX | _PAGE_GLOBAL);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ /* user-defined highmem size */
+@@ -866,7 +866,7 @@
+ #endif
+ #endif
+
+-int kernel_set_to_readonly __read_mostly;
++int kernel_set_to_readonly __ro_after_init;
+
+ void set_kernel_text_rw(void)
+ {
+@@ -918,12 +918,11 @@
+ unsigned long start = PFN_ALIGN(_text);
+ unsigned long size = PFN_ALIGN(_etext) - start;
+
++ kernel_set_to_readonly = 1;
+ set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+ printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+ size >> 10);
+
+- kernel_set_to_readonly = 1;
+-
+ #ifdef CONFIG_CPA_DEBUG
+ printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
+ start, start+size);
+diff -Naur linux-4.16/arch/x86/mm/init_64.c linux-4.16-p/arch/x86/mm/init_64.c
+--- linux-4.16/arch/x86/mm/init_64.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/mm/init_64.c 2018-04-12 15:57:20.813694355 +0200
+@@ -65,7 +65,7 @@
+ * around without checking the pgd every time.
+ */
+
+-pteval_t __supported_pte_mask __read_mostly = ~0;
++pteval_t __supported_pte_mask __ro_after_init = ~0;
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ int force_personality32;
+@@ -1195,7 +1195,7 @@
+ mem_init_print_info(NULL);
+ }
+
+-int kernel_set_to_readonly;
++int kernel_set_to_readonly __ro_after_init;
+
+ void set_kernel_text_rw(void)
+ {
+@@ -1244,9 +1244,8 @@
+
+ printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+ (end - start) >> 10);
+- set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+-
+ kernel_set_to_readonly = 1;
++ set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+
+ /*
+ * The rodata/data/bss/brk section (but not the kernel text!)
+diff -Naur linux-4.16/block/blk-softirq.c linux-4.16-p/block/blk-softirq.c
+--- linux-4.16/block/blk-softirq.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/block/blk-softirq.c 2018-04-12 15:57:20.813694355 +0200
+@@ -20,7 +20,7 @@
+ * Softirq action handler - move entries to local list and loop over them
+ * while passing them to the queue registered handler.
+ */
+-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
++static __latent_entropy void blk_done_softirq(void)
+ {
+ struct list_head *cpu_list, local_list;
+
+diff -Naur linux-4.16/Documentation/admin-guide/kernel-parameters.txt linux-4.16-p/Documentation/admin-guide/kernel-parameters.txt
+--- linux-4.16/Documentation/admin-guide/kernel-parameters.txt 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/Documentation/admin-guide/kernel-parameters.txt 2018-04-12 15:57:20.815694354 +0200
+@@ -496,16 +496,6 @@
+ nosocket -- Disable socket memory accounting.
+ nokmem -- Disable kernel memory accounting.
+
+- checkreqprot [SELINUX] Set initial checkreqprot flag value.
+- Format: { "0" | "1" }
+- See security/selinux/Kconfig help text.
+- 0 -- check protection applied by kernel (includes
+- any implied execute protection).
+- 1 -- check protection requested by application.
+- Default value is set via a kernel config option.
+- Value can be changed at runtime via
+- /selinux/checkreqprot.
+-
+ cio_ignore= [S390]
+ See Documentation/s390/CommonIO for details.
+ clk_ignore_unused
+@@ -2943,6 +2933,11 @@
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ extra_latent_entropy
++ Enable a very simple form of latent entropy extraction
++ from the first 4GB of memory as the bootmem allocator
++ passes the memory pages to the buddy allocator.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
+diff -Naur linux-4.16/Documentation/sysctl/kernel.txt linux-4.16-p/Documentation/sysctl/kernel.txt
+--- linux-4.16/Documentation/sysctl/kernel.txt 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/Documentation/sysctl/kernel.txt 2018-04-12 15:57:20.815694354 +0200
+@@ -92,6 +92,7 @@
+ - sysctl_writes_strict
+ - tainted
+ - threads-max
++- tiocsti_restrict
+ - unknown_nmi_panic
+ - watchdog
+ - watchdog_thresh
+@@ -1014,6 +1015,26 @@
+
+ ==============================================================
+
++tiocsti_restrict:
++
++This toggle indicates whether unprivileged users are prevented
++from using the TIOCSTI ioctl to inject commands into other processes
++which share a tty session.
++
++When tiocsti_restrict is set to (0) there are no restrictions (except
++the default restriction of only being able to inject commands into
++one's own tty). When tiocsti_restrict is set to (1), users must
++have CAP_SYS_ADMIN to use the TIOCSTI ioctl.
++
++When user namespaces are in use, the check for the capability
++CAP_SYS_ADMIN is done against the user namespace that originally
++opened the tty.
++
++The kernel config option CONFIG_SECURITY_TIOCSTI_RESTRICT sets the
++default value of tiocsti_restrict.
++
++==============================================================
++
+ unknown_nmi_panic:
+
+ The value in this file affects behavior of handling NMI. When the
+diff -Naur linux-4.16/drivers/ata/libata-core.c linux-4.16-p/drivers/ata/libata-core.c
+--- linux-4.16/drivers/ata/libata-core.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/ata/libata-core.c 2018-04-12 15:57:20.817694353 +0200
+@@ -5148,7 +5148,7 @@
+ struct ata_port *ap;
+ unsigned int tag;
+
+- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ ap = qc->ap;
+
+ qc->flags = 0;
+@@ -5165,7 +5165,7 @@
+ struct ata_port *ap;
+ struct ata_link *link;
+
+- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
+ ap = qc->ap;
+ link = qc->dev->link;
+diff -Naur linux-4.16/drivers/char/Kconfig linux-4.16-p/drivers/char/Kconfig
+--- linux-4.16/drivers/char/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/char/Kconfig 2018-04-12 15:57:20.817694353 +0200
+@@ -9,7 +9,6 @@
+
+ config DEVMEM
+ bool "/dev/mem virtual device support"
+- default y
+ help
+ Say Y here if you want to support the /dev/mem device.
+ The /dev/mem device is used to access areas of physical
+@@ -568,7 +567,6 @@
+ config DEVPORT
+ bool "/dev/port character device"
+ depends on ISA || PCI
+- default y
+ help
+ Say Y here if you want to support the /dev/port device. The /dev/port
+ device is similar to /dev/mem, but for I/O ports.
+diff -Naur linux-4.16/drivers/media/dvb-frontends/cx24116.c linux-4.16-p/drivers/media/dvb-frontends/cx24116.c
+--- linux-4.16/drivers/media/dvb-frontends/cx24116.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/cx24116.c 2018-04-12 15:57:20.818694353 +0200
+@@ -1456,7 +1456,7 @@
+ return cx24116_read_status(fe, status);
+ }
+
+-static int cx24116_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24116_get_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/dvb-frontends/cx24117.c linux-4.16-p/drivers/media/dvb-frontends/cx24117.c
+--- linux-4.16/drivers/media/dvb-frontends/cx24117.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/cx24117.c 2018-04-12 15:57:20.818694353 +0200
+@@ -1555,7 +1555,7 @@
+ return cx24117_read_status(fe, status);
+ }
+
+-static int cx24117_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24117_get_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/dvb-frontends/cx24120.c linux-4.16-p/drivers/media/dvb-frontends/cx24120.c
+--- linux-4.16/drivers/media/dvb-frontends/cx24120.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/cx24120.c 2018-04-12 15:57:20.818694353 +0200
+@@ -1491,7 +1491,7 @@
+ return cx24120_read_status(fe, status);
+ }
+
+-static int cx24120_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24120_get_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/dvb-frontends/cx24123.c linux-4.16-p/drivers/media/dvb-frontends/cx24123.c
+--- linux-4.16/drivers/media/dvb-frontends/cx24123.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/cx24123.c 2018-04-12 15:57:20.819694353 +0200
+@@ -1005,7 +1005,7 @@
+ return retval;
+ }
+
+-static int cx24123_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24123_get_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/dvb-frontends/cxd2820r_core.c linux-4.16-p/drivers/media/dvb-frontends/cxd2820r_core.c
+--- linux-4.16/drivers/media/dvb-frontends/cxd2820r_core.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/cxd2820r_core.c 2018-04-12 15:57:20.819694353 +0200
+@@ -403,7 +403,7 @@
+ return DVBFE_ALGO_SEARCH_ERROR;
+ }
+
+-static int cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_CUSTOM;
+ }
+diff -Naur linux-4.16/drivers/media/dvb-frontends/mb86a20s.c linux-4.16-p/drivers/media/dvb-frontends/mb86a20s.c
+--- linux-4.16/drivers/media/dvb-frontends/mb86a20s.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/mb86a20s.c 2018-04-12 15:57:20.819694353 +0200
+@@ -2055,7 +2055,7 @@
+ kfree(state);
+ }
+
+-static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/dvb-frontends/s921.c linux-4.16-p/drivers/media/dvb-frontends/s921.c
+--- linux-4.16/drivers/media/dvb-frontends/s921.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/s921.c 2018-04-12 15:57:20.819694353 +0200
+@@ -464,7 +464,7 @@
+ return rc;
+ }
+
+-static int s921_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo s921_get_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/pci/bt8xx/dst.c linux-4.16-p/drivers/media/pci/bt8xx/dst.c
+--- linux-4.16/drivers/media/pci/bt8xx/dst.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/pci/bt8xx/dst.c 2018-04-12 15:57:20.820694352 +0200
+@@ -1657,7 +1657,7 @@
+ return 0;
+ }
+
+-static int dst_get_tuning_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo dst_get_tuning_algo(struct dvb_frontend *fe)
+ {
+ return dst_algo ? DVBFE_ALGO_HW : DVBFE_ALGO_SW;
+ }
+diff -Naur linux-4.16/drivers/media/pci/pt1/va1j5jf8007s.c linux-4.16-p/drivers/media/pci/pt1/va1j5jf8007s.c
+--- linux-4.16/drivers/media/pci/pt1/va1j5jf8007s.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/pci/pt1/va1j5jf8007s.c 2018-04-12 15:57:20.820694352 +0200
+@@ -98,7 +98,7 @@
+ return 0;
+ }
+
+-static int va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/pci/pt1/va1j5jf8007t.c linux-4.16-p/drivers/media/pci/pt1/va1j5jf8007t.c
+--- linux-4.16/drivers/media/pci/pt1/va1j5jf8007t.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/pci/pt1/va1j5jf8007t.c 2018-04-12 15:57:20.820694352 +0200
+@@ -88,7 +88,7 @@
+ return 0;
+ }
+
+-static int va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/tty/Kconfig linux-4.16-p/drivers/tty/Kconfig
+--- linux-4.16/drivers/tty/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/tty/Kconfig 2018-04-12 15:57:20.820694352 +0200
+@@ -122,7 +122,6 @@
+
+ config LEGACY_PTYS
+ bool "Legacy (BSD) PTY support"
+- default y
+ ---help---
+ A pseudo terminal (PTY) is a software device consisting of two
+ halves: a master and a slave. The slave device behaves identical to
+diff -Naur linux-4.16/drivers/tty/tty_io.c linux-4.16-p/drivers/tty/tty_io.c
+--- linux-4.16/drivers/tty/tty_io.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/tty/tty_io.c 2018-04-12 15:57:20.820694352 +0200
+@@ -172,6 +172,7 @@
+ put_device(tty->dev);
+ kfree(tty->write_buf);
+ tty->magic = 0xDEADDEAD;
++ put_user_ns(tty->owner_user_ns);
+ kfree(tty);
+ }
+
+@@ -2155,11 +2156,19 @@
+ * FIXME: may race normal receive processing
+ */
+
++int tiocsti_restrict = IS_ENABLED(CONFIG_SECURITY_TIOCSTI_RESTRICT);
++
+ static int tiocsti(struct tty_struct *tty, char __user *p)
+ {
+ char ch, mbz = 0;
+ struct tty_ldisc *ld;
+
++ if (tiocsti_restrict &&
++ !ns_capable(tty->owner_user_ns, CAP_SYS_ADMIN)) {
++ dev_warn_ratelimited(tty->dev,
++ "Denied TIOCSTI ioctl for non-privileged process\n");
++ return -EPERM;
++ }
+ if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ch, p))
+@@ -2839,6 +2848,7 @@
+ tty->index = idx;
+ tty_line_name(driver, idx, tty->name);
+ tty->dev = tty_get_device(tty);
++ tty->owner_user_ns = get_user_ns(current_user_ns());
+
+ return tty;
+ }
+diff -Naur linux-4.16/drivers/usb/core/hub.c linux-4.16-p/drivers/usb/core/hub.c
+--- linux-4.16/drivers/usb/core/hub.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/usb/core/hub.c 2018-04-12 15:57:20.821694352 +0200
+@@ -41,6 +41,8 @@
+ #define USB_TP_TRANSMISSION_DELAY 40 /* ns */
+ #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
+
++extern int deny_new_usb;
++
+ /* Protect struct usb_device->state and ->children members
+ * Note: Both are also protected by ->dev.sem, except that ->state can
+ * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
+@@ -4839,6 +4841,12 @@
+ goto done;
+ return;
+ }
++
++ if (deny_new_usb) {
++ dev_err(&port_dev->dev, "denied insert of USB device on port %d\n", port1);
++ goto done;
++ }
++
+ if (hub_is_superspeed(hub->hdev))
+ unit_load = 150;
+ else
+diff -Naur linux-4.16/fs/exec.c linux-4.16-p/fs/exec.c
+--- linux-4.16/fs/exec.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/fs/exec.c 2018-04-12 15:57:20.822694352 +0200
+@@ -62,6 +62,7 @@
+ #include <linux/oom.h>
+ #include <linux/compat.h>
+ #include <linux/vmalloc.h>
++#include <linux/random.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/mmu_context.h>
+@@ -321,6 +322,8 @@
+ arch_bprm_mm_init(mm, vma);
+ up_write(&mm->mmap_sem);
+ bprm->p = vma->vm_end - sizeof(void *);
++ if (randomize_va_space)
++ bprm->p ^= get_random_int() & ~PAGE_MASK;
+ return 0;
+ err:
+ up_write(&mm->mmap_sem);
+diff -Naur linux-4.16/fs/namei.c linux-4.16-p/fs/namei.c
+--- linux-4.16/fs/namei.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/fs/namei.c 2018-04-12 15:57:20.822694352 +0200
+@@ -882,8 +882,8 @@
+ path_put(&last->link);
+ }
+
+-int sysctl_protected_symlinks __read_mostly = 0;
+-int sysctl_protected_hardlinks __read_mostly = 0;
++int sysctl_protected_symlinks __read_mostly = 1;
++int sysctl_protected_hardlinks __read_mostly = 1;
+
+ /**
+ * may_follow_link - Check symlink following for unsafe situations
+diff -Naur linux-4.16/fs/nfs/Kconfig linux-4.16-p/fs/nfs/Kconfig
+--- linux-4.16/fs/nfs/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/fs/nfs/Kconfig 2018-04-12 15:57:20.822694352 +0200
+@@ -195,4 +195,3 @@
+ bool
+ depends on NFS_FS && SUNRPC_DEBUG
+ select CRC32
+- default y
+diff -Naur linux-4.16/fs/proc/Kconfig linux-4.16-p/fs/proc/Kconfig
+--- linux-4.16/fs/proc/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/fs/proc/Kconfig 2018-04-12 15:57:20.822694352 +0200
+@@ -39,7 +39,6 @@
+ config PROC_VMCORE
+ bool "/proc/vmcore support"
+ depends on PROC_FS && CRASH_DUMP
+- default y
+ help
+ Exports the dump image of crashed kernel in ELF format.
+
+diff -Naur linux-4.16/fs/stat.c linux-4.16-p/fs/stat.c
+--- linux-4.16/fs/stat.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/fs/stat.c 2018-04-12 15:57:20.823694351 +0200
+@@ -40,8 +40,13 @@
+ stat->gid = inode->i_gid;
+ stat->rdev = inode->i_rdev;
+ stat->size = i_size_read(inode);
+- stat->atime = inode->i_atime;
+- stat->mtime = inode->i_mtime;
++ if (is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
++ stat->atime = inode->i_ctime;
++ stat->mtime = inode->i_ctime;
++ } else {
++ stat->atime = inode->i_atime;
++ stat->mtime = inode->i_mtime;
++ }
+ stat->ctime = inode->i_ctime;
+ stat->blksize = i_blocksize(inode);
+ stat->blocks = inode->i_blocks;
+@@ -75,9 +80,14 @@
+ stat->result_mask |= STATX_BASIC_STATS;
+ request_mask &= STATX_ALL;
+ query_flags &= KSTAT_QUERY_FLAGS;
+- if (inode->i_op->getattr)
+- return inode->i_op->getattr(path, stat, request_mask,
+- query_flags);
++ if (inode->i_op->getattr) {
++ int retval = inode->i_op->getattr(path, stat, request_mask, query_flags);
++ if (!retval && is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
++ stat->atime = stat->ctime;
++ stat->mtime = stat->ctime;
++ }
++ return retval;
++ }
+
+ generic_fillattr(inode, stat);
+ return 0;
+diff -Naur linux-4.16/include/linux/cache.h linux-4.16-p/include/linux/cache.h
+--- linux-4.16/include/linux/cache.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/cache.h 2018-04-12 15:57:20.823694351 +0200
+@@ -31,6 +31,8 @@
+ #define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
+ #endif
+
++#define __read_only __ro_after_init
++
+ #ifndef ____cacheline_aligned
+ #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+ #endif
+diff -Naur linux-4.16/include/linux/capability.h linux-4.16-p/include/linux/capability.h
+--- linux-4.16/include/linux/capability.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/capability.h 2018-04-12 15:57:20.823694351 +0200
+@@ -207,6 +207,7 @@
+ extern bool has_ns_capability_noaudit(struct task_struct *t,
+ struct user_namespace *ns, int cap);
+ extern bool capable(int cap);
++extern bool capable_noaudit(int cap);
+ extern bool ns_capable(struct user_namespace *ns, int cap);
+ extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
+ #else
+@@ -232,6 +233,10 @@
+ {
+ return true;
+ }
++static inline bool capable_noaudit(int cap)
++{
++ return true;
++}
+ static inline bool ns_capable(struct user_namespace *ns, int cap)
+ {
+ return true;
+diff -Naur linux-4.16/include/linux/fs.h linux-4.16-p/include/linux/fs.h
+--- linux-4.16/include/linux/fs.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/fs.h 2018-04-12 15:57:20.823694351 +0200
+@@ -3407,4 +3407,15 @@
+ extern bool path_noexec(const struct path *path);
+ extern void inode_nohighmem(struct inode *inode);
+
++extern int device_sidechannel_restrict;
++
++static inline bool is_sidechannel_device(const struct inode *inode)
++{
++ umode_t mode;
++ if (!device_sidechannel_restrict)
++ return false;
++ mode = inode->i_mode;
++ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
++}
++
+ #endif /* _LINUX_FS_H */
+diff -Naur linux-4.16/include/linux/fsnotify.h linux-4.16-p/include/linux/fsnotify.h
+--- linux-4.16/include/linux/fsnotify.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/fsnotify.h 2018-04-12 15:57:20.823694351 +0200
+@@ -181,6 +181,9 @@
+ struct inode *inode = path->dentry->d_inode;
+ __u32 mask = FS_ACCESS;
+
++ if (is_sidechannel_device(inode))
++ return;
++
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+@@ -199,6 +202,9 @@
+ struct inode *inode = path->dentry->d_inode;
+ __u32 mask = FS_MODIFY;
+
++ if (is_sidechannel_device(inode))
++ return;
++
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+diff -Naur linux-4.16/include/linux/gfp.h linux-4.16-p/include/linux/gfp.h
+--- linux-4.16/include/linux/gfp.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/gfp.h 2018-04-12 15:57:20.824694351 +0200
+@@ -513,9 +513,9 @@
+ extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
+ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+
+-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
++void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));
+ void free_pages_exact(void *virt, size_t size);
+-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
++void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));
+
+ #define __get_free_page(gfp_mask) \
+ __get_free_pages((gfp_mask), 0)
+diff -Naur linux-4.16/include/linux/highmem.h linux-4.16-p/include/linux/highmem.h
+--- linux-4.16/include/linux/highmem.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/highmem.h 2018-04-12 15:57:20.824694351 +0200
+@@ -191,6 +191,13 @@
+ kunmap_atomic(kaddr);
+ }
+
++static inline void verify_zero_highpage(struct page *page)
++{
++ void *kaddr = kmap_atomic(page);
++ BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE));
++ kunmap_atomic(kaddr);
++}
++
+ static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
+diff -Naur linux-4.16/include/linux/interrupt.h linux-4.16-p/include/linux/interrupt.h
+--- linux-4.16/include/linux/interrupt.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/interrupt.h 2018-04-12 15:57:20.824694351 +0200
+@@ -485,7 +485,7 @@
+
+ struct softirq_action
+ {
+- void (*action)(struct softirq_action *);
++ void (*action)(void);
+ };
+
+ asmlinkage void do_softirq(void);
+@@ -500,7 +500,7 @@
+ }
+ #endif
+
+-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
++extern void __init open_softirq(int nr, void (*action)(void));
+ extern void softirq_init(void);
+ extern void __raise_softirq_irqoff(unsigned int nr);
+
+diff -Naur linux-4.16/include/linux/kobject_ns.h linux-4.16-p/include/linux/kobject_ns.h
+--- linux-4.16/include/linux/kobject_ns.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/kobject_ns.h 2018-04-12 15:57:20.824694351 +0200
+@@ -45,7 +45,7 @@
+ void (*drop_ns)(void *);
+ };
+
+-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
++int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+ int kobj_ns_type_registered(enum kobj_ns_type type);
+ const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
+ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+diff -Naur linux-4.16/include/linux/mm.h linux-4.16-p/include/linux/mm.h
+--- linux-4.16/include/linux/mm.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/mm.h 2018-04-12 15:57:20.824694351 +0200
+@@ -535,7 +535,7 @@
+ }
+ #endif
+
+-extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
++extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __attribute__((alloc_size(1)));
+ static inline void *kvmalloc(size_t size, gfp_t flags)
+ {
+ return kvmalloc_node(size, flags, NUMA_NO_NODE);
+diff -Naur linux-4.16/include/linux/percpu.h linux-4.16-p/include/linux/percpu.h
+--- linux-4.16/include/linux/percpu.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/percpu.h 2018-04-12 15:57:20.825694351 +0200
+@@ -129,7 +129,7 @@
+ pcpu_fc_populate_pte_fn_t populate_pte_fn);
+ #endif
+
+-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
++extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
+ extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
+ extern bool is_kernel_percpu_address(unsigned long addr);
+
+@@ -137,8 +137,8 @@
+ extern void __init setup_per_cpu_areas(void);
+ #endif
+
+-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
+-extern void __percpu *__alloc_percpu(size_t size, size_t align);
++extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __attribute__((alloc_size(1)));
++extern void __percpu *__alloc_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
+ extern void free_percpu(void __percpu *__pdata);
+ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+
+diff -Naur linux-4.16/include/linux/perf_event.h linux-4.16-p/include/linux/perf_event.h
+--- linux-4.16/include/linux/perf_event.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/perf_event.h 2018-04-12 15:57:20.825694351 +0200
+@@ -1151,6 +1151,11 @@
+ int perf_event_max_stack_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
++static inline bool perf_paranoid_any(void)
++{
++ return sysctl_perf_event_paranoid > 2;
++}
++
+ static inline bool perf_paranoid_tracepoint_raw(void)
+ {
+ return sysctl_perf_event_paranoid > -1;
+diff -Naur linux-4.16/include/linux/slab.h linux-4.16-p/include/linux/slab.h
+--- linux-4.16/include/linux/slab.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/slab.h 2018-04-12 15:57:20.825694351 +0200
+@@ -177,8 +177,8 @@
+ /*
+ * Common kmalloc functions provided by all allocators
+ */
+-void * __must_check __krealloc(const void *, size_t, gfp_t);
+-void * __must_check krealloc(const void *, size_t, gfp_t);
++void * __must_check __krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
++void * __must_check krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
+ void kfree(const void *);
+ void kzfree(const void *);
+ size_t ksize(const void *);
+@@ -351,7 +351,7 @@
+ }
+ #endif /* !CONFIG_SLOB */
+
+-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
++void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
+ void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
+ void kmem_cache_free(struct kmem_cache *, void *);
+
+@@ -375,7 +375,7 @@
+ }
+
+ #ifdef CONFIG_NUMA
+-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
++void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
+ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
+ #else
+ static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+@@ -497,7 +497,7 @@
+ * for general use, and so are not documented here. For a full list of
+ * potential flags, always refer to linux/gfp.h.
+ */
+-static __always_inline void *kmalloc(size_t size, gfp_t flags)
++static __always_inline __attribute__((alloc_size(1))) void *kmalloc(size_t size, gfp_t flags)
+ {
+ if (__builtin_constant_p(size)) {
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+@@ -537,7 +537,7 @@
+ return 0;
+ }
+
+-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
++static __always_inline __attribute__((alloc_size(1))) void *kmalloc_node(size_t size, gfp_t flags, int node)
+ {
+ #ifndef CONFIG_SLOB
+ if (__builtin_constant_p(size) &&
+diff -Naur linux-4.16/include/linux/slub_def.h linux-4.16-p/include/linux/slub_def.h
+--- linux-4.16/include/linux/slub_def.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/slub_def.h 2018-04-12 15:57:20.825694351 +0200
+@@ -120,6 +120,11 @@
+ unsigned long random;
+ #endif
+
++#ifdef CONFIG_SLAB_CANARY
++ unsigned long random_active;
++ unsigned long random_inactive;
++#endif
++
+ #ifdef CONFIG_NUMA
+ /*
+ * Defragmentation by allocating from a remote node.
+diff -Naur linux-4.16/include/linux/string.h linux-4.16-p/include/linux/string.h
+--- linux-4.16/include/linux/string.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/string.h 2018-04-12 15:57:20.825694351 +0200
+@@ -235,10 +235,16 @@
+ void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
+ void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
+
++#ifdef CONFIG_FORTIFY_SOURCE_STRICT_STRING
++#define __string_size(p) __builtin_object_size(p, 1)
++#else
++#define __string_size(p) __builtin_object_size(p, 0)
++#endif
++
+ #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
+ {
+- size_t p_size = __builtin_object_size(p, 0);
++ size_t p_size = __string_size(p);
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+@@ -248,7 +254,7 @@
+
+ __FORTIFY_INLINE char *strcat(char *p, const char *q)
+ {
+- size_t p_size = __builtin_object_size(p, 0);
++ size_t p_size = __string_size(p);
+ if (p_size == (size_t)-1)
+ return __builtin_strcat(p, q);
+ if (strlcat(p, q, p_size) >= p_size)
+@@ -259,7 +265,7 @@
+ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
+ {
+ __kernel_size_t ret;
+- size_t p_size = __builtin_object_size(p, 0);
++ size_t p_size = __string_size(p);
+
+ /* Work around gcc excess stack consumption issue */
+ if (p_size == (size_t)-1 ||
+@@ -274,7 +280,7 @@
+ extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+ __FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
+ {
+- size_t p_size = __builtin_object_size(p, 0);
++ size_t p_size = __string_size(p);
+ __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(__func__);
+@@ -286,8 +292,8 @@
+ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
+ {
+ size_t ret;
+- size_t p_size = __builtin_object_size(p, 0);
+- size_t q_size = __builtin_object_size(q, 0);
++ size_t p_size = __string_size(p);
++ size_t q_size = __string_size(q);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __real_strlcpy(p, q, size);
+ ret = strlen(q);
+@@ -307,8 +313,8 @@
+ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
+ {
+ size_t p_len, copy_len;
+- size_t p_size = __builtin_object_size(p, 0);
+- size_t q_size = __builtin_object_size(q, 0);
++ size_t p_size = __string_size(p);
++ size_t q_size = __string_size(q);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strncat(p, q, count);
+ p_len = strlen(p);
+@@ -421,8 +427,8 @@
+ /* defined after fortified strlen and memcpy to reuse them */
+ __FORTIFY_INLINE char *strcpy(char *p, const char *q)
+ {
+- size_t p_size = __builtin_object_size(p, 0);
+- size_t q_size = __builtin_object_size(q, 0);
++ size_t p_size = __string_size(p);
++ size_t q_size = __string_size(q);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strcpy(p, q);
+ memcpy(p, q, strlen(q) + 1);
+diff -Naur linux-4.16/include/linux/tty.h linux-4.16-p/include/linux/tty.h
+--- linux-4.16/include/linux/tty.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/tty.h 2018-04-12 15:57:20.825694351 +0200
+@@ -13,6 +13,7 @@
+ #include <uapi/linux/tty.h>
+ #include <linux/rwsem.h>
+ #include <linux/llist.h>
++#include <linux/user_namespace.h>
+
+
+ /*
+@@ -335,6 +336,7 @@
+ /* If the tty has a pending do_SAK, queue it here - akpm */
+ struct work_struct SAK_work;
+ struct tty_port *port;
++ struct user_namespace *owner_user_ns;
+ } __randomize_layout;
+
+ /* Each of a tty's open files has private_data pointing to tty_file_private */
+@@ -344,6 +346,8 @@
+ struct list_head list;
+ };
+
++extern int tiocsti_restrict;
++
+ /* tty magic number */
+ #define TTY_MAGIC 0x5401
+
+diff -Naur linux-4.16/include/linux/vmalloc.h linux-4.16-p/include/linux/vmalloc.h
+--- linux-4.16/include/linux/vmalloc.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/vmalloc.h 2018-04-12 15:57:20.826694350 +0200
+@@ -68,19 +68,19 @@
+ }
+ #endif
+
+-extern void *vmalloc(unsigned long size);
+-extern void *vzalloc(unsigned long size);
+-extern void *vmalloc_user(unsigned long size);
+-extern void *vmalloc_node(unsigned long size, int node);
+-extern void *vzalloc_node(unsigned long size, int node);
+-extern void *vmalloc_exec(unsigned long size);
+-extern void *vmalloc_32(unsigned long size);
+-extern void *vmalloc_32_user(unsigned long size);
+-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
++extern void *vmalloc(unsigned long size) __attribute__((alloc_size(1)));
++extern void *vzalloc(unsigned long size) __attribute__((alloc_size(1)));
++extern void *vmalloc_user(unsigned long size) __attribute__((alloc_size(1)));
++extern void *vmalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
++extern void *vzalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
++extern void *vmalloc_exec(unsigned long size) __attribute__((alloc_size(1)));
++extern void *vmalloc_32(unsigned long size) __attribute__((alloc_size(1)));
++extern void *vmalloc_32_user(unsigned long size) __attribute__((alloc_size(1)));
++extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __attribute__((alloc_size(1)));
+ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ unsigned long start, unsigned long end, gfp_t gfp_mask,
+ pgprot_t prot, unsigned long vm_flags, int node,
+- const void *caller);
++ const void *caller) __attribute__((alloc_size(1)));
+ #ifndef CONFIG_MMU
+ extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
+ static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
+diff -Naur linux-4.16/init/Kconfig linux-4.16-p/init/Kconfig
+--- linux-4.16/init/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/init/Kconfig 2018-04-12 15:57:20.826694350 +0200
+@@ -296,6 +296,7 @@
+ config AUDIT
+ bool "Auditing support"
+ depends on NET
++ default y
+ help
+ Enable auditing infrastructure that can be used with another
+ kernel subsystem, such as SELinux (which requires this for
+@@ -1039,6 +1040,12 @@
+
+ endchoice
+
++config LOCAL_INIT
++ bool "Zero uninitialized locals"
++ help
++ Zero-fill uninitialized local variables, other than variable-length
++ arrays. Requires compiler support.
++
+ config SYSCTL
+ bool
+
+@@ -1296,8 +1303,7 @@
+ which may be appropriate on small systems without swap.
+
+ config AIO
+- bool "Enable AIO support" if EXPERT
+- default y
++ bool "Enable AIO support"
+ help
+ This option enables POSIX asynchronous I/O which may by used
+ by some high performance threaded applications. Disabling
+@@ -1502,7 +1508,7 @@
+
+ config SLUB_DEBUG
+ default y
+- bool "Enable SLUB debugging support" if EXPERT
++ bool "Enable SLUB debugging support"
+ depends on SLUB && SYSFS
+ help
+ SLUB has extensive debug support features. Disabling these can
+@@ -1526,7 +1532,6 @@
+
+ config COMPAT_BRK
+ bool "Disable heap randomization"
+- default y
+ help
+ Randomizing heap placement makes heap exploits harder, but it
+ also breaks ancient binaries (including anything libc5 based).
+@@ -1573,7 +1578,6 @@
+
+ config SLAB_MERGE_DEFAULT
+ bool "Allow slab caches to be merged"
+- default y
+ help
+ For reduced kernel memory fragmentation, slab caches can be
+ merged when they share the same size and other characteristics.
+@@ -1586,9 +1590,9 @@
+ command line.
+
+ config SLAB_FREELIST_RANDOM
+- default n
+ depends on SLAB || SLUB
+ bool "SLAB freelist randomization"
++ default y
+ help
+ Randomizes the freelist order used on creating new pages. This
+ security feature reduces the predictability of the kernel slab
+@@ -1597,12 +1601,56 @@
+ config SLAB_FREELIST_HARDENED
+ bool "Harden slab freelist metadata"
+ depends on SLUB
++ default y
+ help
+ Many kernel heap attacks try to target slab cache metadata and
+ other infrastructure. This options makes minor performance
+ sacrifies to harden the kernel slab allocator against common
+ freelist exploit methods.
+
++config SLAB_HARDENED
++ default y
++ depends on SLUB
++ bool "Hardened SLAB infrastructure"
++ help
++ Make minor performance sacrifices to harden the kernel slab
++ allocator.
++
++config SLAB_CANARY
++ depends on SLUB
++ depends on !SLAB_MERGE_DEFAULT
++ bool "SLAB canaries"
++ default y
++ help
++ Place canaries at the end of kernel slab allocations, sacrificing
++ some performance and memory usage for security.
++
++ Canaries can detect some forms of heap corruption when allocations
++ are freed and as part of the HARDENED_USERCOPY feature. It provides
++ basic use-after-free detection for HARDENED_USERCOPY.
++
++ Canaries absorb small overflows (rendering them harmless), mitigate
++ non-NUL terminated C string overflows on 64-bit via a guaranteed zero
++ byte and provide basic double-free detection.
++
++config SLAB_SANITIZE
++ bool "Sanitize SLAB allocations"
++ depends on SLUB
++ default y
++ help
++ Zero fill slab allocations on free, reducing the lifetime of
++ sensitive data and helping to mitigate use-after-free bugs.
++
++ For slabs with debug poisoning enabled, this has no impact.
++
++config SLAB_SANITIZE_VERIFY
++ depends on SLAB_SANITIZE && PAGE_SANITIZE
++ default y
++ bool "Verify sanitized SLAB allocations"
++ help
++ Verify that newly allocated slab allocations are zeroed to detect
++ write-after-free bugs.
++
+ config SLUB_CPU_PARTIAL
+ default y
+ depends on SLUB && SMP
+diff -Naur linux-4.16/kernel/audit.c linux-4.16-p/kernel/audit.c
+--- linux-4.16/kernel/audit.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/audit.c 2018-04-12 15:57:20.826694350 +0200
+@@ -1578,6 +1578,9 @@
+
+ if (audit_default == AUDIT_OFF)
+ audit_initialized = AUDIT_DISABLED;
++ else if (!audit_ever_enabled)
++ audit_initialized = AUDIT_UNINITIALIZED;
++
+ if (audit_set_enabled(audit_default))
+ panic("audit: error setting audit state (%d)\n", audit_default);
+
+diff -Naur linux-4.16/kernel/capability.c linux-4.16-p/kernel/capability.c
+--- linux-4.16/kernel/capability.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/capability.c 2018-04-12 15:57:20.826694350 +0200
+@@ -431,6 +431,12 @@
+ return ns_capable(&init_user_ns, cap);
+ }
+ EXPORT_SYMBOL(capable);
++
++bool capable_noaudit(int cap)
++{
++ return ns_capable_noaudit(&init_user_ns, cap);
++}
++EXPORT_SYMBOL(capable_noaudit);
+ #endif /* CONFIG_MULTIUSER */
+
+ /**
+diff -Naur linux-4.16/kernel/events/core.c linux-4.16-p/kernel/events/core.c
+--- linux-4.16/kernel/events/core.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/events/core.c 2018-04-12 15:57:20.828694350 +0200
+@@ -397,8 +397,13 @@
+ * 0 - disallow raw tracepoint access for unpriv
+ * 1 - disallow cpu events for unpriv
+ * 2 - disallow kernel profiling for unpriv
++ * 3 - disallow all unpriv perf event use
+ */
++#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT
++int sysctl_perf_event_paranoid __read_mostly = 3;
++#else
+ int sysctl_perf_event_paranoid __read_mostly = 2;
++#endif
+
+ /* Minimum for 512 kiB + 1 user control page */
+ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
+@@ -9915,6 +9920,9 @@
+ if (flags & ~PERF_FLAG_ALL)
+ return -EINVAL;
+
++ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
++ return -EACCES;
++
+ err = perf_copy_attr(attr_uptr, &attr);
+ if (err)
+ return err;
+diff -Naur linux-4.16/kernel/fork.c linux-4.16-p/kernel/fork.c
+--- linux-4.16/kernel/fork.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/fork.c 2018-04-12 15:57:20.828694350 +0200
+@@ -103,6 +103,11 @@
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/task.h>
++#ifdef CONFIG_USER_NS
++extern int unprivileged_userns_clone;
++#else
++#define unprivileged_userns_clone 0
++#endif
+
+ /*
+ * Minimum number of threads to boot the kernel
+@@ -1591,6 +1596,10 @@
+ if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
+ return ERR_PTR(-EINVAL);
+
++ if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
++ if (!capable(CAP_SYS_ADMIN))
++ return ERR_PTR(-EPERM);
++
+ /*
+ * Thread groups must share signals as well, and detached threads
+ * can only be started up within the thread group.
+@@ -2385,6 +2394,12 @@
+ if (unshare_flags & CLONE_NEWNS)
+ unshare_flags |= CLONE_FS;
+
++ if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
++ err = -EPERM;
++ if (!capable(CAP_SYS_ADMIN))
++ goto bad_unshare_out;
++ }
++
+ err = check_unshare_flags(unshare_flags);
+ if (err)
+ goto bad_unshare_out;
+diff -Naur linux-4.16/kernel/power/snapshot.c linux-4.16-p/kernel/power/snapshot.c
+--- linux-4.16/kernel/power/snapshot.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/power/snapshot.c 2018-04-12 15:57:20.828694350 +0200
+@@ -1138,7 +1138,7 @@
+
+ void clear_free_pages(void)
+ {
+-#ifdef CONFIG_PAGE_POISONING_ZERO
++#if defined(CONFIG_PAGE_POISONING_ZERO) || defined(CONFIG_PAGE_SANITIZE)
+ struct memory_bitmap *bm = free_pages_map;
+ unsigned long pfn;
+
+@@ -1155,7 +1155,7 @@
+ }
+ memory_bm_position_reset(bm);
+ pr_info("free pages cleared after restore\n");
+-#endif /* PAGE_POISONING_ZERO */
++#endif /* PAGE_POISONING_ZERO || PAGE_SANITIZE */
+ }
+
+ /**
+diff -Naur linux-4.16/kernel/rcu/tiny.c linux-4.16-p/kernel/rcu/tiny.c
+--- linux-4.16/kernel/rcu/tiny.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/rcu/tiny.c 2018-04-12 15:57:20.829694349 +0200
+@@ -164,7 +164,7 @@
+ }
+ }
+
+-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
++static __latent_entropy void rcu_process_callbacks(void)
+ {
+ __rcu_process_callbacks(&rcu_sched_ctrlblk);
+ __rcu_process_callbacks(&rcu_bh_ctrlblk);
+diff -Naur linux-4.16/kernel/rcu/tree.c linux-4.16-p/kernel/rcu/tree.c
+--- linux-4.16/kernel/rcu/tree.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/rcu/tree.c 2018-04-12 15:57:20.829694349 +0200
+@@ -2906,7 +2906,7 @@
+ /*
+ * Do RCU core processing for the current CPU.
+ */
+-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
++static __latent_entropy void rcu_process_callbacks(void)
+ {
+ struct rcu_state *rsp;
+
+diff -Naur linux-4.16/kernel/sched/fair.c linux-4.16-p/kernel/sched/fair.c
+--- linux-4.16/kernel/sched/fair.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/sched/fair.c 2018-04-12 15:57:20.830694349 +0200
+@@ -9387,7 +9387,7 @@
+ * run_rebalance_domains is triggered when needed from the scheduler tick.
+ * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
+ */
+-static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
++static __latent_entropy void run_rebalance_domains(void)
+ {
+ struct rq *this_rq = this_rq();
+ enum cpu_idle_type idle = this_rq->idle_balance ?
+diff -Naur linux-4.16/kernel/softirq.c linux-4.16-p/kernel/softirq.c
+--- linux-4.16/kernel/softirq.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/softirq.c 2018-04-12 15:57:20.830694349 +0200
+@@ -53,7 +53,7 @@
+ EXPORT_SYMBOL(irq_stat);
+ #endif
+
+-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
++static struct softirq_action softirq_vec[NR_SOFTIRQS] __ro_after_init __aligned(PAGE_SIZE);
+
+ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+
+@@ -282,7 +282,7 @@
+ kstat_incr_softirqs_this_cpu(vec_nr);
+
+ trace_softirq_entry(vec_nr);
+- h->action(h);
++ h->action();
+ trace_softirq_exit(vec_nr);
+ if (unlikely(prev_count != preempt_count())) {
+ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+@@ -444,7 +444,7 @@
+ or_softirq_pending(1UL << nr);
+ }
+
+-void open_softirq(int nr, void (*action)(struct softirq_action *))
++void __init open_softirq(int nr, void (*action)(void))
+ {
+ softirq_vec[nr].action = action;
+ }
+@@ -486,7 +486,7 @@
+ }
+ EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+-static __latent_entropy void tasklet_action(struct softirq_action *a)
++static __latent_entropy void tasklet_action(void)
+ {
+ struct tasklet_struct *list;
+
+@@ -522,7 +522,7 @@
+ }
+ }
+
+-static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
++static __latent_entropy void tasklet_hi_action(void)
+ {
+ struct tasklet_struct *list;
+
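
Every softirq handler loses its unused struct softirq_action argument, which
lets the vector array become __ro_after_init and page-aligned, and makes
open_softirq() registration __init-only. A sketch of the resulting handler
shape (EXAMPLE_SOFTIRQ is a hypothetical slot standing in for one of the fixed
NR_SOFTIRQS entries):

#include <linux/init.h>
#include <linux/interrupt.h>

/* New zero-argument signature for bottom-half handlers. */
static __latent_entropy void example_action(void)
{
	/* deferred work runs here */
}

static int __init example_setup(void)
{
	/* Must happen at init time: the vector is read-only afterwards. */
	open_softirq(EXAMPLE_SOFTIRQ, example_action);
	return 0;
}
early_initcall(example_setup);
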
+diff -Naur linux-4.16/kernel/sysctl.c linux-4.16-p/kernel/sysctl.c
+--- linux-4.16/kernel/sysctl.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/sysctl.c 2018-04-12 15:57:20.831694349 +0200
+@@ -67,6 +67,7 @@
+ #include <linux/bpf.h>
+ #include <linux/mount.h>
+ #include <linux/pipe_fs_i.h>
++#include <linux/tty.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/processor.h>
+@@ -99,12 +100,19 @@
+ #if defined(CONFIG_SYSCTL)
+
+ /* External variables not in a header file. */
++#if IS_ENABLED(CONFIG_USB)
++int deny_new_usb __read_mostly = 0;
++EXPORT_SYMBOL(deny_new_usb);
++#endif
+ extern int suid_dumpable;
+ #ifdef CONFIG_COREDUMP
+ extern int core_uses_pid;
+ extern char core_pattern[];
+ extern unsigned int core_pipe_limit;
+ #endif
++#ifdef CONFIG_USER_NS
++extern int unprivileged_userns_clone;
++#endif
+ extern int pid_max;
+ extern int pid_max_min, pid_max_max;
+ extern int percpu_pagelist_fraction;
+@@ -116,40 +124,43 @@
+
+ /* Constants used for minimum and maximum */
+ #ifdef CONFIG_LOCKUP_DETECTOR
+-static int sixty = 60;
++static int sixty __read_only = 60;
+ #endif
+
+-static int __maybe_unused neg_one = -1;
++static int __maybe_unused neg_one __read_only = -1;
+
+ static int zero;
+-static int __maybe_unused one = 1;
+-static int __maybe_unused two = 2;
+-static int __maybe_unused four = 4;
+-static unsigned long one_ul = 1;
+-static int one_hundred = 100;
+-static int one_thousand = 1000;
++static int __maybe_unused one __read_only = 1;
++static int __maybe_unused two __read_only = 2;
++static int __maybe_unused four __read_only = 4;
++static unsigned long one_ul __read_only = 1;
++static int one_hundred __read_only = 100;
++static int one_thousand __read_only = 1000;
+ #ifdef CONFIG_PRINTK
+-static int ten_thousand = 10000;
++static int ten_thousand __read_only = 10000;
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
+-static int six_hundred_forty_kb = 640 * 1024;
++static int six_hundred_forty_kb __read_only = 640 * 1024;
+ #endif
+
+ /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+-static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
++static unsigned long dirty_bytes_min __read_only = 2 * PAGE_SIZE;
+
+ /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
+-static int maxolduid = 65535;
+-static int minolduid;
++static int maxolduid __read_only = 65535;
++static int minolduid __read_only;
+
+-static int ngroups_max = NGROUPS_MAX;
++static int ngroups_max __read_only = NGROUPS_MAX;
+ static const int cap_last_cap = CAP_LAST_CAP;
+
+ /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */
+ #ifdef CONFIG_DETECT_HUNG_TASK
+-static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
++static unsigned long hung_task_timeout_max __read_only = (LONG_MAX/HZ);
+ #endif
+
++int device_sidechannel_restrict __read_mostly = 1;
++EXPORT_SYMBOL(device_sidechannel_restrict);
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -289,19 +300,19 @@
+ };
+
+ #ifdef CONFIG_SCHED_DEBUG
+-static int min_sched_granularity_ns = 100000; /* 100 usecs */
+-static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
+-static int min_wakeup_granularity_ns; /* 0 usecs */
+-static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
++static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
++static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
++static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
++static int max_wakeup_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
+ #ifdef CONFIG_SMP
+-static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
+-static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
++static int min_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_NONE;
++static int max_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_END-1;
+ #endif /* CONFIG_SMP */
+ #endif /* CONFIG_SCHED_DEBUG */
+
+ #ifdef CONFIG_COMPACTION
+-static int min_extfrag_threshold;
+-static int max_extfrag_threshold = 1000;
++static int min_extfrag_threshold __read_only;
++static int max_extfrag_threshold __read_only = 1000;
+ #endif
+
+ static struct ctl_table kern_table[] = {
+@@ -515,6 +526,15 @@
+ .proc_handler = proc_dointvec,
+ },
+ #endif
++#ifdef CONFIG_USER_NS
++ {
++ .procname = "unprivileged_userns_clone",
++ .data = &unprivileged_userns_clone,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++#endif
+ #ifdef CONFIG_PROC_SYSCTL
+ {
+ .procname = "tainted",
+@@ -857,6 +877,37 @@
+ .extra2 = &two,
+ },
+ #endif
++#if defined CONFIG_TTY
++ {
++ .procname = "tiocsti_restrict",
++ .data = &tiocsti_restrict,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++#endif
++ {
++ .procname = "device_sidechannel_restrict",
++ .data = &device_sidechannel_restrict,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++#if IS_ENABLED(CONFIG_USB)
++ {
++ .procname = "deny_new_usb",
++ .data = &deny_new_usb,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++#endif
+ {
+ .procname = "ngroups_max",
+ .data = &ngroups_max,
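
All four knobs surface under /proc/sys/kernel/ with mode 0644, and the
proc_dointvec_minmax_sysadmin handler used by three of them additionally gates
writes on CAP_SYS_ADMIN. A short sketch for inspecting them from userspace
(illustrative only; the paths follow the procname fields above):

#include <stdio.h>

static void show(const char *name)
{
	char path[128], buf[16];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/%s", name);
	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("%s = %s", name, buf);
		fclose(f);
	}
}

int main(void)
{
	show("unprivileged_userns_clone");	/* default 0 */
	show("tiocsti_restrict");		/* default from Kconfig */
	show("device_sidechannel_restrict");	/* default 1 */
	show("deny_new_usb");			/* default 0 */
	return 0;
}
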
+diff -Naur linux-4.16/kernel/time/hrtimer.c linux-4.16-p/kernel/time/hrtimer.c
+--- linux-4.16/kernel/time/hrtimer.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/time/hrtimer.c 2018-04-12 15:57:40.443687638 +0200
+@@ -1413,7 +1413,7 @@
+ }
+ }
+
+-static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
++static __latent_entropy void hrtimer_run_softirq(void)
+ {
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ unsigned long flags;
+diff -Naur linux-4.16/kernel/time/timer.c linux-4.16-p/kernel/time/timer.c
+--- linux-4.16/kernel/time/timer.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/time/timer.c 2018-04-12 15:57:20.831694349 +0200
+@@ -1672,7 +1672,7 @@
+ /*
+ * This function runs timers and the timer-tq in bottom half context.
+ */
+-static __latent_entropy void run_timer_softirq(struct softirq_action *h)
++static __latent_entropy void run_timer_softirq(void)
+ {
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+diff -Naur linux-4.16/kernel/user_namespace.c linux-4.16-p/kernel/user_namespace.c
+--- linux-4.16/kernel/user_namespace.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/user_namespace.c 2018-04-12 15:57:20.831694349 +0200
+@@ -26,6 +26,9 @@
+ #include <linux/bsearch.h>
+ #include <linux/sort.h>
+
++/* sysctl */
++int unprivileged_userns_clone;
++
+ static struct kmem_cache *user_ns_cachep __read_mostly;
+ static DEFINE_MUTEX(userns_state_mutex);
+
+diff -Naur linux-4.16/lib/irq_poll.c linux-4.16-p/lib/irq_poll.c
+--- linux-4.16/lib/irq_poll.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/lib/irq_poll.c 2018-04-12 15:57:20.831694349 +0200
+@@ -75,7 +75,7 @@
+ }
+ EXPORT_SYMBOL(irq_poll_complete);
+
+-static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
++static void __latent_entropy irq_poll_softirq(void)
+ {
+ struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
+ int rearm = 0, budget = irq_poll_budget;
+diff -Naur linux-4.16/lib/Kconfig.debug linux-4.16-p/lib/Kconfig.debug
+--- linux-4.16/lib/Kconfig.debug 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/lib/Kconfig.debug 2018-04-12 15:57:20.832694348 +0200
+@@ -945,6 +945,7 @@
+
+ config PANIC_ON_OOPS
+ bool "Panic on Oops"
++ default y
+ help
+ Say Y here to enable the kernel to panic when it oopses. This
+ has the same effect as setting oops=panic on the kernel command
+@@ -954,7 +955,7 @@
+ anything erroneous after an oops which could result in data
+ corruption or other issues.
+
+- Say N if unsure.
++ Say Y if unsure.
+
+ config PANIC_ON_OOPS_VALUE
+ int
+@@ -1309,6 +1310,7 @@
+ config DEBUG_LIST
+ bool "Debug linked list manipulation"
+ depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
++ default y
+ help
+ Enable this to turn on extended checks in the linked-list
+ walking routines.
+@@ -1949,6 +1951,7 @@
+ config BUG_ON_DATA_CORRUPTION
+ bool "Trigger a BUG when data corruption is detected"
+ select DEBUG_LIST
++ default y
+ help
+ Select this option if the kernel should BUG when it encounters
+ data corruption in kernel memory structures when they get checked
+@@ -1988,6 +1991,7 @@
+ config IO_STRICT_DEVMEM
+ bool "Filter I/O access to /dev/mem"
+ depends on STRICT_DEVMEM
++ default y
+ ---help---
+ If this option is disabled, you allow userspace (root) access to all
+ io-memory regardless of whether a driver is actively using that
+diff -Naur linux-4.16/lib/kobject.c linux-4.16-p/lib/kobject.c
+--- linux-4.16/lib/kobject.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/lib/kobject.c 2018-04-12 15:57:20.832694348 +0200
+@@ -956,9 +956,9 @@
+
+
+ static DEFINE_SPINLOCK(kobj_ns_type_lock);
+-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
++static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __ro_after_init;
+
+-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
++int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+ {
+ enum kobj_ns_type type = ops->type;
+ int error;
+diff -Naur linux-4.16/lib/nlattr.c linux-4.16-p/lib/nlattr.c
+--- linux-4.16/lib/nlattr.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/lib/nlattr.c 2018-04-12 15:57:20.832694348 +0200
+@@ -364,6 +364,8 @@
+ {
+ int minlen = min_t(int, count, nla_len(src));
+
++ BUG_ON(minlen < 0);
++
+ memcpy(dest, nla_data(src), minlen);
+ if (count > minlen)
+ memset(dest + minlen, 0, count - minlen);
+diff -Naur linux-4.16/lib/vsprintf.c linux-4.16-p/lib/vsprintf.c
+--- linux-4.16/lib/vsprintf.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/lib/vsprintf.c 2018-04-12 15:57:20.832694348 +0200
+@@ -1344,7 +1344,7 @@
+ return string(buf, end, uuid, spec);
+ }
+
+-int kptr_restrict __read_mostly;
++int kptr_restrict __read_mostly = 2;
+
+ static noinline_for_stack
+ char *restricted_pointer(char *buf, char *end, const void *ptr,
+diff -Naur linux-4.16/Makefile linux-4.16-p/Makefile
+--- linux-4.16/Makefile 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/Makefile 2018-04-12 15:57:20.833694348 +0200
+@@ -734,6 +734,9 @@
+ endif
+
+ ifeq ($(cc-name),clang)
++ifdef CONFIG_LOCAL_INIT
++KBUILD_CFLAGS += -fsanitize=local-init
++endif
+ KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+ KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+diff -Naur linux-4.16/mm/Kconfig linux-4.16-p/mm/Kconfig
+--- linux-4.16/mm/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/Kconfig 2018-04-12 15:57:20.833694348 +0200
+@@ -319,7 +319,8 @@
+ config DEFAULT_MMAP_MIN_ADDR
+ int "Low address space to protect from user allocation"
+ depends on MMU
+- default 4096
++ default 32768 if ARM || (ARM64 && COMPAT)
++ default 65536
+ help
+ This is the portion of low virtual memory which should be protected
+ from userspace allocation. Keeping a user from writing to low pages
+diff -Naur linux-4.16/mm/mmap.c linux-4.16-p/mm/mmap.c
+--- linux-4.16/mm/mmap.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/mmap.c 2018-04-12 15:57:20.833694348 +0200
+@@ -220,6 +220,13 @@
+
+ newbrk = PAGE_ALIGN(brk);
+ oldbrk = PAGE_ALIGN(mm->brk);
++ /* properly handle unaligned min_brk as an empty heap */
++ if (min_brk & ~PAGE_MASK) {
++ if (brk == min_brk)
++ newbrk -= PAGE_SIZE;
++ if (mm->brk == min_brk)
++ oldbrk -= PAGE_SIZE;
++ }
+ if (oldbrk == newbrk)
+ goto set_brk;
+
+diff -Naur linux-4.16/mm/page_alloc.c linux-4.16-p/mm/page_alloc.c
+--- linux-4.16/mm/page_alloc.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/page_alloc.c 2018-04-12 15:57:20.834694348 +0200
+@@ -68,6 +68,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/lockdep.h>
+ #include <linux/nmi.h>
++#include <linux/random.h>
+
+ #include <asm/sections.h>
+ #include <asm/tlbflush.h>
+@@ -101,6 +102,15 @@
+ DEFINE_MUTEX(pcpu_drain_mutex);
+ DEFINE_PER_CPU(struct work_struct, pcpu_drain);
+
++bool __meminitdata extra_latent_entropy;
++
++static int __init setup_extra_latent_entropy(char *str)
++{
++ extra_latent_entropy = true;
++ return 0;
++}
++early_param("extra_latent_entropy", setup_extra_latent_entropy);
++
+ #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
+ volatile unsigned long latent_entropy __latent_entropy;
+ EXPORT_SYMBOL(latent_entropy);
+@@ -1069,6 +1079,13 @@
+ debug_check_no_obj_freed(page_address(page),
+ PAGE_SIZE << order);
+ }
++
++ if (IS_ENABLED(CONFIG_PAGE_SANITIZE)) {
++ int i;
++ for (i = 0; i < (1 << order); i++)
++ clear_highpage(page + i);
++ }
++
+ arch_free_page(page, order);
+ kernel_poison_pages(page, 1 << order, 0);
+ kernel_map_pages(page, 1 << order, 0);
+@@ -1286,6 +1303,21 @@
+ __ClearPageReserved(p);
+ set_page_count(p, 0);
+
++ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
++ unsigned long hash = 0;
++ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
++ const unsigned long *data = lowmem_page_address(page);
++
++ for (index = 0; index < end; index++)
++ hash ^= hash + data[index];
++#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
++ latent_entropy ^= hash;
++ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
++#else
++ add_device_randomness((const void *)&hash, sizeof(hash));
++#endif
++ }
++
+ page_zone(page)->managed_pages += nr_pages;
+ set_page_refcounted(page);
+ __free_pages(page, order);
+@@ -1754,8 +1786,8 @@
+
+ static inline bool free_pages_prezeroed(void)
+ {
+- return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
+- page_poisoning_enabled();
++ return IS_ENABLED(CONFIG_PAGE_SANITIZE) ||
++ (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) && page_poisoning_enabled());
+ }
+
+ #ifdef CONFIG_DEBUG_VM
+@@ -1812,6 +1844,11 @@
+
+ post_alloc_hook(page, order, gfp_flags);
+
++ if (IS_ENABLED(CONFIG_PAGE_SANITIZE_VERIFY)) {
++ for (i = 0; i < (1 << order); i++)
++ verify_zero_highpage(page + i);
++ }
++
+ if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
+ for (i = 0; i < (1 << order); i++)
+ clear_highpage(page + i);
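
The extra_latent_entropy path folds every freed bootmem page below the 4GB
mark into a running hash with "hash ^= hash + data[index]" before feeding it
to add_device_randomness(). The same fold as a standalone sketch
(illustrative; the kernel's input is raw page contents, not a fixed array):

#include <stddef.h>
#include <stdio.h>

static unsigned long mix_page(const unsigned long *data, size_t words)
{
	unsigned long hash = 0;
	size_t i;

	/* Each word perturbs the accumulator both additively and by XOR. */
	for (i = 0; i < words; i++)
		hash ^= hash + data[i];
	return hash;
}

int main(void)
{
	unsigned long page[512] = { 1, 2, 3 };	/* stand-in for one 4KiB page */

	printf("hash: %lx\n", mix_page(page, 512));
	return 0;
}
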
+diff -Naur linux-4.16/mm/slab_common.c linux-4.16-p/mm/slab_common.c
+--- linux-4.16/mm/slab_common.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/slab_common.c 2018-04-12 15:57:20.834694348 +0200
+@@ -26,10 +26,10 @@
+
+ #include "slab.h"
+
+-enum slab_state slab_state;
++enum slab_state slab_state __ro_after_init;
+ LIST_HEAD(slab_caches);
+ DEFINE_MUTEX(slab_mutex);
+-struct kmem_cache *kmem_cache;
++struct kmem_cache *kmem_cache __ro_after_init;
+
+ #ifdef CONFIG_HARDENED_USERCOPY
+ bool usercopy_fallback __ro_after_init =
+@@ -57,7 +57,7 @@
+ /*
+ * Merge control. If this is set then no merging of slab caches will occur.
+ */
+-static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
++static bool slab_nomerge __ro_after_init = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
+
+ static int __init setup_slab_nomerge(char *str)
+ {
+@@ -968,7 +968,7 @@
+ * of two cache sizes there. The size of larger slabs can be determined using
+ * fls.
+ */
+-static s8 size_index[24] = {
++static s8 size_index[24] __ro_after_init = {
+ 3, /* 8 */
+ 4, /* 16 */
+ 5, /* 24 */
+diff -Naur linux-4.16/mm/slab.h linux-4.16-p/mm/slab.h
+--- linux-4.16/mm/slab.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/slab.h 2018-04-12 15:57:20.835694347 +0200
+@@ -312,7 +312,11 @@
+ static inline bool slab_equal_or_root(struct kmem_cache *s,
+ struct kmem_cache *p)
+ {
++#ifdef CONFIG_SLAB_HARDENED
++ return p == s;
++#else
+ return true;
++#endif
+ }
+
+ static inline const char *cache_name(struct kmem_cache *s)
+@@ -364,18 +368,26 @@
+ * to not do even the assignment. In that case, slab_equal_or_root
+ * will also be a constant.
+ */
+- if (!memcg_kmem_enabled() &&
++ if (!IS_ENABLED(CONFIG_SLAB_HARDENED) &&
++ !memcg_kmem_enabled() &&
+ !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
+ return s;
+
+ page = virt_to_head_page(x);
++#ifdef CONFIG_SLAB_HARDENED
++ BUG_ON(!PageSlab(page));
++#endif
+ cachep = page->slab_cache;
+ if (slab_equal_or_root(cachep, s))
+ return cachep;
+
+ pr_err("%s: Wrong slab cache. %s but object is from %s\n",
+ __func__, s->name, cachep->name);
++#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
++ BUG_ON(1);
++#else
+ WARN_ON_ONCE(1);
++#endif
+ return s;
+ }
+
+@@ -400,7 +412,7 @@
+ * back there or track user information then we can
+ * only use the space before that information.
+ */
+- if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
++ if ((s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) || IS_ENABLED(CONFIG_SLAB_CANARY))
+ return s->inuse;
+ /*
+ * Else we can use all the padding etc for the allocation
+diff -Naur linux-4.16/mm/slub.c linux-4.16-p/mm/slub.c
+--- linux-4.16/mm/slub.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/slub.c 2018-04-12 15:57:20.835694347 +0200
+@@ -125,6 +125,16 @@
+ #endif
+ }
+
++static inline bool has_sanitize(struct kmem_cache *s)
++{
++ return IS_ENABLED(CONFIG_SLAB_SANITIZE) && !(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON));
++}
++
++static inline bool has_sanitize_verify(struct kmem_cache *s)
++{
++ return IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && has_sanitize(s);
++}
++
+ void *fixup_red_left(struct kmem_cache *s, void *p)
+ {
+ if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+@@ -299,6 +309,35 @@
+ *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
+ }
+
++#ifdef CONFIG_SLAB_CANARY
++static inline unsigned long *get_canary(struct kmem_cache *s, void *object)
++{
++ if (s->offset)
++ return object + s->offset + sizeof(void *);
++ return object + s->inuse;
++}
++
++static inline unsigned long get_canary_value(const void *canary, unsigned long value)
++{
++ return (value ^ (unsigned long)canary) & CANARY_MASK;
++}
++
++static inline void set_canary(struct kmem_cache *s, void *object, unsigned long value)
++{
++ unsigned long *canary = get_canary(s, object);
++ *canary = get_canary_value(canary, value);
++}
++
++static inline void check_canary(struct kmem_cache *s, void *object, unsigned long value)
++{
++ unsigned long *canary = get_canary(s, object);
++ BUG_ON(*canary != get_canary_value(canary, value));
++}
++#else
++#define set_canary(s, object, value)
++#define check_canary(s, object, value)
++#endif
++
+ /* Loop over all objects in a slab */
+ #define for_each_object(__p, __s, __addr, __objects) \
+ for (__p = fixup_red_left(__s, __addr); \
+@@ -486,13 +525,13 @@
+ * Debug settings:
+ */
+ #if defined(CONFIG_SLUB_DEBUG_ON)
+-static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
++static slab_flags_t slub_debug __ro_after_init = DEBUG_DEFAULT_FLAGS;
+ #else
+-static slab_flags_t slub_debug;
++static slab_flags_t slub_debug __ro_after_init;
+ #endif
+
+-static char *slub_debug_slabs;
+-static int disable_higher_order_debug;
++static char *slub_debug_slabs __ro_after_init;
++static int disable_higher_order_debug __ro_after_init;
+
+ /*
+ * slub is about to manipulate internal object metadata. This memory lies
+@@ -552,6 +591,9 @@
+ else
+ p = object + s->inuse;
+
++ if (IS_ENABLED(CONFIG_SLAB_CANARY))
++ p = (void *)p + sizeof(void *);
++
+ return p + alloc;
+ }
+
+@@ -690,6 +732,9 @@
+ else
+ off = s->inuse;
+
++ if (IS_ENABLED(CONFIG_SLAB_CANARY))
++ off += sizeof(void *);
++
+ if (s->flags & SLAB_STORE_USER)
+ off += 2 * sizeof(struct track);
+
+@@ -819,6 +864,9 @@
+ /* Freepointer is placed after the object. */
+ off += sizeof(void *);
+
++ if (IS_ENABLED(CONFIG_SLAB_CANARY))
++ off += sizeof(void *);
++
+ if (s->flags & SLAB_STORE_USER)
+ /* We also have user information there */
+ off += 2 * sizeof(struct track);
+@@ -1420,8 +1468,9 @@
+ void *object)
+ {
+ setup_object_debug(s, page, object);
++ set_canary(s, object, s->random_inactive);
+ kasan_init_slab_obj(s, object);
+- if (unlikely(s->ctor)) {
++ if (unlikely(s->ctor) && !has_sanitize_verify(s)) {
+ kasan_unpoison_object_data(s, object);
+ s->ctor(object);
+ kasan_poison_object_data(s, object);
+@@ -2719,9 +2768,21 @@
+ stat(s, ALLOC_FASTPATH);
+ }
+
+- if (unlikely(gfpflags & __GFP_ZERO) && object)
++ if (has_sanitize_verify(s) && object) {
++ size_t offset = s->offset ? 0 : sizeof(void *);
++ BUG_ON(memchr_inv(object + offset, 0, s->object_size - offset));
++ if (s->ctor)
++ s->ctor(object);
++ if (unlikely(gfpflags & __GFP_ZERO) && offset)
++ memset(object, 0, sizeof(void *));
++ } else if (unlikely(gfpflags & __GFP_ZERO) && object)
+ memset(object, 0, s->object_size);
+
++ if (object) {
++ check_canary(s, object, s->random_inactive);
++ set_canary(s, object, s->random_active);
++ }
++
+ slab_post_alloc_hook(s, gfpflags, 1, &object);
+
+ return object;
+@@ -2928,6 +2989,27 @@
+ void *tail_obj = tail ? : head;
+ struct kmem_cache_cpu *c;
+ unsigned long tid;
++ bool sanitize = has_sanitize(s);
++
++ if (IS_ENABLED(CONFIG_SLAB_CANARY) || sanitize) {
++ __maybe_unused int offset = s->offset ? 0 : sizeof(void *);
++ void *x = head;
++
++ while (1) {
++ check_canary(s, x, s->random_active);
++ set_canary(s, x, s->random_inactive);
++
++ if (sanitize) {
++ memset(x + offset, 0, s->object_size - offset);
++ if (!IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && s->ctor)
++ s->ctor(x);
++ }
++ if (x == tail_obj)
++ break;
++ x = get_freepointer(s, x);
++ }
++ }
++
+ redo:
+ /*
+ * Determine the currently cpus per cpu slab.
+@@ -3106,7 +3188,7 @@
+ void **p)
+ {
+ struct kmem_cache_cpu *c;
+- int i;
++ int i, k;
+
+ /* memcg and kmem_cache debug support */
+ s = slab_pre_alloc_hook(s, flags);
+@@ -3143,13 +3225,29 @@
+ local_irq_enable();
+
+ /* Clear memory outside IRQ disabled fastpath loop */
+- if (unlikely(flags & __GFP_ZERO)) {
++ if (has_sanitize_verify(s)) {
++ int j;
++
++ for (j = 0; j < i; j++) {
++ size_t offset = s->offset ? 0 : sizeof(void *);
++ BUG_ON(memchr_inv(p[j] + offset, 0, s->object_size - offset));
++ if (s->ctor)
++ s->ctor(p[j]);
++ if (unlikely(flags & __GFP_ZERO) && offset)
++ memset(p[j], 0, sizeof(void *));
++ }
++ } else if (unlikely(flags & __GFP_ZERO)) {
+ int j;
+
+ for (j = 0; j < i; j++)
+ memset(p[j], 0, s->object_size);
+ }
+
++ for (k = 0; k < i; k++) {
++ check_canary(s, p[k], s->random_inactive);
++ set_canary(s, p[k], s->random_active);
++ }
++
+ /* memcg and kmem_cache debug support */
+ slab_post_alloc_hook(s, flags, size, p);
+ return i;
+@@ -3181,9 +3279,9 @@
+ * and increases the number of allocations possible without having to
+ * take the list_lock.
+ */
+-static int slub_min_order;
+-static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+-static int slub_min_objects;
++static int slub_min_order __ro_after_init;
++static int slub_max_order __ro_after_init = PAGE_ALLOC_COSTLY_ORDER;
++static int slub_min_objects __ro_after_init;
+
+ /*
+ * Calculate the order of allocation given an slab object size.
+@@ -3353,6 +3451,7 @@
+ init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
+ init_tracking(kmem_cache_node, n);
+ #endif
++ set_canary(kmem_cache_node, n, kmem_cache_node->random_active);
+ kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
+ GFP_KERNEL);
+ init_kmem_cache_node(n);
+@@ -3509,6 +3608,9 @@
+ size += sizeof(void *);
+ }
+
++ if (IS_ENABLED(CONFIG_SLAB_CANARY))
++ size += sizeof(void *);
++
+ #ifdef CONFIG_SLUB_DEBUG
+ if (flags & SLAB_STORE_USER)
+ /*
+@@ -3579,6 +3681,10 @@
+ #ifdef CONFIG_SLAB_FREELIST_HARDENED
+ s->random = get_random_long();
+ #endif
++#ifdef CONFIG_SLAB_CANARY
++ s->random_active = get_random_long();
++ s->random_inactive = get_random_long();
++#endif
+
+ if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
+ s->reserved = sizeof(struct rcu_head);
+@@ -3846,6 +3952,8 @@
+ offset -= s->red_left_pad;
+ }
+
++ check_canary(s, (void *)ptr - offset, s->random_active);
++
+ /* Allow address range falling entirely within usercopy region. */
+ if (offset >= s->useroffset &&
+ offset - s->useroffset <= s->usersize &&
+@@ -3879,7 +3987,11 @@
+ page = virt_to_head_page(object);
+
+ if (unlikely(!PageSlab(page))) {
++#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
++ BUG_ON(!PageCompound(page));
++#else
+ WARN_ON(!PageCompound(page));
++#endif
+ return PAGE_SIZE << compound_order(page);
+ }
+
+@@ -4744,7 +4856,7 @@
+ #define SO_TOTAL (1 << SL_TOTAL)
+
+ #ifdef CONFIG_MEMCG
+-static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
++static bool memcg_sysfs_enabled __ro_after_init = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
+
+ static int __init setup_slub_memcg_sysfs(char *str)
+ {
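
The canary value is derived from a per-cache random key XORed with the slot's
own address, so leaking one object's canary does not let an attacker forge
another's. A userspace model of get_canary_value() (illustrative; CANARY_MASK
here is a hypothetical stand-in for the patch's definition, which lives in a
header not shown in this hunk):

#include <stdio.h>

#define CANARY_MASK (~0xffUL)	/* stand-in definition */

static unsigned long canary_value(const void *slot, unsigned long key)
{
	return (key ^ (unsigned long)slot) & CANARY_MASK;
}

int main(void)
{
	unsigned long key = 0x5aa555aa12345678UL; /* s->random_active stand-in */
	static unsigned long slot_a, slot_b[64];

	/* Distinct slots yield distinct canaries from the same key. */
	printf("%lx %lx\n", canary_value(&slot_a, key),
	       canary_value(&slot_b[32], key));
	return 0;
}
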
+diff -Naur linux-4.16/mm/swap.c linux-4.16-p/mm/swap.c
+--- linux-4.16/mm/swap.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/swap.c 2018-04-12 15:57:20.836694347 +0200
+@@ -92,6 +92,13 @@
+ if (!PageHuge(page))
+ __page_cache_release(page);
+ dtor = get_compound_page_dtor(page);
++ if (!PageHuge(page))
++ BUG_ON(dtor != free_compound_page
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ && dtor != free_transhuge_page
++#endif
++ );
++
+ (*dtor)(page);
+ }
+
+diff -Naur linux-4.16/net/core/dev.c linux-4.16-p/net/core/dev.c
+--- linux-4.16/net/core/dev.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/net/core/dev.c 2018-04-12 15:57:20.837694346 +0200
+@@ -4196,7 +4196,7 @@
+ }
+ EXPORT_SYMBOL(netif_rx_ni);
+
+-static __latent_entropy void net_tx_action(struct softirq_action *h)
++static __latent_entropy void net_tx_action(void)
+ {
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+@@ -5745,7 +5745,7 @@
+ return work;
+ }
+
+-static __latent_entropy void net_rx_action(struct softirq_action *h)
++static __latent_entropy void net_rx_action(void)
+ {
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+ unsigned long time_limit = jiffies +
+diff -Naur linux-4.16/net/ipv4/Kconfig linux-4.16-p/net/ipv4/Kconfig
+--- linux-4.16/net/ipv4/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/net/ipv4/Kconfig 2018-04-12 15:57:20.837694346 +0200
+@@ -261,6 +261,7 @@
+
+ config SYN_COOKIES
+ bool "IP: TCP syncookie support"
++ default y
+ ---help---
+ Normal TCP/IP networking is open to an attack known as "SYN
+ flooding". This denial-of-service attack prevents legitimate remote
+diff -Naur linux-4.16/scripts/mod/modpost.c linux-4.16-p/scripts/mod/modpost.c
+--- linux-4.16/scripts/mod/modpost.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/scripts/mod/modpost.c 2018-04-12 15:57:20.837694346 +0200
+@@ -37,6 +37,7 @@
+ static int warn_unresolved = 0;
+ /* How a symbol is exported */
+ static int sec_mismatch_count = 0;
++static int writable_fptr_count = 0;
+ static int sec_mismatch_verbose = 1;
+ static int sec_mismatch_fatal = 0;
+ /* ignore missing files */
+@@ -965,6 +966,7 @@
+ ANY_EXIT_TO_ANY_INIT,
+ EXPORT_TO_INIT_EXIT,
+ EXTABLE_TO_NON_TEXT,
++ DATA_TO_TEXT
+ };
+
+ /**
+@@ -1091,6 +1093,12 @@
+ .good_tosec = {ALL_TEXT_SECTIONS , NULL},
+ .mismatch = EXTABLE_TO_NON_TEXT,
+ .handler = extable_mismatch_handler,
++},
++/* Do not reference code from writable data */
++{
++ .fromsec = { DATA_SECTIONS, NULL },
++ .bad_tosec = { ALL_TEXT_SECTIONS, NULL },
++ .mismatch = DATA_TO_TEXT
+ }
+ };
+
+@@ -1240,10 +1248,10 @@
+ continue;
+ if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
+ continue;
+- if (sym->st_value == addr)
+- return sym;
+ /* Find a symbol nearby - addr are maybe negative */
+ d = sym->st_value - addr;
++ if (d == 0)
++ return sym;
+ if (d < 0)
+ d = addr - sym->st_value;
+ if (d < distance) {
+@@ -1402,7 +1410,11 @@
+ char *prl_from;
+ char *prl_to;
+
+- sec_mismatch_count++;
++ if (mismatch->mismatch == DATA_TO_TEXT)
++ writable_fptr_count++;
++ else
++ sec_mismatch_count++;
++
+ if (!sec_mismatch_verbose)
+ return;
+
+@@ -1526,6 +1538,14 @@
+ fatal("There's a special handler for this mismatch type, "
+ "we should never get here.");
+ break;
++ case DATA_TO_TEXT:
++#if 0
++ fprintf(stderr,
++ "The %s %s:%s references\n"
++ "the %s %s:%s%s\n",
++ from, fromsec, fromsym, to, tosec, tosym, to_p);
++#endif
++ break;
+ }
+ fprintf(stderr, "\n");
+ }
+@@ -2539,6 +2559,14 @@
+ }
+ }
+ free(buf.p);
++ if (writable_fptr_count) {
++ if (!sec_mismatch_verbose) {
++ warn("modpost: Found %d writable function pointer(s).\n"
++ "To see full details build your kernel with:\n"
++ "'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n",
++ writable_fptr_count);
++ }
++ }
+
+ return err;
+ }
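
DATA_TO_TEXT catches function pointers stored in writable sections, the
classic target of pointer-overwrite exploits; counting them separately keeps
the build from failing while still surfacing the total. A minimal example of
the pattern that now gets counted (illustrative module code):

/* do_thing lives in .text; ops_table lives in writable .data and points
 * at it, so modpost records a DATA_TO_TEXT mismatch for this object. */
static void do_thing(void) { }

static void (*ops_table[])(void) = { do_thing };

void run_first(void)
{
	ops_table[0]();
}

/* Declaring the table const moves it to .rodata and clears the report:
 * static void (* const ops_table[])(void) = { do_thing }; */
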
+diff -Naur linux-4.16/security/Kconfig linux-4.16-p/security/Kconfig
+--- linux-4.16/security/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/security/Kconfig 2018-04-12 15:57:20.837694346 +0200
+@@ -8,7 +8,7 @@
+
+ config SECURITY_DMESG_RESTRICT
+ bool "Restrict unprivileged access to the kernel syslog"
+- default n
++ default y
+ help
+ This enforces restrictions on unprivileged users reading the kernel
+ syslog via dmesg(8).
+@@ -18,10 +18,34 @@
+
+ If you are unsure how to answer this question, answer N.
+
++config SECURITY_PERF_EVENTS_RESTRICT
++ bool "Restrict unprivileged use of performance events"
++ depends on PERF_EVENTS
++ default y
++ help
++ If you say Y here, the kernel.perf_event_paranoid sysctl
++ will be set to 3 by default, and no unprivileged use of the
++ perf_event_open syscall will be permitted unless it is
++ changed.
++
++config SECURITY_TIOCSTI_RESTRICT
++ bool "Restrict unprivileged use of tiocsti command injection"
++ default y
++ help
++ This enforces restrictions on unprivileged users injecting commands
++ into other processes which share a tty session using the TIOCSTI
++ ioctl. This option makes TIOCSTI use require CAP_SYS_ADMIN.
++
++ If this option is not selected, no restrictions will be enforced
++ unless the tiocsti_restrict sysctl is explicitly set to (1).
++
++ If you are unsure how to answer this question, answer N.
++
+ config SECURITY
+ bool "Enable different security models"
+ depends on SYSFS
+ depends on MULTIUSER
++ default y
+ help
+ This allows you to choose different security modules to be
+ configured into your kernel.
+@@ -48,6 +72,7 @@
+ config SECURITY_NETWORK
+ bool "Socket and Networking Security Hooks"
+ depends on SECURITY
++ default y
+ help
+ This enables the socket and networking security hooks.
+ If enabled, a security module can use these hooks to
+@@ -155,6 +180,7 @@
+ depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
+ select BUG
+ imply STRICT_DEVMEM
++ default y
+ help
+ This option checks for obviously wrong memory regions when
+ copying memory to/from the kernel (via copy_to_user() and
+@@ -192,10 +218,36 @@
+ config FORTIFY_SOURCE
+ bool "Harden common str/mem functions against buffer overflows"
+ depends on ARCH_HAS_FORTIFY_SOURCE
++ default y
+ help
+ Detect overflows of buffers in common string and memory functions
+ where the compiler can determine and validate the buffer sizes.
+
++config FORTIFY_SOURCE_STRICT_STRING
++ bool "Harden common functions against buffer overflows"
++ depends on FORTIFY_SOURCE
++ depends on EXPERT
++ help
++ Perform stricter overflow checks catching overflows within objects
++ for common C string functions rather than only between objects.
++
++ This is not yet intended for production use, only bug finding.
++
++config PAGE_SANITIZE
++ bool "Sanitize pages"
++ default y
++ help
++ Zero fill page allocations on free, reducing the lifetime of
++ sensitive data and helping to mitigate use-after-free bugs.
++
++config PAGE_SANITIZE_VERIFY
++ bool "Verify sanitized pages"
++ depends on PAGE_SANITIZE
++ default y
++ help
++ Verify that newly allocated pages are zeroed to detect
++ write-after-free bugs.
++
+ config STATIC_USERMODEHELPER
+ bool "Force all usermode helper calls through a single binary"
+ help
+diff -Naur linux-4.16/security/selinux/include/objsec.h linux-4.16-p/security/selinux/include/objsec.h
+--- linux-4.16/security/selinux/include/objsec.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/security/selinux/include/objsec.h 2018-04-12 15:57:20.837694346 +0200
+@@ -154,6 +154,6 @@
+ u32 sid; /*SID of bpf obj creater*/
+ };
+
+-extern unsigned int selinux_checkreqprot;
++extern const unsigned int selinux_checkreqprot;
+
+ #endif /* _SELINUX_OBJSEC_H_ */
+diff -Naur linux-4.16/security/selinux/Kconfig linux-4.16-p/security/selinux/Kconfig
+--- linux-4.16/security/selinux/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/security/selinux/Kconfig 2018-04-12 15:57:20.838694346 +0200
+@@ -2,7 +2,7 @@
+ bool "NSA SELinux Support"
+ depends on SECURITY_NETWORK && AUDIT && NET && INET
+ select NETWORK_SECMARK
+- default n
++ default y
+ help
+ This selects NSA Security-Enhanced Linux (SELinux).
+ You will also need a policy configuration and a labeled filesystem.
+@@ -79,23 +79,3 @@
+ This option collects access vector cache statistics to
+ /selinux/avc/cache_stats, which may be monitored via
+ tools such as avcstat.
+-
+-config SECURITY_SELINUX_CHECKREQPROT_VALUE
+- int "NSA SELinux checkreqprot default value"
+- depends on SECURITY_SELINUX
+- range 0 1
+- default 0
+- help
+- This option sets the default value for the 'checkreqprot' flag
+- that determines whether SELinux checks the protection requested
+- by the application or the protection that will be applied by the
+- kernel (including any implied execute for read-implies-exec) for
+- mmap and mprotect calls. If this option is set to 0 (zero),
+- SELinux will default to checking the protection that will be applied
+- by the kernel. If this option is set to 1 (one), SELinux will
+- default to checking the protection requested by the application.
+- The checkreqprot flag may be changed from the default via the
+- 'checkreqprot=' boot parameter. It may also be changed at runtime
+- via /selinux/checkreqprot if authorized by policy.
+-
+- If you are unsure how to answer this question, answer 0.
+diff -Naur linux-4.16/security/selinux/selinuxfs.c linux-4.16-p/security/selinux/selinuxfs.c
+--- linux-4.16/security/selinux/selinuxfs.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/security/selinux/selinuxfs.c 2018-04-12 15:57:20.838694346 +0200
+@@ -41,16 +41,7 @@
+ #include "objsec.h"
+ #include "conditional.h"
+
+-unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
+-
+-static int __init checkreqprot_setup(char *str)
+-{
+- unsigned long checkreqprot;
+- if (!kstrtoul(str, 0, &checkreqprot))
+- selinux_checkreqprot = checkreqprot ? 1 : 0;
+- return 1;
+-}
+-__setup("checkreqprot=", checkreqprot_setup);
++const unsigned int selinux_checkreqprot;
+
+ static DEFINE_MUTEX(sel_mutex);
+
+@@ -610,10 +601,9 @@
+ return PTR_ERR(page);
+
+ length = -EINVAL;
+- if (sscanf(page, "%u", &new_value) != 1)
++ if (sscanf(page, "%u", &new_value) != 1 || new_value)
+ goto out;
+
+- selinux_checkreqprot = new_value ? 1 : 0;
+ length = count;
+ out:
+ kfree(page);
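
With the setup handler gone and the variable const, checkreqprot is pinned to
0 (check the protection the kernel will actually apply), and the selinuxfs
write handler above only accepts a value of 0. The observable behaviour, as a
sketch (illustrative; the path assumes the usual selinuxfs mount point):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/fs/selinux/checkreqprot", O_WRONLY);

	if (fd < 0)
		return 1;
	/* Writing "1" now fails with EINVAL; writing "0" still succeeds. */
	if (write(fd, "1", 1) < 0)
		perror("checkreqprot is pinned to 0");
	close(fd);
	return 0;
}
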
+diff -Naur linux-4.16/security/yama/Kconfig linux-4.16-p/security/yama/Kconfig
+--- linux-4.16/security/yama/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/security/yama/Kconfig 2018-04-12 15:57:20.838694346 +0200
+@@ -1,7 +1,7 @@
+ config SECURITY_YAMA
+ bool "Yama support"
+ depends on SECURITY
+- default n
++ default y
+ help
+ This selects Yama, which extends DAC support with additional
+ system-wide security settings beyond regular Linux discretionary
diff --git a/sys-kernel/linux-sources-redcore/files/linux-hardened-v2.patch b/sys-kernel/linux-sources-redcore/files/linux-hardened-v2.patch
new file mode 100644
index 00000000..8ec7b812
--- /dev/null
+++ b/sys-kernel/linux-sources-redcore/files/linux-hardened-v2.patch
@@ -0,0 +1,2725 @@
+diff -Naur linux-4.16/arch/arm64/configs/defconfig linux-4.16-p/arch/arm64/configs/defconfig
+--- linux-4.16/arch/arm64/configs/defconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/arm64/configs/defconfig 2018-04-12 15:57:20.805694357 +0200
+@@ -1,4 +1,3 @@
+-CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_AUDIT=y
+ CONFIG_NO_HZ_IDLE=y
+diff -Naur linux-4.16/arch/arm64/include/asm/elf.h linux-4.16-p/arch/arm64/include/asm/elf.h
+--- linux-4.16/arch/arm64/include/asm/elf.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/arm64/include/asm/elf.h 2018-04-12 15:57:20.806694357 +0200
+@@ -114,10 +114,10 @@
+
+ /*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is above 4GB to leave the entire 32-bit address
++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
++#define ELF_ET_DYN_BASE 0x100000000UL
+
+ #ifndef __ASSEMBLY__
+
+@@ -158,10 +158,10 @@
+ /* 1GB of VA */
+ #ifdef CONFIG_COMPAT
+ #define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
+- 0x7ff >> (PAGE_SHIFT - 12) : \
+- 0x3ffff >> (PAGE_SHIFT - 12))
++ ((1UL << mmap_rnd_compat_bits) - 1) >> (PAGE_SHIFT - 12) : \
++ ((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
+ #else
+-#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
++#define STACK_RND_MASK (((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
+ #endif
+
+ #ifdef __AARCH64EB__
+diff -Naur linux-4.16/arch/arm64/Kconfig linux-4.16-p/arch/arm64/Kconfig
+--- linux-4.16/arch/arm64/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/arm64/Kconfig 2018-04-12 15:57:20.806694357 +0200
+@@ -974,6 +974,7 @@
+
+ config ARM64_SW_TTBR0_PAN
+ bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
++ default y
+ help
+ Enabling this option prevents the kernel from accessing
+ user-space memory directly by pointing TTBR0_EL1 to a reserved
+@@ -1127,6 +1128,7 @@
+ bool "Randomize the address of the kernel image"
+ select ARM64_MODULE_PLTS if MODULES
+ select RELOCATABLE
++ default y
+ help
+ Randomizes the virtual address at which the kernel image is
+ loaded, as a security feature that deters exploit attempts
+diff -Naur linux-4.16/arch/arm64/Kconfig.debug linux-4.16-p/arch/arm64/Kconfig.debug
+--- linux-4.16/arch/arm64/Kconfig.debug 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/arm64/Kconfig.debug 2018-04-12 15:57:20.807694356 +0200
+@@ -45,6 +45,7 @@
+ config DEBUG_WX
+ bool "Warn on W+X mappings at boot"
+ select ARM64_PTDUMP_CORE
++ default y
+ ---help---
+ Generate a warning if any W+X mappings are found at boot.
+
+diff -Naur linux-4.16/arch/arm64/kernel/process.c linux-4.16-p/arch/arm64/kernel/process.c
+--- linux-4.16/arch/arm64/kernel/process.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/arm64/kernel/process.c 2018-04-12 15:57:20.807694356 +0200
+@@ -481,9 +481,9 @@
+ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ {
+ if (is_compat_task())
+- return randomize_page(mm->brk, SZ_32M);
++ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
+ else
+- return randomize_page(mm->brk, SZ_1G);
++ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
+ }
+
+ /*
+diff -Naur linux-4.16/arch/Kconfig linux-4.16-p/arch/Kconfig
+--- linux-4.16/arch/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/Kconfig 2018-04-12 15:57:20.808694356 +0200
+@@ -454,6 +454,11 @@
+ is some slowdown of the boot process (about 0.5%) and fork and
+ irq processing.
+
++ When extra_latent_entropy is passed on the kernel command line,
++ entropy will be extracted from up to the first 4GB of RAM while the
++ runtime memory allocator is being initialized. This adds a further
++ slowdown to the boot process.

++
+ Note that entropy extracted this way is not cryptographically
+ secure!
+
+@@ -541,7 +546,7 @@
+ choice
+ prompt "Stack Protector buffer overflow detection"
+ depends on HAVE_CC_STACKPROTECTOR
+- default CC_STACKPROTECTOR_AUTO
++ default CC_STACKPROTECTOR_STRONG
+ help
+ This option turns on the "stack-protector" GCC feature. This
+ feature puts, at the beginning of functions, a canary value on
+@@ -747,7 +752,7 @@
+ int "Number of bits to use for ASLR of mmap base address" if EXPERT
+ range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
+ default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
+- default ARCH_MMAP_RND_BITS_MIN
++ default ARCH_MMAP_RND_BITS_MAX
+ depends on HAVE_ARCH_MMAP_RND_BITS
+ help
+ This value can be used to select the number of bits to use to
+@@ -781,7 +786,7 @@
+ int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
+ range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
+ default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
+- default ARCH_MMAP_RND_COMPAT_BITS_MIN
++ default ARCH_MMAP_RND_COMPAT_BITS_MAX
+ depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
+ help
+ This value can be used to select the number of bits to use to
+@@ -968,6 +973,7 @@
+
+ config REFCOUNT_FULL
+ bool "Perform full reference count validation at the expense of speed"
++ default y
+ help
+ Enabling this switches the refcounting infrastructure from a fast
+ unchecked atomic_t implementation to a fully state checked
+diff -Naur linux-4.16/arch/x86/configs/x86_64_defconfig linux-4.16-p/arch/x86/configs/x86_64_defconfig
+--- linux-4.16/arch/x86/configs/x86_64_defconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/configs/x86_64_defconfig 2018-04-12 15:57:20.808694356 +0200
+@@ -1,5 +1,4 @@
+ # CONFIG_LOCALVERSION_AUTO is not set
+-CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
+ CONFIG_BSD_PROCESS_ACCT=y
+ CONFIG_TASKSTATS=y
+diff -Naur linux-4.16/arch/x86/entry/vdso/vma.c linux-4.16-p/arch/x86/entry/vdso/vma.c
+--- linux-4.16/arch/x86/entry/vdso/vma.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/entry/vdso/vma.c 2018-04-12 15:57:20.808694356 +0200
+@@ -204,55 +204,9 @@
+ }
+
+ #ifdef CONFIG_X86_64
+-/*
+- * Put the vdso above the (randomized) stack with another randomized
+- * offset. This way there is no hole in the middle of address space.
+- * To save memory make sure it is still in the same PTE as the stack
+- * top. This doesn't give that many random bits.
+- *
+- * Note that this algorithm is imperfect: the distribution of the vdso
+- * start address within a PMD is biased toward the end.
+- *
+- * Only used for the 64-bit and x32 vdsos.
+- */
+-static unsigned long vdso_addr(unsigned long start, unsigned len)
+-{
+- unsigned long addr, end;
+- unsigned offset;
+-
+- /*
+- * Round up the start address. It can start out unaligned as a result
+- * of stack start randomization.
+- */
+- start = PAGE_ALIGN(start);
+-
+- /* Round the lowest possible end address up to a PMD boundary. */
+- end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+- if (end >= TASK_SIZE_MAX)
+- end = TASK_SIZE_MAX;
+- end -= len;
+-
+- if (end > start) {
+- offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+- addr = start + (offset << PAGE_SHIFT);
+- } else {
+- addr = start;
+- }
+-
+- /*
+- * Forcibly align the final address in case we have a hardware
+- * issue that requires alignment for performance reasons.
+- */
+- addr = align_vdso_addr(addr);
+-
+- return addr;
+-}
+-
+ static int map_vdso_randomized(const struct vdso_image *image)
+ {
+- unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
+-
+- return map_vdso(image, addr);
++ return map_vdso(image, 0);
+ }
+ #endif
+
+diff -Naur linux-4.16/arch/x86/include/asm/elf.h linux-4.16-p/arch/x86/include/asm/elf.h
+--- linux-4.16/arch/x86/include/asm/elf.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/include/asm/elf.h 2018-04-12 15:57:20.809694356 +0200
+@@ -249,11 +249,11 @@
+
+ /*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is above 4GB to leave the entire 32-bit address
++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+ #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
+- (DEFAULT_MAP_WINDOW / 3 * 2))
++ 0x100000000UL)
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+@@ -313,8 +313,8 @@
+
+ #ifdef CONFIG_X86_32
+
+-#define __STACK_RND_MASK(is32bit) (0x7ff)
+-#define STACK_RND_MASK (0x7ff)
++#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
++#define STACK_RND_MASK ((1UL << mmap_rnd_bits) - 1)
+
+ #define ARCH_DLINFO ARCH_DLINFO_IA32
+
+@@ -323,7 +323,11 @@
+ #else /* CONFIG_X86_32 */
+
+ /* 1GB for 64bit, 8MB for 32bit */
+-#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
++#ifdef CONFIG_COMPAT
++#define __STACK_RND_MASK(is32bit) ((is32bit) ? (1UL << mmap_rnd_compat_bits) - 1 : (1UL << mmap_rnd_bits) - 1)
++#else
++#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
++#endif
+ #define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())
+
+ #define ARCH_DLINFO \
+@@ -381,5 +385,4 @@
+ } ____cacheline_aligned;
+
+ extern struct va_alignment va_align;
+-extern unsigned long align_vdso_addr(unsigned long);
+ #endif /* _ASM_X86_ELF_H */
+diff -Naur linux-4.16/arch/x86/include/asm/tlbflush.h linux-4.16-p/arch/x86/include/asm/tlbflush.h
+--- linux-4.16/arch/x86/include/asm/tlbflush.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/include/asm/tlbflush.h 2018-04-12 15:57:20.809694356 +0200
+@@ -261,6 +261,7 @@
+
+ local_irq_save(flags);
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
++ BUG_ON(cr4 != __read_cr4());
+ if ((cr4 | mask) != cr4)
+ __cr4_set(cr4 | mask);
+ local_irq_restore(flags);
+@@ -273,6 +274,7 @@
+
+ local_irq_save(flags);
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
++ BUG_ON(cr4 != __read_cr4());
+ if ((cr4 & ~mask) != cr4)
+ __cr4_set(cr4 & ~mask);
+ local_irq_restore(flags);
+@@ -283,6 +285,7 @@
+ unsigned long cr4;
+
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
++ BUG_ON(cr4 != __read_cr4());
+ __cr4_set(cr4 ^ mask);
+ }
+
+@@ -389,6 +392,7 @@
+ raw_local_irq_save(flags);
+
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
++ BUG_ON(cr4 != __read_cr4());
+ /* toggle PGE */
+ native_write_cr4(cr4 ^ X86_CR4_PGE);
+ /* write old PGE again and flush TLBs */
+diff -Naur linux-4.16/arch/x86/Kconfig linux-4.16-p/arch/x86/Kconfig
+--- linux-4.16/arch/x86/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/Kconfig 2018-04-12 15:57:20.810694356 +0200
+@@ -1208,8 +1208,7 @@
+ default X86_LEGACY_VM86
+
+ config X86_16BIT
+- bool "Enable support for 16-bit segments" if EXPERT
+- default y
++ bool "Enable support for 16-bit segments"
+ depends on MODIFY_LDT_SYSCALL
+ ---help---
+ This option is required by programs like Wine to run 16-bit
+@@ -2299,7 +2298,7 @@
+ choice
+ prompt "vsyscall table for legacy applications"
+ depends on X86_64
+- default LEGACY_VSYSCALL_EMULATE
++ default LEGACY_VSYSCALL_NONE
+ help
+ Legacy user code that does not know how to find the vDSO expects
+ to be able to issue three syscalls by calling fixed addresses in
+@@ -2380,8 +2379,7 @@
+ be set to 'N' under normal conditions.
+
+ config MODIFY_LDT_SYSCALL
+- bool "Enable the LDT (local descriptor table)" if EXPERT
+- default y
++ bool "Enable the LDT (local descriptor table)"
+ ---help---
+ Linux can allow user programs to install a per-process x86
+ Local Descriptor Table (LDT) using the modify_ldt(2) system
+diff -Naur linux-4.16/arch/x86/Kconfig.debug linux-4.16-p/arch/x86/Kconfig.debug
+--- linux-4.16/arch/x86/Kconfig.debug 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/Kconfig.debug 2018-04-12 15:57:20.810694356 +0200
+@@ -101,6 +101,7 @@
+ config DEBUG_WX
+ bool "Warn on W+X mappings at boot"
+ select X86_PTDUMP_CORE
++ default y
+ ---help---
+ Generate a warning if any W+X mappings are found at boot.
+
+diff -Naur linux-4.16/arch/x86/kernel/cpu/common.c linux-4.16-p/arch/x86/kernel/cpu/common.c
+--- linux-4.16/arch/x86/kernel/cpu/common.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/kernel/cpu/common.c 2018-04-12 15:57:20.811694355 +0200
+@@ -1617,7 +1617,6 @@
+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
+ barrier();
+
+- x86_configure_nx();
+ x2apic_setup();
+
+ /*
+diff -Naur linux-4.16/arch/x86/kernel/process.c linux-4.16-p/arch/x86/kernel/process.c
+--- linux-4.16/arch/x86/kernel/process.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/kernel/process.c 2018-04-12 15:57:20.812694355 +0200
+@@ -38,6 +38,8 @@
+ #include <asm/switch_to.h>
+ #include <asm/desc.h>
+ #include <asm/prctl.h>
++#include <asm/elf.h>
++#include <linux/sizes.h>
+
+ /*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+@@ -572,7 +574,10 @@
+
+ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ {
+- return randomize_page(mm->brk, 0x02000000);
++ if (mmap_is_ia32())
++ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
++ else
++ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
+ }
+
+ /*
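
Instead of randomize_page()'s fixed 32MB window, the heap base is now placed
up to 1GB past the unrandomized brk (32MB for ia32 tasks), offset by at least
one page. The placement arithmetic as a standalone sketch (illustrative
values only):

#include <stdio.h>

#define SZ_32M		(32UL << 20)
#define SZ_1G		(1UL << 30)
#define PAGE_SIZE	4096UL

/* Mirrors the patched arch_randomize_brk(); rnd stands in for
 * get_random_long(). */
static unsigned long randomized_brk(unsigned long brk, unsigned long rnd,
				    int is_ia32)
{
	return brk + rnd % (is_ia32 ? SZ_32M : SZ_1G) + PAGE_SIZE;
}

int main(void)
{
	unsigned long brk = 0x555555560000UL;	/* hypothetical image end */

	printf("64-bit brk range: [%lx, %lx]\n",
	       randomized_brk(brk, 0, 0),
	       randomized_brk(brk, SZ_1G - 1, 0));
	return 0;
}
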
+diff -Naur linux-4.16/arch/x86/kernel/sys_x86_64.c linux-4.16-p/arch/x86/kernel/sys_x86_64.c
+--- linux-4.16/arch/x86/kernel/sys_x86_64.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/kernel/sys_x86_64.c 2018-04-12 15:57:20.812694355 +0200
+@@ -54,13 +54,6 @@
+ return va_align.bits & get_align_mask();
+ }
+
+-unsigned long align_vdso_addr(unsigned long addr)
+-{
+- unsigned long align_mask = get_align_mask();
+- addr = (addr + align_mask) & ~align_mask;
+- return addr | get_align_bits();
+-}
+-
+ static int __init control_va_addr_alignment(char *str)
+ {
+ /* guard against enabling this on other CPU families */
+@@ -122,10 +115,7 @@
+ }
+
+ *begin = get_mmap_base(1);
+- if (in_compat_syscall())
+- *end = task_size_32bit();
+- else
+- *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
++ *end = get_mmap_base(0);
+ }
+
+ unsigned long
+@@ -210,7 +200,7 @@
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+- info.low_limit = PAGE_SIZE;
++ info.low_limit = get_mmap_base(1);
+ info.high_limit = get_mmap_base(0);
+
+ /*
+diff -Naur linux-4.16/arch/x86/mm/init_32.c linux-4.16-p/arch/x86/mm/init_32.c
+--- linux-4.16/arch/x86/mm/init_32.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/mm/init_32.c 2018-04-12 15:57:20.812694355 +0200
+@@ -558,7 +558,7 @@
+ permanent_kmaps_init(pgd_base);
+ }
+
+-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
++pteval_t __supported_pte_mask __ro_after_init = ~(_PAGE_NX | _PAGE_GLOBAL);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ /* user-defined highmem size */
+@@ -866,7 +866,7 @@
+ #endif
+ #endif
+
+-int kernel_set_to_readonly __read_mostly;
++int kernel_set_to_readonly __ro_after_init;
+
+ void set_kernel_text_rw(void)
+ {
+@@ -918,12 +918,11 @@
+ unsigned long start = PFN_ALIGN(_text);
+ unsigned long size = PFN_ALIGN(_etext) - start;
+
++ kernel_set_to_readonly = 1;
+ set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+ printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+ size >> 10);
+
+- kernel_set_to_readonly = 1;
+-
+ #ifdef CONFIG_CPA_DEBUG
+ printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
+ start, start+size);
+diff -Naur linux-4.16/arch/x86/mm/init_64.c linux-4.16-p/arch/x86/mm/init_64.c
+--- linux-4.16/arch/x86/mm/init_64.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/arch/x86/mm/init_64.c 2018-04-12 15:57:20.813694355 +0200
+@@ -65,7 +65,7 @@
+ * around without checking the pgd every time.
+ */
+
+-pteval_t __supported_pte_mask __read_mostly = ~0;
++pteval_t __supported_pte_mask __ro_after_init = ~0;
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ int force_personality32;
+@@ -1195,7 +1195,7 @@
+ mem_init_print_info(NULL);
+ }
+
+-int kernel_set_to_readonly;
++int kernel_set_to_readonly __ro_after_init;
+
+ void set_kernel_text_rw(void)
+ {
+@@ -1244,9 +1244,8 @@
+
+ printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+ (end - start) >> 10);
+- set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+-
+ kernel_set_to_readonly = 1;
++ set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+
+ /*
+ * The rodata/data/bss/brk section (but not the kernel text!)
+diff -Naur linux-4.16/block/blk-softirq.c linux-4.16-p/block/blk-softirq.c
+--- linux-4.16/block/blk-softirq.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/block/blk-softirq.c 2018-04-12 15:57:20.813694355 +0200
+@@ -20,7 +20,7 @@
+ * Softirq action handler - move entries to local list and loop over them
+ * while passing them to the queue registered handler.
+ */
+-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
++static __latent_entropy void blk_done_softirq(void)
+ {
+ struct list_head *cpu_list, local_list;
+
+diff -Naur linux-4.16/Documentation/admin-guide/kernel-parameters.txt linux-4.16-p/Documentation/admin-guide/kernel-parameters.txt
+--- linux-4.16/Documentation/admin-guide/kernel-parameters.txt 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/Documentation/admin-guide/kernel-parameters.txt 2018-04-12 15:57:20.815694354 +0200
+@@ -496,16 +496,6 @@
+ nosocket -- Disable socket memory accounting.
+ nokmem -- Disable kernel memory accounting.
+
+- checkreqprot [SELINUX] Set initial checkreqprot flag value.
+- Format: { "0" | "1" }
+- See security/selinux/Kconfig help text.
+- 0 -- check protection applied by kernel (includes
+- any implied execute protection).
+- 1 -- check protection requested by application.
+- Default value is set via a kernel config option.
+- Value can be changed at runtime via
+- /selinux/checkreqprot.
+-
+ cio_ignore= [S390]
+ See Documentation/s390/CommonIO for details.
+ clk_ignore_unused
+@@ -2943,6 +2933,11 @@
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ extra_latent_entropy
++ Enable a very simple form of latent entropy extraction
++ from the first 4GB of memory as the bootmem allocator
++ passes the memory pages to the buddy allocator.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
+diff -Naur linux-4.16/Documentation/sysctl/kernel.txt linux-4.16-p/Documentation/sysctl/kernel.txt
+--- linux-4.16/Documentation/sysctl/kernel.txt 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/Documentation/sysctl/kernel.txt 2018-04-12 15:57:20.815694354 +0200
+@@ -92,6 +92,7 @@
+ - sysctl_writes_strict
+ - tainted
+ - threads-max
++- tiocsti_restrict
+ - unknown_nmi_panic
+ - watchdog
+ - watchdog_thresh
+@@ -1014,6 +1015,26 @@
+
+ ==============================================================
+
++tiocsti_restrict:
++
++This toggle indicates whether unprivileged users are prevented
++from using the TIOCSTI ioctl to inject commands into other processes
++which share a tty session.
++
++When tiocsti_restrict is set to (0) there are no restrictions (except
++the default restriction of only being able to inject commands into
++one's own tty). When tiocsti_restrict is set to (1), users must
++have CAP_SYS_ADMIN to use the TIOCSTI ioctl.
++
++When user namespaces are in use, the check for the capability
++CAP_SYS_ADMIN is done against the user namespace that originally
++opened the tty.
++
++The kernel config option CONFIG_SECURITY_TIOCSTI_RESTRICT sets the
++default value of tiocsti_restrict.
++
++==============================================================
++
+ unknown_nmi_panic:
+
+ The value in this file affects behavior of handling NMI. When the
+diff -Naur linux-4.16/drivers/ata/libata-core.c linux-4.16-p/drivers/ata/libata-core.c
+--- linux-4.16/drivers/ata/libata-core.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/ata/libata-core.c 2018-04-12 15:57:20.817694353 +0200
+@@ -5148,7 +5148,7 @@
+ struct ata_port *ap;
+ unsigned int tag;
+
+- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ ap = qc->ap;
+
+ qc->flags = 0;
+@@ -5165,7 +5165,7 @@
+ struct ata_port *ap;
+ struct ata_link *link;
+
+- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
+ ap = qc->ap;
+ link = qc->dev->link;
+diff -Naur linux-4.16/drivers/char/Kconfig linux-4.16-p/drivers/char/Kconfig
+--- linux-4.16/drivers/char/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/char/Kconfig 2018-04-12 15:57:20.817694353 +0200
+@@ -9,7 +9,6 @@
+
+ config DEVMEM
+ bool "/dev/mem virtual device support"
+- default y
+ help
+ Say Y here if you want to support the /dev/mem device.
+ The /dev/mem device is used to access areas of physical
+@@ -568,7 +567,6 @@
+ config DEVPORT
+ bool "/dev/port character device"
+ depends on ISA || PCI
+- default y
+ help
+ Say Y here if you want to support the /dev/port device. The /dev/port
+ device is similar to /dev/mem, but for I/O ports.
+diff -Naur linux-4.16/drivers/media/dvb-frontends/cx24116.c linux-4.16-p/drivers/media/dvb-frontends/cx24116.c
+--- linux-4.16/drivers/media/dvb-frontends/cx24116.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/cx24116.c 2018-04-12 15:57:20.818694353 +0200
+@@ -1456,7 +1456,7 @@
+ return cx24116_read_status(fe, status);
+ }
+
+-static int cx24116_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24116_get_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
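+
A note on this change (and the matching ones in the cx24117, cx24120, cx24123, cxd2820r, mb86a20s, s921, dst and va1j5jf8007s/t hunks below): get_frontend_algo is stored in a function-pointer table, and type-based indirect-call protections such as CFI require the stored function's prototype to match the slot exactly, so returning int where enum dvbfe_algo is expected is no longer harmless. A compilable sketch of the mismatch, using a simplified stand-in for the ops struct (the struct and enum values here are illustrative, not the kernel's full definitions):

    /* illustrative stand-in, not the real dvb_frontend_ops */
    struct dvb_frontend;
    enum dvbfe_algo { DVBFE_ALGO_HW = (1 << 0), DVBFE_ALGO_SW = (1 << 1) };

    struct ops_fragment {
            enum dvbfe_algo (*get_frontend_algo)(struct dvb_frontend *fe);
    };

    static int old_get_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; }

    /* struct ops_fragment ops = { .get_frontend_algo = old_get_algo };
     * ^ incompatible pointer type: int (*)(...) is not
     *   enum dvbfe_algo (*)(...), which is exactly the mismatch an
     *   indirect-call check would trap at runtime. */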
+diff -Naur linux-4.16/drivers/media/dvb-frontends/cx24117.c linux-4.16-p/drivers/media/dvb-frontends/cx24117.c
+--- linux-4.16/drivers/media/dvb-frontends/cx24117.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/cx24117.c 2018-04-12 15:57:20.818694353 +0200
+@@ -1555,7 +1555,7 @@
+ return cx24117_read_status(fe, status);
+ }
+
+-static int cx24117_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24117_get_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/dvb-frontends/cx24120.c linux-4.16-p/drivers/media/dvb-frontends/cx24120.c
+--- linux-4.16/drivers/media/dvb-frontends/cx24120.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/cx24120.c 2018-04-12 15:57:20.818694353 +0200
+@@ -1491,7 +1491,7 @@
+ return cx24120_read_status(fe, status);
+ }
+
+-static int cx24120_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24120_get_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/dvb-frontends/cx24123.c linux-4.16-p/drivers/media/dvb-frontends/cx24123.c
+--- linux-4.16/drivers/media/dvb-frontends/cx24123.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/cx24123.c 2018-04-12 15:57:20.819694353 +0200
+@@ -1005,7 +1005,7 @@
+ return retval;
+ }
+
+-static int cx24123_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cx24123_get_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/dvb-frontends/cxd2820r_core.c linux-4.16-p/drivers/media/dvb-frontends/cxd2820r_core.c
+--- linux-4.16/drivers/media/dvb-frontends/cxd2820r_core.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/cxd2820r_core.c 2018-04-12 15:57:20.819694353 +0200
+@@ -403,7 +403,7 @@
+ return DVBFE_ALGO_SEARCH_ERROR;
+ }
+
+-static int cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_CUSTOM;
+ }
+diff -Naur linux-4.16/drivers/media/dvb-frontends/mb86a20s.c linux-4.16-p/drivers/media/dvb-frontends/mb86a20s.c
+--- linux-4.16/drivers/media/dvb-frontends/mb86a20s.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/mb86a20s.c 2018-04-12 15:57:20.819694353 +0200
+@@ -2055,7 +2055,7 @@
+ kfree(state);
+ }
+
+-static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/dvb-frontends/s921.c linux-4.16-p/drivers/media/dvb-frontends/s921.c
+--- linux-4.16/drivers/media/dvb-frontends/s921.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/dvb-frontends/s921.c 2018-04-12 15:57:20.819694353 +0200
+@@ -464,7 +464,7 @@
+ return rc;
+ }
+
+-static int s921_get_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo s921_get_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/pci/bt8xx/dst.c linux-4.16-p/drivers/media/pci/bt8xx/dst.c
+--- linux-4.16/drivers/media/pci/bt8xx/dst.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/pci/bt8xx/dst.c 2018-04-12 15:57:20.820694352 +0200
+@@ -1657,7 +1657,7 @@
+ return 0;
+ }
+
+-static int dst_get_tuning_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo dst_get_tuning_algo(struct dvb_frontend *fe)
+ {
+ return dst_algo ? DVBFE_ALGO_HW : DVBFE_ALGO_SW;
+ }
+diff -Naur linux-4.16/drivers/media/pci/pt1/va1j5jf8007s.c linux-4.16-p/drivers/media/pci/pt1/va1j5jf8007s.c
+--- linux-4.16/drivers/media/pci/pt1/va1j5jf8007s.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/pci/pt1/va1j5jf8007s.c 2018-04-12 15:57:20.820694352 +0200
+@@ -98,7 +98,7 @@
+ return 0;
+ }
+
+-static int va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/media/pci/pt1/va1j5jf8007t.c linux-4.16-p/drivers/media/pci/pt1/va1j5jf8007t.c
+--- linux-4.16/drivers/media/pci/pt1/va1j5jf8007t.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/media/pci/pt1/va1j5jf8007t.c 2018-04-12 15:57:20.820694352 +0200
+@@ -88,7 +88,7 @@
+ return 0;
+ }
+
+-static int va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
++static enum dvbfe_algo va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
+ {
+ return DVBFE_ALGO_HW;
+ }
+diff -Naur linux-4.16/drivers/tty/Kconfig linux-4.16-p/drivers/tty/Kconfig
+--- linux-4.16/drivers/tty/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/tty/Kconfig 2018-04-12 15:57:20.820694352 +0200
+@@ -122,7 +122,6 @@
+
+ config LEGACY_PTYS
+ bool "Legacy (BSD) PTY support"
+- default y
+ ---help---
+ A pseudo terminal (PTY) is a software device consisting of two
+ halves: a master and a slave. The slave device behaves identical to
+diff -Naur linux-4.16/drivers/tty/tty_io.c linux-4.16-p/drivers/tty/tty_io.c
+--- linux-4.16/drivers/tty/tty_io.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/tty/tty_io.c 2018-04-12 15:57:20.820694352 +0200
+@@ -172,6 +172,7 @@
+ put_device(tty->dev);
+ kfree(tty->write_buf);
+ tty->magic = 0xDEADDEAD;
++ put_user_ns(tty->owner_user_ns);
+ kfree(tty);
+ }
+
+@@ -2155,11 +2156,19 @@
+ * FIXME: may race normal receive processing
+ */
+
++int tiocsti_restrict = IS_ENABLED(CONFIG_SECURITY_TIOCSTI_RESTRICT);
++
+ static int tiocsti(struct tty_struct *tty, char __user *p)
+ {
+ char ch, mbz = 0;
+ struct tty_ldisc *ld;
+
++ if (tiocsti_restrict &&
++ !ns_capable(tty->owner_user_ns, CAP_SYS_ADMIN)) {
++ dev_warn_ratelimited(tty->dev,
++ "Denied TIOCSTI ioctl for non-privileged process\n");
++ return -EPERM;
++ }
+ if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ch, p))
+@@ -2839,6 +2848,7 @@
+ tty->index = idx;
+ tty_line_name(driver, idx, tty->name);
+ tty->dev = tty_get_device(tty);
++ tty->owner_user_ns = get_user_ns(current_user_ns());
+
+ return tty;
+ }
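+
For context, a minimal userspace probe of the new restriction (a hypothetical demo, not part of the patch): with tiocsti_restrict set to 1 and no CAP_SYS_ADMIN in the tty's owning user namespace, the ioctl below fails with EPERM instead of queueing the byte as terminal input.

    #include <sys/ioctl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char ch = 'x';

            /* try to push one byte into our own tty's input queue */
            if (ioctl(STDIN_FILENO, TIOCSTI, &ch) != 0)
                    perror("ioctl(TIOCSTI)");  /* EPERM when tiocsti_restrict=1 */
            return 0;
    }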
+diff -Naur linux-4.16/drivers/usb/core/hub.c linux-4.16-p/drivers/usb/core/hub.c
+--- linux-4.16/drivers/usb/core/hub.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/drivers/usb/core/hub.c 2018-04-12 15:57:20.821694352 +0200
+@@ -41,6 +41,8 @@
+ #define USB_TP_TRANSMISSION_DELAY 40 /* ns */
+ #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
+
++extern int deny_new_usb;
++
+ /* Protect struct usb_device->state and ->children members
+ * Note: Both are also protected by ->dev.sem, except that ->state can
+ * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
+@@ -4839,6 +4841,12 @@
+ goto done;
+ return;
+ }
++
++ if (deny_new_usb) {
++ dev_err(&port_dev->dev, "denied insert of USB device on port %d\n", port1);
++ goto done;
++ }
++
+ if (hub_is_superspeed(hub->hdev))
+ unit_load = 150;
+ else
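+
The deny_new_usb flag referenced here is defined in the kernel/sysctl.c hunk later in this patch and exposed as /proc/sys/kernel/deny_new_usb. A hypothetical userspace toggle, assuming that sysctl path:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/deny_new_usb", "w");

            if (!f) {
                    perror("deny_new_usb");
                    return 1;
            }
            fputs("1\n", f);  /* newly plugged USB devices are now rejected */
            return fclose(f) ? 1 : 0;
    }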
+diff -Naur linux-4.16/fs/exec.c linux-4.16-p/fs/exec.c
+--- linux-4.16/fs/exec.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/fs/exec.c 2018-04-12 15:57:20.822694352 +0200
+@@ -62,6 +62,7 @@
+ #include <linux/oom.h>
+ #include <linux/compat.h>
+ #include <linux/vmalloc.h>
++#include <linux/random.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/mmu_context.h>
+@@ -321,6 +322,8 @@
+ arch_bprm_mm_init(mm, vma);
+ up_write(&mm->mmap_sem);
+ bprm->p = vma->vm_end - sizeof(void *);
++ if (randomize_va_space)
++ bprm->p ^= get_random_int() & ~PAGE_MASK;
+ return 0;
+ err:
+ up_write(&mm->mmap_sem);
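+
The XOR above adds sub-page entropy to the initial stack pointer: ~PAGE_MASK keeps only the in-page offset bits (0xfff with 4 KiB pages), so the page-level layout chosen by ASLR is untouched while the exact start of the argument area inside its page becomes unpredictable. The arithmetic in isolation (sketch, assuming 4 KiB pages):

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static unsigned long mix(unsigned long p, unsigned long rnd)
    {
            return p ^ (rnd & ~PAGE_MASK);  /* perturbs bits 0..11 only */
    }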
+diff -Naur linux-4.16/fs/namei.c linux-4.16-p/fs/namei.c
+--- linux-4.16/fs/namei.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/fs/namei.c 2018-04-12 15:57:20.822694352 +0200
+@@ -882,8 +882,8 @@
+ path_put(&last->link);
+ }
+
+-int sysctl_protected_symlinks __read_mostly = 0;
+-int sysctl_protected_hardlinks __read_mostly = 0;
++int sysctl_protected_symlinks __read_mostly = 1;
++int sysctl_protected_hardlinks __read_mostly = 1;
+
+ /**
+ * may_follow_link - Check symlink following for unsafe situations
+diff -Naur linux-4.16/fs/nfs/Kconfig linux-4.16-p/fs/nfs/Kconfig
+--- linux-4.16/fs/nfs/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/fs/nfs/Kconfig 2018-04-12 15:57:20.822694352 +0200
+@@ -195,4 +195,3 @@
+ bool
+ depends on NFS_FS && SUNRPC_DEBUG
+ select CRC32
+- default y
+diff -Naur linux-4.16/fs/proc/Kconfig linux-4.16-p/fs/proc/Kconfig
+--- linux-4.16/fs/proc/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/fs/proc/Kconfig 2018-04-12 15:57:20.822694352 +0200
+@@ -39,7 +39,6 @@
+ config PROC_VMCORE
+ bool "/proc/vmcore support"
+ depends on PROC_FS && CRASH_DUMP
+- default y
+ help
+ Exports the dump image of crashed kernel in ELF format.
+
+diff -Naur linux-4.16/fs/stat.c linux-4.16-p/fs/stat.c
+--- linux-4.16/fs/stat.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/fs/stat.c 2018-04-12 15:57:20.823694351 +0200
+@@ -40,8 +40,13 @@
+ stat->gid = inode->i_gid;
+ stat->rdev = inode->i_rdev;
+ stat->size = i_size_read(inode);
+- stat->atime = inode->i_atime;
+- stat->mtime = inode->i_mtime;
++ if (is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
++ stat->atime = inode->i_ctime;
++ stat->mtime = inode->i_ctime;
++ } else {
++ stat->atime = inode->i_atime;
++ stat->mtime = inode->i_mtime;
++ }
+ stat->ctime = inode->i_ctime;
+ stat->blksize = i_blocksize(inode);
+ stat->blocks = inode->i_blocks;
+@@ -75,9 +80,14 @@
+ stat->result_mask |= STATX_BASIC_STATS;
+ request_mask &= STATX_ALL;
+ query_flags &= KSTAT_QUERY_FLAGS;
+- if (inode->i_op->getattr)
+- return inode->i_op->getattr(path, stat, request_mask,
+- query_flags);
++ if (inode->i_op->getattr) {
++ int retval = inode->i_op->getattr(path, stat, request_mask, query_flags);
++ if (!retval && is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
++ stat->atime = stat->ctime;
++ stat->mtime = stat->ctime;
++ }
++ return retval;
++ }
+
+ generic_fillattr(inode, stat);
+ return 0;
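+
The observable effect, as a hypothetical userspace demo: for a world-accessible device node, an unprivileged stat() now reports ctime in place of atime and mtime, closing the timestamp side channel (such as inferring another user's activity from access times on shared device nodes).

    #include <sys/stat.h>
    #include <stdio.h>

    int main(void)
    {
            struct stat st;

            if (stat("/dev/urandom", &st) != 0) {
                    perror("stat");
                    return 1;
            }
            /* with device_sidechannel_restrict=1 and no CAP_MKNOD, atime
             * and mtime below both equal ctime */
            printf("atime=%ld mtime=%ld ctime=%ld\n",
                   (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime);
            return 0;
    }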
+diff -Naur linux-4.16/include/linux/cache.h linux-4.16-p/include/linux/cache.h
+--- linux-4.16/include/linux/cache.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/cache.h 2018-04-12 15:57:20.823694351 +0200
+@@ -31,6 +31,8 @@
+ #define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
+ #endif
+
++#define __read_only __ro_after_init
++
+ #ifndef ____cacheline_aligned
+ #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+ #endif
+diff -Naur linux-4.16/include/linux/capability.h linux-4.16-p/include/linux/capability.h
+--- linux-4.16/include/linux/capability.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/capability.h 2018-04-12 15:57:20.823694351 +0200
+@@ -207,6 +207,7 @@
+ extern bool has_ns_capability_noaudit(struct task_struct *t,
+ struct user_namespace *ns, int cap);
+ extern bool capable(int cap);
++extern bool capable_noaudit(int cap);
+ extern bool ns_capable(struct user_namespace *ns, int cap);
+ extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
+ #else
+@@ -232,6 +233,10 @@
+ {
+ return true;
+ }
++static inline bool capable_noaudit(int cap)
++{
++ return true;
++}
+ static inline bool ns_capable(struct user_namespace *ns, int cap)
+ {
+ return true;
+diff -Naur linux-4.16/include/linux/fs.h linux-4.16-p/include/linux/fs.h
+--- linux-4.16/include/linux/fs.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/fs.h 2018-04-12 15:57:20.823694351 +0200
+@@ -3407,4 +3407,15 @@
+ extern bool path_noexec(const struct path *path);
+ extern void inode_nohighmem(struct inode *inode);
+
++extern int device_sidechannel_restrict;
++
++static inline bool is_sidechannel_device(const struct inode *inode)
++{
++ umode_t mode;
++ if (!device_sidechannel_restrict)
++ return false;
++ mode = inode->i_mode;
++ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
++}
++
+ #endif /* _LINUX_FS_H */
+diff -Naur linux-4.16/include/linux/fsnotify.h linux-4.16-p/include/linux/fsnotify.h
+--- linux-4.16/include/linux/fsnotify.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/fsnotify.h 2018-04-12 15:57:20.823694351 +0200
+@@ -181,6 +181,9 @@
+ struct inode *inode = path->dentry->d_inode;
+ __u32 mask = FS_ACCESS;
+
++ if (is_sidechannel_device(inode))
++ return;
++
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+@@ -199,6 +202,9 @@
+ struct inode *inode = path->dentry->d_inode;
+ __u32 mask = FS_MODIFY;
+
++ if (is_sidechannel_device(inode))
++ return;
++
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+diff -Naur linux-4.16/include/linux/gfp.h linux-4.16-p/include/linux/gfp.h
+--- linux-4.16/include/linux/gfp.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/gfp.h 2018-04-12 15:57:20.824694351 +0200
+@@ -513,9 +513,9 @@
+ extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
+ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+
+-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
++void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));
+ void free_pages_exact(void *virt, size_t size);
+-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
++void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));
+
+ #define __get_free_page(gfp_mask) \
+ __get_free_pages((gfp_mask), 0)
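+
The alloc_size attribute here (and in the mm.h, percpu.h, slab.h and vmalloc.h hunks below) tells the compiler which argument carries the allocation size, so __builtin_object_size() and FORTIFY_SOURCE can bound accesses through the returned pointer. A minimal sketch of the effect; my_alloc is a made-up function for illustration:

    #include <string.h>

    void *my_alloc(unsigned long size) __attribute__((alloc_size(1)));

    void demo(void)
    {
            char *p = my_alloc(8);

            /* with optimization enabled, __builtin_object_size(p, 0) now
             * evaluates to 8, so a fortified memcpy can flag this
             * oversized copy */
            memcpy(p, "0123456789abcdef", 16);
    }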
+diff -Naur linux-4.16/include/linux/highmem.h linux-4.16-p/include/linux/highmem.h
+--- linux-4.16/include/linux/highmem.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/highmem.h 2018-04-12 15:57:20.824694351 +0200
+@@ -191,6 +191,13 @@
+ kunmap_atomic(kaddr);
+ }
+
++static inline void verify_zero_highpage(struct page *page)
++{
++ void *kaddr = kmap_atomic(page);
++ BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE));
++ kunmap_atomic(kaddr);
++}
++
+ static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
+diff -Naur linux-4.16/include/linux/interrupt.h linux-4.16-p/include/linux/interrupt.h
+--- linux-4.16/include/linux/interrupt.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/interrupt.h 2018-04-12 15:57:20.824694351 +0200
+@@ -485,7 +485,7 @@
+
+ struct softirq_action
+ {
+- void (*action)(struct softirq_action *);
++ void (*action)(void);
+ };
+
+ asmlinkage void do_softirq(void);
+@@ -500,7 +500,7 @@
+ }
+ #endif
+
+-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
++extern void __init open_softirq(int nr, void (*action)(void));
+ extern void softirq_init(void);
+ extern void __raise_softirq_irqoff(unsigned int nr);
+
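Dropping the unused struct softirq_action argument (the matching handler conversions appear in the blk-softirq, rcu, sched, softirq, hrtimer, timer and irq_poll hunks of this patch) removes a live pointer into softirq_vec from every handler invocation, which is what allows the vector itself to become read-only after init. The signature change as a compilable sketch:

    /* before: handlers received a pointer into the (writable) vector */
    struct softirq_action_old { void (*action)(struct softirq_action_old *); };

    /* after: no argument, so softirq_vec can be __ro_after_init */
    struct softirq_action_new { void (*action)(void); };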
+diff -Naur linux-4.16/include/linux/kobject_ns.h linux-4.16-p/include/linux/kobject_ns.h
+--- linux-4.16/include/linux/kobject_ns.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/kobject_ns.h 2018-04-12 15:57:20.824694351 +0200
+@@ -45,7 +45,7 @@
+ void (*drop_ns)(void *);
+ };
+
+-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
++int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+ int kobj_ns_type_registered(enum kobj_ns_type type);
+ const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
+ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+diff -Naur linux-4.16/include/linux/mm.h linux-4.16-p/include/linux/mm.h
+--- linux-4.16/include/linux/mm.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/mm.h 2018-04-12 15:57:20.824694351 +0200
+@@ -535,7 +535,7 @@
+ }
+ #endif
+
+-extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
++extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __attribute__((alloc_size(1)));
+ static inline void *kvmalloc(size_t size, gfp_t flags)
+ {
+ return kvmalloc_node(size, flags, NUMA_NO_NODE);
+diff -Naur linux-4.16/include/linux/percpu.h linux-4.16-p/include/linux/percpu.h
+--- linux-4.16/include/linux/percpu.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/percpu.h 2018-04-12 15:57:20.825694351 +0200
+@@ -129,7 +129,7 @@
+ pcpu_fc_populate_pte_fn_t populate_pte_fn);
+ #endif
+
+-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
++extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
+ extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
+ extern bool is_kernel_percpu_address(unsigned long addr);
+
+@@ -137,8 +137,8 @@
+ extern void __init setup_per_cpu_areas(void);
+ #endif
+
+-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
+-extern void __percpu *__alloc_percpu(size_t size, size_t align);
++extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __attribute__((alloc_size(1)));
++extern void __percpu *__alloc_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
+ extern void free_percpu(void __percpu *__pdata);
+ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+
+diff -Naur linux-4.16/include/linux/perf_event.h linux-4.16-p/include/linux/perf_event.h
+--- linux-4.16/include/linux/perf_event.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/perf_event.h 2018-04-12 15:57:20.825694351 +0200
+@@ -1151,6 +1151,11 @@
+ int perf_event_max_stack_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
++static inline bool perf_paranoid_any(void)
++{
++ return sysctl_perf_event_paranoid > 2;
++}
++
+ static inline bool perf_paranoid_tracepoint_raw(void)
+ {
+ return sysctl_perf_event_paranoid > -1;
+diff -Naur linux-4.16/include/linux/slab.h linux-4.16-p/include/linux/slab.h
+--- linux-4.16/include/linux/slab.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/slab.h 2018-04-12 15:57:20.825694351 +0200
+@@ -177,8 +177,8 @@
+ /*
+ * Common kmalloc functions provided by all allocators
+ */
+-void * __must_check __krealloc(const void *, size_t, gfp_t);
+-void * __must_check krealloc(const void *, size_t, gfp_t);
++void * __must_check __krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
++void * __must_check krealloc(const void *, size_t, gfp_t) __attribute((alloc_size(2)));
+ void kfree(const void *);
+ void kzfree(const void *);
+ size_t ksize(const void *);
+@@ -351,7 +351,7 @@
+ }
+ #endif /* !CONFIG_SLOB */
+
+-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
++void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
+ void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
+ void kmem_cache_free(struct kmem_cache *, void *);
+
+@@ -375,7 +375,7 @@
+ }
+
+ #ifdef CONFIG_NUMA
+-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
++void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
+ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
+ #else
+ static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+@@ -497,7 +497,7 @@
+ * for general use, and so are not documented here. For a full list of
+ * potential flags, always refer to linux/gfp.h.
+ */
+-static __always_inline void *kmalloc(size_t size, gfp_t flags)
++static __always_inline __attribute__((alloc_size(1))) void *kmalloc(size_t size, gfp_t flags)
+ {
+ if (__builtin_constant_p(size)) {
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+@@ -537,7 +537,7 @@
+ return 0;
+ }
+
+-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
++static __always_inline __attribute__((alloc_size(1))) void *kmalloc_node(size_t size, gfp_t flags, int node)
+ {
+ #ifndef CONFIG_SLOB
+ if (__builtin_constant_p(size) &&
+diff -Naur linux-4.16/include/linux/slub_def.h linux-4.16-p/include/linux/slub_def.h
+--- linux-4.16/include/linux/slub_def.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/slub_def.h 2018-04-12 15:57:20.825694351 +0200
+@@ -120,6 +120,11 @@
+ unsigned long random;
+ #endif
+
++#ifdef CONFIG_SLAB_CANARY
++ unsigned long random_active;
++ unsigned long random_inactive;
++#endif
++
+ #ifdef CONFIG_NUMA
+ /*
+ * Defragmentation by allocating from a remote node.
+diff -Naur linux-4.16/include/linux/string.h linux-4.16-p/include/linux/string.h
+--- linux-4.16/include/linux/string.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/string.h 2018-04-12 15:57:20.825694351 +0200
+@@ -235,10 +235,16 @@
+ void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
+ void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
+
++#ifdef CONFIG_FORTIFY_SOURCE_STRICT_STRING
++#define __string_size(p) __builtin_object_size(p, 1)
++#else
++#define __string_size(p) __builtin_object_size(p, 0)
++#endif
++
+ #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
+ {
+- size_t p_size = __builtin_object_size(p, 0);
++ size_t p_size = __string_size(p);
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+@@ -248,7 +254,7 @@
+
+ __FORTIFY_INLINE char *strcat(char *p, const char *q)
+ {
+- size_t p_size = __builtin_object_size(p, 0);
++ size_t p_size = __string_size(p);
+ if (p_size == (size_t)-1)
+ return __builtin_strcat(p, q);
+ if (strlcat(p, q, p_size) >= p_size)
+@@ -259,7 +265,7 @@
+ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
+ {
+ __kernel_size_t ret;
+- size_t p_size = __builtin_object_size(p, 0);
++ size_t p_size = __string_size(p);
+
+ /* Work around gcc excess stack consumption issue */
+ if (p_size == (size_t)-1 ||
+@@ -274,7 +280,7 @@
+ extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+ __FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
+ {
+- size_t p_size = __builtin_object_size(p, 0);
++ size_t p_size = __string_size(p);
+ __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(__func__);
+@@ -286,8 +292,8 @@
+ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
+ {
+ size_t ret;
+- size_t p_size = __builtin_object_size(p, 0);
+- size_t q_size = __builtin_object_size(q, 0);
++ size_t p_size = __string_size(p);
++ size_t q_size = __string_size(q);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __real_strlcpy(p, q, size);
+ ret = strlen(q);
+@@ -307,8 +313,8 @@
+ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
+ {
+ size_t p_len, copy_len;
+- size_t p_size = __builtin_object_size(p, 0);
+- size_t q_size = __builtin_object_size(q, 0);
++ size_t p_size = __string_size(p);
++ size_t q_size = __string_size(q);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strncat(p, q, count);
+ p_len = strlen(p);
+@@ -421,8 +427,8 @@
+ /* defined after fortified strlen and memcpy to reuse them */
+ __FORTIFY_INLINE char *strcpy(char *p, const char *q)
+ {
+- size_t p_size = __builtin_object_size(p, 0);
+- size_t q_size = __builtin_object_size(q, 0);
++ size_t p_size = __string_size(p);
++ size_t q_size = __string_size(q);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strcpy(p, q);
+ memcpy(p, q, strlen(q) + 1);
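+
Switching from __builtin_object_size(p, 0) to mode 1 under FORTIFY_SOURCE_STRICT_STRING narrows the bound from the whole enclosing object to the closest surrounding subobject, so an overflow from one struct member into the next is also caught. The difference in isolation (sketch; sizes assume no padding):

    struct pair {
            char a[4];
            char b[4];
    };

    /* for char *p = pair_instance.a:
     *   __builtin_object_size(p, 0) == 8  whole object, spill into b allowed
     *   __builtin_object_size(p, 1) == 4  subobject, spill into b rejected */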
+diff -Naur linux-4.16/include/linux/tty.h linux-4.16-p/include/linux/tty.h
+--- linux-4.16/include/linux/tty.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/tty.h 2018-04-12 15:57:20.825694351 +0200
+@@ -13,6 +13,7 @@
+ #include <uapi/linux/tty.h>
+ #include <linux/rwsem.h>
+ #include <linux/llist.h>
++#include <linux/user_namespace.h>
+
+
+ /*
+@@ -335,6 +336,7 @@
+ /* If the tty has a pending do_SAK, queue it here - akpm */
+ struct work_struct SAK_work;
+ struct tty_port *port;
++ struct user_namespace *owner_user_ns;
+ } __randomize_layout;
+
+ /* Each of a tty's open files has private_data pointing to tty_file_private */
+@@ -344,6 +346,8 @@
+ struct list_head list;
+ };
+
++extern int tiocsti_restrict;
++
+ /* tty magic number */
+ #define TTY_MAGIC 0x5401
+
+diff -Naur linux-4.16/include/linux/vmalloc.h linux-4.16-p/include/linux/vmalloc.h
+--- linux-4.16/include/linux/vmalloc.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/include/linux/vmalloc.h 2018-04-12 15:57:20.826694350 +0200
+@@ -68,19 +68,19 @@
+ }
+ #endif
+
+-extern void *vmalloc(unsigned long size);
+-extern void *vzalloc(unsigned long size);
+-extern void *vmalloc_user(unsigned long size);
+-extern void *vmalloc_node(unsigned long size, int node);
+-extern void *vzalloc_node(unsigned long size, int node);
+-extern void *vmalloc_exec(unsigned long size);
+-extern void *vmalloc_32(unsigned long size);
+-extern void *vmalloc_32_user(unsigned long size);
+-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
++extern void *vmalloc(unsigned long size) __attribute__((alloc_size(1)));
++extern void *vzalloc(unsigned long size) __attribute__((alloc_size(1)));
++extern void *vmalloc_user(unsigned long size) __attribute__((alloc_size(1)));
++extern void *vmalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
++extern void *vzalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
++extern void *vmalloc_exec(unsigned long size) __attribute__((alloc_size(1)));
++extern void *vmalloc_32(unsigned long size) __attribute__((alloc_size(1)));
++extern void *vmalloc_32_user(unsigned long size) __attribute__((alloc_size(1)));
++extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __attribute__((alloc_size(1)));
+ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ unsigned long start, unsigned long end, gfp_t gfp_mask,
+ pgprot_t prot, unsigned long vm_flags, int node,
+- const void *caller);
++ const void *caller) __attribute__((alloc_size(1)));
+ #ifndef CONFIG_MMU
+ extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
+ static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
+diff -Naur linux-4.16/init/Kconfig linux-4.16-p/init/Kconfig
+--- linux-4.16/init/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/init/Kconfig 2018-04-12 15:57:20.826694350 +0200
+@@ -296,6 +296,7 @@
+ config AUDIT
+ bool "Auditing support"
+ depends on NET
++ default y
+ help
+ Enable auditing infrastructure that can be used with another
+ kernel subsystem, such as SELinux (which requires this for
+@@ -1039,6 +1040,12 @@
+
+ endchoice
+
++config LOCAL_INIT
++ bool "Zero uninitialized locals"
++ help
++ Zero-fill uninitialized local variables, other than variable-length
++ arrays. Requires compiler support.
++
+ config SYSCTL
+ bool
+
+@@ -1296,8 +1303,7 @@
+ which may be appropriate on small systems without swap.
+
+ config AIO
+- bool "Enable AIO support" if EXPERT
+- default y
++ bool "Enable AIO support"
+ help
+ This option enables POSIX asynchronous I/O which may be used
+ by some high performance threaded applications. Disabling
+@@ -1502,7 +1508,7 @@
+
+ config SLUB_DEBUG
+ default y
+- bool "Enable SLUB debugging support" if EXPERT
++ bool "Enable SLUB debugging support"
+ depends on SLUB && SYSFS
+ help
+ SLUB has extensive debug support features. Disabling these can
+@@ -1526,7 +1532,6 @@
+
+ config COMPAT_BRK
+ bool "Disable heap randomization"
+- default y
+ help
+ Randomizing heap placement makes heap exploits harder, but it
+ also breaks ancient binaries (including anything libc5 based).
+@@ -1573,7 +1578,6 @@
+
+ config SLAB_MERGE_DEFAULT
+ bool "Allow slab caches to be merged"
+- default y
+ help
+ For reduced kernel memory fragmentation, slab caches can be
+ merged when they share the same size and other characteristics.
+@@ -1586,9 +1590,9 @@
+ command line.
+
+ config SLAB_FREELIST_RANDOM
+- default n
+ depends on SLAB || SLUB
+ bool "SLAB freelist randomization"
++ default y
+ help
+ Randomizes the freelist order used on creating new pages. This
+ security feature reduces the predictability of the kernel slab
+@@ -1597,12 +1601,56 @@
+ config SLAB_FREELIST_HARDENED
+ bool "Harden slab freelist metadata"
+ depends on SLUB
++ default y
+ help
+ Many kernel heap attacks try to target slab cache metadata and
+ other infrastructure. This option makes minor performance
+ sacrifices to harden the kernel slab allocator against common
+ freelist exploit methods.
+
++config SLAB_HARDENED
++ default y
++ depends on SLUB
++ bool "Hardened SLAB infrastructure"
++ help
++ Make minor performance sacrifices to harden the kernel slab
++ allocator.
++
++config SLAB_CANARY
++ depends on SLUB
++ depends on !SLAB_MERGE_DEFAULT
++ bool "SLAB canaries"
++ default y
++ help
++ Place canaries at the end of kernel slab allocations, sacrificing
++ some performance and memory usage for security.
++
++ Canaries can detect some forms of heap corruption when allocations
++ are freed and as part of the HARDENED_USERCOPY feature. It provides
++ basic use-after-free detection for HARDENED_USERCOPY.
++
++ Canaries absorb small overflows (rendering them harmless), mitigate
++ non-NUL terminated C string overflows on 64-bit via a guaranteed zero
++ byte, and provide basic double-free detection.
++
++config SLAB_SANITIZE
++ bool "Sanitize SLAB allocations"
++ depends on SLUB
++ default y
++ help
++ Zero fill slab allocations on free, reducing the lifetime of
++ sensitive data and helping to mitigate use-after-free bugs.
++
++ For slabs with debug poisoning enabled, this has no impact.
++
++config SLAB_SANITIZE_VERIFY
++ depends on SLAB_SANITIZE && PAGE_SANITIZE
++ default y
++ bool "Verify sanitized SLAB allocations"
++ help
++ Verify that newly allocated slab allocations are zeroed to detect
++ write-after-free bugs.
++
+ config SLUB_CPU_PARTIAL
+ default y
+ depends on SLUB && SMP
+diff -Naur linux-4.16/kernel/audit.c linux-4.16-p/kernel/audit.c
+--- linux-4.16/kernel/audit.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/audit.c 2018-04-12 15:57:20.826694350 +0200
+@@ -1578,6 +1578,9 @@
+
+ if (audit_default == AUDIT_OFF)
+ audit_initialized = AUDIT_DISABLED;
++ else if (!audit_ever_enabled)
++ audit_initialized = AUDIT_UNINITIALIZED;
++
+ if (audit_set_enabled(audit_default))
+ panic("audit: error setting audit state (%d)\n", audit_default);
+
+diff -Naur linux-4.16/kernel/capability.c linux-4.16-p/kernel/capability.c
+--- linux-4.16/kernel/capability.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/capability.c 2018-04-12 15:57:20.826694350 +0200
+@@ -431,6 +431,12 @@
+ return ns_capable(&init_user_ns, cap);
+ }
+ EXPORT_SYMBOL(capable);
++
++bool capable_noaudit(int cap)
++{
++ return ns_capable_noaudit(&init_user_ns, cap);
++}
++EXPORT_SYMBOL(capable_noaudit);
+ #endif /* CONFIG_MULTIUSER */
+
+ /**
+diff -Naur linux-4.16/kernel/events/core.c linux-4.16-p/kernel/events/core.c
+--- linux-4.16/kernel/events/core.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/events/core.c 2018-04-12 15:57:20.828694350 +0200
+@@ -397,8 +397,13 @@
+ * 0 - disallow raw tracepoint access for unpriv
+ * 1 - disallow cpu events for unpriv
+ * 2 - disallow kernel profiling for unpriv
++ * 3 - disallow all unpriv perf event use
+ */
++#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT
++int sysctl_perf_event_paranoid __read_mostly = 3;
++#else
+ int sysctl_perf_event_paranoid __read_mostly = 2;
++#endif
+
+ /* Minimum for 512 kiB + 1 user control page */
+ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
+@@ -9915,6 +9920,9 @@
+ if (flags & ~PERF_FLAG_ALL)
+ return -EINVAL;
+
++ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
++ return -EACCES;
++
+ err = perf_copy_attr(attr_uptr, &attr);
+ if (err)
+ return err;
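+
At the new paranoid level 3 (the default under SECURITY_PERF_EVENTS_RESTRICT), the check above rejects any unprivileged perf_event_open() before the attribute is even copied in. A hypothetical probe of that behaviour:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_CPU_CLOCK;

            /* measure ourselves on any CPU */
            if (syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0) < 0)
                    perror("perf_event_open");  /* EACCES at level 3 without CAP_SYS_ADMIN */
            return 0;
    }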
+diff -Naur linux-4.16/kernel/fork.c linux-4.16-p/kernel/fork.c
+--- linux-4.16/kernel/fork.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/fork.c 2018-04-12 15:57:20.828694350 +0200
+@@ -103,6 +103,11 @@
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/task.h>
++#ifdef CONFIG_USER_NS
++extern int unprivileged_userns_clone;
++#else
++#define unprivileged_userns_clone 0
++#endif
+
+ /*
+ * Minimum number of threads to boot the kernel
+@@ -1591,6 +1596,10 @@
+ if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
+ return ERR_PTR(-EINVAL);
+
++ if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
++ if (!capable(CAP_SYS_ADMIN))
++ return ERR_PTR(-EPERM);
++
+ /*
+ * Thread groups must share signals as well, and detached threads
+ * can only be started up within the thread group.
+@@ -2385,6 +2394,12 @@
+ if (unshare_flags & CLONE_NEWNS)
+ unshare_flags |= CLONE_FS;
+
++ if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
++ err = -EPERM;
++ if (!capable(CAP_SYS_ADMIN))
++ goto bad_unshare_out;
++ }
++
+ err = check_unshare_flags(unshare_flags);
+ if (err)
+ goto bad_unshare_out;
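+
With unprivileged_userns_clone left at its default of 0, both clone(CLONE_NEWUSER) and unshare(CLONE_NEWUSER) now fail for callers without CAP_SYS_ADMIN. A hypothetical userspace check:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            if (unshare(CLONE_NEWUSER) != 0) {
                    perror("unshare(CLONE_NEWUSER)");  /* EPERM when the sysctl is 0 */
                    return 1;
            }
            puts("user namespace created");
            return 0;
    }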
+diff -Naur linux-4.16/kernel/power/snapshot.c linux-4.16-p/kernel/power/snapshot.c
+--- linux-4.16/kernel/power/snapshot.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/power/snapshot.c 2018-04-12 15:57:20.828694350 +0200
+@@ -1138,7 +1138,7 @@
+
+ void clear_free_pages(void)
+ {
+-#ifdef CONFIG_PAGE_POISONING_ZERO
++#if defined(CONFIG_PAGE_POISONING_ZERO) || defined(CONFIG_PAGE_SANITIZE)
+ struct memory_bitmap *bm = free_pages_map;
+ unsigned long pfn;
+
+@@ -1155,7 +1155,7 @@
+ }
+ memory_bm_position_reset(bm);
+ pr_info("free pages cleared after restore\n");
+-#endif /* PAGE_POISONING_ZERO */
++#endif /* PAGE_POISONING_ZERO || PAGE_SANITIZE */
+ }
+
+ /**
+diff -Naur linux-4.16/kernel/rcu/tiny.c linux-4.16-p/kernel/rcu/tiny.c
+--- linux-4.16/kernel/rcu/tiny.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/rcu/tiny.c 2018-04-12 15:57:20.829694349 +0200
+@@ -164,7 +164,7 @@
+ }
+ }
+
+-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
++static __latent_entropy void rcu_process_callbacks(void)
+ {
+ __rcu_process_callbacks(&rcu_sched_ctrlblk);
+ __rcu_process_callbacks(&rcu_bh_ctrlblk);
+diff -Naur linux-4.16/kernel/rcu/tree.c linux-4.16-p/kernel/rcu/tree.c
+--- linux-4.16/kernel/rcu/tree.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/rcu/tree.c 2018-04-12 15:57:20.829694349 +0200
+@@ -2906,7 +2906,7 @@
+ /*
+ * Do RCU core processing for the current CPU.
+ */
+-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
++static __latent_entropy void rcu_process_callbacks(void)
+ {
+ struct rcu_state *rsp;
+
+diff -Naur linux-4.16/kernel/sched/fair.c linux-4.16-p/kernel/sched/fair.c
+--- linux-4.16/kernel/sched/fair.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/sched/fair.c 2018-04-12 15:57:20.830694349 +0200
+@@ -9387,7 +9387,7 @@
+ * run_rebalance_domains is triggered when needed from the scheduler tick.
+ * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
+ */
+-static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
++static __latent_entropy void run_rebalance_domains(void)
+ {
+ struct rq *this_rq = this_rq();
+ enum cpu_idle_type idle = this_rq->idle_balance ?
+diff -Naur linux-4.16/kernel/softirq.c linux-4.16-p/kernel/softirq.c
+--- linux-4.16/kernel/softirq.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/softirq.c 2018-04-12 15:57:20.830694349 +0200
+@@ -53,7 +53,7 @@
+ EXPORT_SYMBOL(irq_stat);
+ #endif
+
+-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
++static struct softirq_action softirq_vec[NR_SOFTIRQS] __ro_after_init __aligned(PAGE_SIZE);
+
+ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+
+@@ -282,7 +282,7 @@
+ kstat_incr_softirqs_this_cpu(vec_nr);
+
+ trace_softirq_entry(vec_nr);
+- h->action(h);
++ h->action();
+ trace_softirq_exit(vec_nr);
+ if (unlikely(prev_count != preempt_count())) {
+ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+@@ -444,7 +444,7 @@
+ or_softirq_pending(1UL << nr);
+ }
+
+-void open_softirq(int nr, void (*action)(struct softirq_action *))
++void __init open_softirq(int nr, void (*action)(void))
+ {
+ softirq_vec[nr].action = action;
+ }
+@@ -486,7 +486,7 @@
+ }
+ EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+-static __latent_entropy void tasklet_action(struct softirq_action *a)
++static __latent_entropy void tasklet_action(void)
+ {
+ struct tasklet_struct *list;
+
+@@ -522,7 +522,7 @@
+ }
+ }
+
+-static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
++static __latent_entropy void tasklet_hi_action(void)
+ {
+ struct tasklet_struct *list;
+
+diff -Naur linux-4.16/kernel/sysctl.c linux-4.16-p/kernel/sysctl.c
+--- linux-4.16/kernel/sysctl.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/sysctl.c 2018-04-12 15:57:20.831694349 +0200
+@@ -67,6 +67,7 @@
+ #include <linux/bpf.h>
+ #include <linux/mount.h>
+ #include <linux/pipe_fs_i.h>
++#include <linux/tty.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/processor.h>
+@@ -99,12 +100,19 @@
+ #if defined(CONFIG_SYSCTL)
+
+ /* External variables not in a header file. */
++#if IS_ENABLED(CONFIG_USB)
++int deny_new_usb __read_mostly = 0;
++EXPORT_SYMBOL(deny_new_usb);
++#endif
+ extern int suid_dumpable;
+ #ifdef CONFIG_COREDUMP
+ extern int core_uses_pid;
+ extern char core_pattern[];
+ extern unsigned int core_pipe_limit;
+ #endif
++#ifdef CONFIG_USER_NS
++extern int unprivileged_userns_clone;
++#endif
+ extern int pid_max;
+ extern int pid_max_min, pid_max_max;
+ extern int percpu_pagelist_fraction;
+@@ -116,40 +124,43 @@
+
+ /* Constants used for minimum and maximum */
+ #ifdef CONFIG_LOCKUP_DETECTOR
+-static int sixty = 60;
++static int sixty __read_only = 60;
+ #endif
+
+-static int __maybe_unused neg_one = -1;
++static int __maybe_unused neg_one __read_only = -1;
+
+ static int zero;
+-static int __maybe_unused one = 1;
+-static int __maybe_unused two = 2;
+-static int __maybe_unused four = 4;
+-static unsigned long one_ul = 1;
+-static int one_hundred = 100;
+-static int one_thousand = 1000;
++static int __maybe_unused one __read_only = 1;
++static int __maybe_unused two __read_only = 2;
++static int __maybe_unused four __read_only = 4;
++static unsigned long one_ul __read_only = 1;
++static int one_hundred __read_only = 100;
++static int one_thousand __read_only = 1000;
+ #ifdef CONFIG_PRINTK
+-static int ten_thousand = 10000;
++static int ten_thousand __read_only = 10000;
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
+-static int six_hundred_forty_kb = 640 * 1024;
++static int six_hundred_forty_kb __read_only = 640 * 1024;
+ #endif
+
+ /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+-static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
++static unsigned long dirty_bytes_min __read_only = 2 * PAGE_SIZE;
+
+ /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
+-static int maxolduid = 65535;
+-static int minolduid;
++static int maxolduid __read_only = 65535;
++static int minolduid __read_only;
+
+-static int ngroups_max = NGROUPS_MAX;
++static int ngroups_max __read_only = NGROUPS_MAX;
+ static const int cap_last_cap = CAP_LAST_CAP;
+
+ /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */
+ #ifdef CONFIG_DETECT_HUNG_TASK
+-static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
++static unsigned long hung_task_timeout_max __read_only = (LONG_MAX/HZ);
+ #endif
+
++int device_sidechannel_restrict __read_mostly = 1;
++EXPORT_SYMBOL(device_sidechannel_restrict);
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -289,19 +300,19 @@
+ };
+
+ #ifdef CONFIG_SCHED_DEBUG
+-static int min_sched_granularity_ns = 100000; /* 100 usecs */
+-static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
+-static int min_wakeup_granularity_ns; /* 0 usecs */
+-static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
++static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
++static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
++static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
++static int max_wakeup_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
+ #ifdef CONFIG_SMP
+-static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
+-static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
++static int min_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_NONE;
++static int max_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_END-1;
+ #endif /* CONFIG_SMP */
+ #endif /* CONFIG_SCHED_DEBUG */
+
+ #ifdef CONFIG_COMPACTION
+-static int min_extfrag_threshold;
+-static int max_extfrag_threshold = 1000;
++static int min_extfrag_threshold __read_only;
++static int max_extfrag_threshold __read_only = 1000;
+ #endif
+
+ static struct ctl_table kern_table[] = {
+@@ -515,6 +526,15 @@
+ .proc_handler = proc_dointvec,
+ },
+ #endif
++#ifdef CONFIG_USER_NS
++ {
++ .procname = "unprivileged_userns_clone",
++ .data = &unprivileged_userns_clone,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++#endif
+ #ifdef CONFIG_PROC_SYSCTL
+ {
+ .procname = "tainted",
+@@ -857,6 +877,37 @@
+ .extra2 = &two,
+ },
+ #endif
++#if defined CONFIG_TTY
++ {
++ .procname = "tiocsti_restrict",
++ .data = &tiocsti_restrict,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++#endif
++ {
++ .procname = "device_sidechannel_restrict",
++ .data = &device_sidechannel_restrict,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++#if IS_ENABLED(CONFIG_USB)
++ {
++ .procname = "deny_new_usb",
++ .data = &deny_new_usb,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++#endif
+ {
+ .procname = "ngroups_max",
+ .data = &ngroups_max,
+diff -Naur linux-4.16/kernel/time/hrtimer.c linux-4.16-p/kernel/time/hrtimer.c
+--- linux-4.16/kernel/time/hrtimer.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/time/hrtimer.c 2018-04-12 15:57:40.443687638 +0200
+@@ -1413,7 +1413,7 @@
+ }
+ }
+
+-static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
++static __latent_entropy void hrtimer_run_softirq(void)
+ {
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ unsigned long flags;
+diff -Naur linux-4.16/kernel/time/timer.c linux-4.16-p/kernel/time/timer.c
+--- linux-4.16/kernel/time/timer.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/time/timer.c 2018-04-12 15:57:20.831694349 +0200
+@@ -1672,7 +1672,7 @@
+ /*
+ * This function runs timers and the timer-tq in bottom half context.
+ */
+-static __latent_entropy void run_timer_softirq(struct softirq_action *h)
++static __latent_entropy void run_timer_softirq(void)
+ {
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+diff -Naur linux-4.16/kernel/user_namespace.c linux-4.16-p/kernel/user_namespace.c
+--- linux-4.16/kernel/user_namespace.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/kernel/user_namespace.c 2018-04-12 15:57:20.831694349 +0200
+@@ -26,6 +26,9 @@
+ #include <linux/bsearch.h>
+ #include <linux/sort.h>
+
++/* sysctl */
++int unprivileged_userns_clone;
++
+ static struct kmem_cache *user_ns_cachep __read_mostly;
+ static DEFINE_MUTEX(userns_state_mutex);
+
+diff -Naur linux-4.16/lib/irq_poll.c linux-4.16-p/lib/irq_poll.c
+--- linux-4.16/lib/irq_poll.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/lib/irq_poll.c 2018-04-12 15:57:20.831694349 +0200
+@@ -75,7 +75,7 @@
+ }
+ EXPORT_SYMBOL(irq_poll_complete);
+
+-static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
++static void __latent_entropy irq_poll_softirq(void)
+ {
+ struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
+ int rearm = 0, budget = irq_poll_budget;
+diff -Naur linux-4.16/lib/Kconfig.debug linux-4.16-p/lib/Kconfig.debug
+--- linux-4.16/lib/Kconfig.debug 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/lib/Kconfig.debug 2018-04-12 15:57:20.832694348 +0200
+@@ -945,6 +945,7 @@
+
+ config PANIC_ON_OOPS
+ bool "Panic on Oops"
++ default y
+ help
+ Say Y here to enable the kernel to panic when it oopses. This
+ has the same effect as setting oops=panic on the kernel command
+@@ -954,7 +955,7 @@
+ anything erroneous after an oops which could result in data
+ corruption or other issues.
+
+- Say N if unsure.
++ Say Y if unsure.
+
+ config PANIC_ON_OOPS_VALUE
+ int
+@@ -1309,6 +1310,7 @@
+ config DEBUG_LIST
+ bool "Debug linked list manipulation"
+ depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
++ default y
+ help
+ Enable this to turn on extended checks in the linked-list
+ walking routines.
+@@ -1949,6 +1951,7 @@
+ config BUG_ON_DATA_CORRUPTION
+ bool "Trigger a BUG when data corruption is detected"
+ select DEBUG_LIST
++ default y
+ help
+ Select this option if the kernel should BUG when it encounters
+ data corruption in kernel memory structures when they get checked
+@@ -1988,6 +1991,7 @@
+ config IO_STRICT_DEVMEM
+ bool "Filter I/O access to /dev/mem"
+ depends on STRICT_DEVMEM
++ default y
+ ---help---
+ If this option is disabled, you allow userspace (root) access to all
+ io-memory regardless of whether a driver is actively using that
+diff -Naur linux-4.16/lib/kobject.c linux-4.16-p/lib/kobject.c
+--- linux-4.16/lib/kobject.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/lib/kobject.c 2018-04-12 15:57:20.832694348 +0200
+@@ -956,9 +956,9 @@
+
+
+ static DEFINE_SPINLOCK(kobj_ns_type_lock);
+-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
++static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __ro_after_init;
+
+-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
++int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+ {
+ enum kobj_ns_type type = ops->type;
+ int error;
+diff -Naur linux-4.16/lib/nlattr.c linux-4.16-p/lib/nlattr.c
+--- linux-4.16/lib/nlattr.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/lib/nlattr.c 2018-04-12 15:57:20.832694348 +0200
+@@ -364,6 +364,8 @@
+ {
+ int minlen = min_t(int, count, nla_len(src));
+
++ BUG_ON(minlen < 0);
++
+ memcpy(dest, nla_data(src), minlen);
+ if (count > minlen)
+ memset(dest + minlen, 0, count - minlen);
+diff -Naur linux-4.16/lib/vsprintf.c linux-4.16-p/lib/vsprintf.c
+--- linux-4.16/lib/vsprintf.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/lib/vsprintf.c 2018-04-12 15:57:20.832694348 +0200
+@@ -1344,7 +1344,7 @@
+ return string(buf, end, uuid, spec);
+ }
+
+-int kptr_restrict __read_mostly;
++int kptr_restrict __read_mostly = 2;
+
+ static noinline_for_stack
+ char *restricted_pointer(char *buf, char *end, const void *ptr,
+diff -Naur linux-4.16/Makefile linux-4.16-p/Makefile
+--- linux-4.16/Makefile 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/Makefile 2018-04-12 15:57:20.833694348 +0200
+@@ -734,6 +734,9 @@
+ endif
+
+ ifeq ($(cc-name),clang)
++ifdef CONFIG_LOCAL_INIT
++KBUILD_CFLAGS += -fsanitize=local-init
++endif
+ KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+ KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+diff -Naur linux-4.16/mm/Kconfig linux-4.16-p/mm/Kconfig
+--- linux-4.16/mm/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/Kconfig 2018-04-12 15:57:20.833694348 +0200
+@@ -319,7 +319,8 @@
+ config DEFAULT_MMAP_MIN_ADDR
+ int "Low address space to protect from user allocation"
+ depends on MMU
+- default 4096
++ default 32768 if ARM || (ARM64 && COMPAT)
++ default 65536
+ help
+ This is the portion of low virtual memory which should be protected
+ from userspace allocation. Keeping a user from writing to low pages
+diff -Naur linux-4.16/mm/mmap.c linux-4.16-p/mm/mmap.c
+--- linux-4.16/mm/mmap.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/mmap.c 2018-04-12 15:57:20.833694348 +0200
+@@ -220,6 +220,13 @@
+
+ newbrk = PAGE_ALIGN(brk);
+ oldbrk = PAGE_ALIGN(mm->brk);
++ /* properly handle unaligned min_brk as an empty heap */
++ if (min_brk & ~PAGE_MASK) {
++ if (brk == min_brk)
++ newbrk -= PAGE_SIZE;
++ if (mm->brk == min_brk)
++ oldbrk -= PAGE_SIZE;
++ }
+ if (oldbrk == newbrk)
+ goto set_brk;
+
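Why the fixup is needed, as a worked example (assuming 4 KiB pages; the addresses are made up): brk randomization can leave min_brk unaligned, and PAGE_ALIGN() then rounds an empty heap up past the page containing it.

    #define PAGE_SIZE  4096UL
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    /* min_brk = 0x10234 (unaligned), heap empty: brk == mm->brk == min_brk.
     * PAGE_ALIGN(0x10234) == 0x11000, so the empty heap would already
     * appear to own the page at 0x10000; stepping newbrk/oldbrk back one
     * page keeps "empty heap" meaning "no pages". */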
+diff -Naur linux-4.16/mm/page_alloc.c linux-4.16-p/mm/page_alloc.c
+--- linux-4.16/mm/page_alloc.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/page_alloc.c 2018-04-12 15:57:20.834694348 +0200
+@@ -68,6 +68,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/lockdep.h>
+ #include <linux/nmi.h>
++#include <linux/random.h>
+
+ #include <asm/sections.h>
+ #include <asm/tlbflush.h>
+@@ -101,6 +102,15 @@
+ DEFINE_MUTEX(pcpu_drain_mutex);
+ DEFINE_PER_CPU(struct work_struct, pcpu_drain);
+
++bool __meminitdata extra_latent_entropy;
++
++static int __init setup_extra_latent_entropy(char *str)
++{
++ extra_latent_entropy = true;
++ return 0;
++}
++early_param("extra_latent_entropy", setup_extra_latent_entropy);
++
+ #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
+ volatile unsigned long latent_entropy __latent_entropy;
+ EXPORT_SYMBOL(latent_entropy);
+@@ -1069,6 +1079,13 @@
+ debug_check_no_obj_freed(page_address(page),
+ PAGE_SIZE << order);
+ }
++
++ if (IS_ENABLED(CONFIG_PAGE_SANITIZE)) {
++ int i;
++ for (i = 0; i < (1 << order); i++)
++ clear_highpage(page + i);
++ }
++
+ arch_free_page(page, order);
+ kernel_poison_pages(page, 1 << order, 0);
+ kernel_map_pages(page, 1 << order, 0);
+@@ -1286,6 +1303,21 @@
+ __ClearPageReserved(p);
+ set_page_count(p, 0);
+
++ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
++ unsigned long hash = 0;
++ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
++ const unsigned long *data = lowmem_page_address(page);
++
++ for (index = 0; index < end; index++)
++ hash ^= hash + data[index];
++#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
++ latent_entropy ^= hash;
++ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
++#else
++ add_device_randomness((const void *)&hash, sizeof(hash));
++#endif
++ }
++
+ page_zone(page)->managed_pages += nr_pages;
+ set_page_refcounted(page);
+ __free_pages(page, order);
+@@ -1754,8 +1786,8 @@
+
+ static inline bool free_pages_prezeroed(void)
+ {
+- return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
+- page_poisoning_enabled();
++ return IS_ENABLED(CONFIG_PAGE_SANITIZE) ||
++ (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) && page_poisoning_enabled());
+ }
+
+ #ifdef CONFIG_DEBUG_VM
+@@ -1812,6 +1844,11 @@
+
+ post_alloc_hook(page, order, gfp_flags);
+
++ if (IS_ENABLED(CONFIG_PAGE_SANITIZE_VERIFY)) {
++ for (i = 0; i < (1 << order); i++)
++ verify_zero_highpage(page + i);
++ }
++
+ if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
+ for (i = 0; i < (1 << order); i++)
+ clear_highpage(page + i);
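+
The entropy harvesting added above is deliberately cheap: one XOR-accumulate pass over each low-memory page as the bootmem allocator hands it to the buddy allocator. The mixing step in isolation, as a userspace sketch:

    #include <stddef.h>

    static unsigned long fold_page(const unsigned long *data, size_t nwords)
    {
            unsigned long hash = 0;
            size_t i;

            for (i = 0; i < nwords; i++)
                    hash ^= hash + data[i];  /* same mix as the hunk above */
            return hash;
    }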
+diff -Naur linux-4.16/mm/slab_common.c linux-4.16-p/mm/slab_common.c
+--- linux-4.16/mm/slab_common.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/slab_common.c 2018-04-12 15:57:20.834694348 +0200
+@@ -26,10 +26,10 @@
+
+ #include "slab.h"
+
+-enum slab_state slab_state;
++enum slab_state slab_state __ro_after_init;
+ LIST_HEAD(slab_caches);
+ DEFINE_MUTEX(slab_mutex);
+-struct kmem_cache *kmem_cache;
++struct kmem_cache *kmem_cache __ro_after_init;
+
+ #ifdef CONFIG_HARDENED_USERCOPY
+ bool usercopy_fallback __ro_after_init =
+@@ -57,7 +57,7 @@
+ /*
+ * Merge control. If this is set then no merging of slab caches will occur.
+ */
+-static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
++static bool slab_nomerge __ro_after_init = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
+
+ static int __init setup_slab_nomerge(char *str)
+ {
+@@ -968,7 +968,7 @@
+ * of two cache sizes there. The size of larger slabs can be determined using
+ * fls.
+ */
+-static s8 size_index[24] = {
++static s8 size_index[24] __ro_after_init = {
+ 3, /* 8 */
+ 4, /* 16 */
+ 5, /* 24 */
+diff -Naur linux-4.16/mm/slab.h linux-4.16-p/mm/slab.h
+--- linux-4.16/mm/slab.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/slab.h 2018-04-12 15:57:20.835694347 +0200
+@@ -312,7 +312,11 @@
+ static inline bool slab_equal_or_root(struct kmem_cache *s,
+ struct kmem_cache *p)
+ {
++#ifdef CONFIG_SLAB_HARDENED
++ return p == s;
++#else
+ return true;
++#endif
+ }
+
+ static inline const char *cache_name(struct kmem_cache *s)
+@@ -364,18 +368,26 @@
+ * to not do even the assignment. In that case, slab_equal_or_root
+ * will also be a constant.
+ */
+- if (!memcg_kmem_enabled() &&
++ if (!IS_ENABLED(CONFIG_SLAB_HARDENED) &&
++ !memcg_kmem_enabled() &&
+ !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
+ return s;
+
+ page = virt_to_head_page(x);
++#ifdef CONFIG_SLAB_HARDENED
++ BUG_ON(!PageSlab(page));
++#endif
+ cachep = page->slab_cache;
+ if (slab_equal_or_root(cachep, s))
+ return cachep;
+
+ pr_err("%s: Wrong slab cache. %s but object is from %s\n",
+ __func__, s->name, cachep->name);
++#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
++ BUG_ON(1);
++#else
+ WARN_ON_ONCE(1);
++#endif
+ return s;
+ }
+
+@@ -400,7 +412,7 @@
+ * back there or track user information then we can
+ * only use the space before that information.
+ */
+- if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
++ if ((s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) || IS_ENABLED(CONFIG_SLAB_CANARY))
+ return s->inuse;
+ /*
+ * Else we can use all the padding etc for the allocation
+diff -Naur linux-4.16/mm/slub.c linux-4.16-p/mm/slub.c
+--- linux-4.16/mm/slub.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/slub.c 2018-04-12 15:57:20.835694347 +0200
+@@ -125,6 +125,16 @@
+ #endif
+ }
+
++static inline bool has_sanitize(struct kmem_cache *s)
++{
++ return IS_ENABLED(CONFIG_SLAB_SANITIZE) && !(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON));
++}
++
++static inline bool has_sanitize_verify(struct kmem_cache *s)
++{
++ return IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && has_sanitize(s);
++}
++
+ void *fixup_red_left(struct kmem_cache *s, void *p)
+ {
+ if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+@@ -299,6 +309,35 @@
+ *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
+ }
+
++#ifdef CONFIG_SLAB_CANARY
++static inline unsigned long *get_canary(struct kmem_cache *s, void *object)
++{
++ if (s->offset)
++ return object + s->offset + sizeof(void *);
++ return object + s->inuse;
++}
++
++static inline unsigned long get_canary_value(const void *canary, unsigned long value)
++{
++ return (value ^ (unsigned long)canary) & CANARY_MASK;
++}
++
++static inline void set_canary(struct kmem_cache *s, void *object, unsigned long value)
++{
++ unsigned long *canary = get_canary(s, object);
++ *canary = get_canary_value(canary, value);
++}
++
++static inline void check_canary(struct kmem_cache *s, void *object, unsigned long value)
++{
++ unsigned long *canary = get_canary(s, object);
++ BUG_ON(*canary != get_canary_value(canary, value));
++}
++#else
++#define set_canary(s, object, value)
++#define check_canary(s, object, value)
++#endif
++
+ /* Loop over all objects in a slab */
+ #define for_each_object(__p, __s, __addr, __objects) \
+ for (__p = fixup_red_left(__s, __addr); \
+@@ -486,13 +525,13 @@
+ * Debug settings:
+ */
+ #if defined(CONFIG_SLUB_DEBUG_ON)
+-static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
++static slab_flags_t slub_debug __ro_after_init = DEBUG_DEFAULT_FLAGS;
+ #else
+-static slab_flags_t slub_debug;
++static slab_flags_t slub_debug __ro_after_init;
+ #endif
+
+-static char *slub_debug_slabs;
+-static int disable_higher_order_debug;
++static char *slub_debug_slabs __ro_after_init;
++static int disable_higher_order_debug __ro_after_init;
+
+ /*
+ * slub is about to manipulate internal object metadata. This memory lies
+@@ -552,6 +591,9 @@
+ else
+ p = object + s->inuse;
+
++ if (IS_ENABLED(CONFIG_SLAB_CANARY))
++ p = (void *)p + sizeof(void *);
++
+ return p + alloc;
+ }
+
+@@ -690,6 +732,9 @@
+ else
+ off = s->inuse;
+
++ if (IS_ENABLED(CONFIG_SLAB_CANARY))
++ off += sizeof(void *);
++
+ if (s->flags & SLAB_STORE_USER)
+ off += 2 * sizeof(struct track);
+
+@@ -819,6 +864,9 @@
+ /* Freepointer is placed after the object. */
+ off += sizeof(void *);
+
++ if (IS_ENABLED(CONFIG_SLAB_CANARY))
++ off += sizeof(void *);
++
+ if (s->flags & SLAB_STORE_USER)
+ /* We also have user information there */
+ off += 2 * sizeof(struct track);
+@@ -1420,8 +1468,9 @@
+ void *object)
+ {
+ setup_object_debug(s, page, object);
++ set_canary(s, object, s->random_inactive);
+ kasan_init_slab_obj(s, object);
+- if (unlikely(s->ctor)) {
++ if (unlikely(s->ctor) && !has_sanitize_verify(s)) {
+ kasan_unpoison_object_data(s, object);
+ s->ctor(object);
+ kasan_poison_object_data(s, object);
+@@ -2719,9 +2768,21 @@
+ stat(s, ALLOC_FASTPATH);
+ }
+
+- if (unlikely(gfpflags & __GFP_ZERO) && object)
++ if (has_sanitize_verify(s) && object) {
++ size_t offset = s->offset ? 0 : sizeof(void *);
++ BUG_ON(memchr_inv(object + offset, 0, s->object_size - offset));
++ if (s->ctor)
++ s->ctor(object);
++ if (unlikely(gfpflags & __GFP_ZERO) && offset)
++ memset(object, 0, sizeof(void *));
++ } else if (unlikely(gfpflags & __GFP_ZERO) && object)
+ memset(object, 0, s->object_size);
+
++ if (object) {
++ check_canary(s, object, s->random_inactive);
++ set_canary(s, object, s->random_active);
++ }
++
+ slab_post_alloc_hook(s, gfpflags, 1, &object);
+
+ return object;
+@@ -2928,6 +2989,27 @@
+ void *tail_obj = tail ? : head;
+ struct kmem_cache_cpu *c;
+ unsigned long tid;
++ bool sanitize = has_sanitize(s);
++
++ if (IS_ENABLED(CONFIG_SLAB_CANARY) || sanitize) {
++ __maybe_unused int offset = s->offset ? 0 : sizeof(void *);
++ void *x = head;
++
++ while (1) {
++ check_canary(s, x, s->random_active);
++ set_canary(s, x, s->random_inactive);
++
++ if (sanitize) {
++ memset(x + offset, 0, s->object_size - offset);
++ if (!IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && s->ctor)
++ s->ctor(x);
++ }
++ if (x == tail_obj)
++ break;
++ x = get_freepointer(s, x);
++ }
++ }
++
+ redo:
+ /*
+ * Determine the currently cpus per cpu slab.
+@@ -3106,7 +3188,7 @@
+ void **p)
+ {
+ struct kmem_cache_cpu *c;
+- int i;
++ int i, k;
+
+ /* memcg and kmem_cache debug support */
+ s = slab_pre_alloc_hook(s, flags);
+@@ -3143,13 +3225,29 @@
+ local_irq_enable();
+
+ /* Clear memory outside IRQ disabled fastpath loop */
+- if (unlikely(flags & __GFP_ZERO)) {
++ if (has_sanitize_verify(s)) {
++ int j;
++
++ for (j = 0; j < i; j++) {
++ size_t offset = s->offset ? 0 : sizeof(void *);
++ BUG_ON(memchr_inv(p[j] + offset, 0, s->object_size - offset));
++ if (s->ctor)
++ s->ctor(p[j]);
++ if (unlikely(flags & __GFP_ZERO) && offset)
++ memset(p[j], 0, sizeof(void *));
++ }
++ } else if (unlikely(flags & __GFP_ZERO)) {
+ int j;
+
+ for (j = 0; j < i; j++)
+ memset(p[j], 0, s->object_size);
+ }
+
++ for (k = 0; k < i; k++) {
++ check_canary(s, p[k], s->random_inactive);
++ set_canary(s, p[k], s->random_active);
++ }
++
+ /* memcg and kmem_cache debug support */
+ slab_post_alloc_hook(s, flags, size, p);
+ return i;
+@@ -3181,9 +3279,9 @@
+ * and increases the number of allocations possible without having to
+ * take the list_lock.
+ */
+-static int slub_min_order;
+-static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+-static int slub_min_objects;
++static int slub_min_order __ro_after_init;
++static int slub_max_order __ro_after_init = PAGE_ALLOC_COSTLY_ORDER;
++static int slub_min_objects __ro_after_init;
+
+ /*
+ * Calculate the order of allocation given an slab object size.
+@@ -3353,6 +3451,7 @@
+ init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
+ init_tracking(kmem_cache_node, n);
+ #endif
++ set_canary(kmem_cache_node, n, kmem_cache_node->random_active);
+ kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
+ GFP_KERNEL);
+ init_kmem_cache_node(n);
+@@ -3509,6 +3608,9 @@
+ size += sizeof(void *);
+ }
+
++ if (IS_ENABLED(CONFIG_SLAB_CANARY))
++ size += sizeof(void *);
++
+ #ifdef CONFIG_SLUB_DEBUG
+ if (flags & SLAB_STORE_USER)
+ /*
+@@ -3579,6 +3681,10 @@
+ #ifdef CONFIG_SLAB_FREELIST_HARDENED
+ s->random = get_random_long();
+ #endif
++#ifdef CONFIG_SLAB_CANARY
++ s->random_active = get_random_long();
++ s->random_inactive = get_random_long();
++#endif
+
+ if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
+ s->reserved = sizeof(struct rcu_head);
+@@ -3846,6 +3952,8 @@
+ offset -= s->red_left_pad;
+ }
+
++ check_canary(s, (void *)ptr - offset, s->random_active);
++
+ /* Allow address range falling entirely within usercopy region. */
+ if (offset >= s->useroffset &&
+ offset - s->useroffset <= s->usersize &&
+@@ -3879,7 +3987,11 @@
+ page = virt_to_head_page(object);
+
+ if (unlikely(!PageSlab(page))) {
++#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
++ BUG_ON(!PageCompound(page));
++#else
+ WARN_ON(!PageCompound(page));
++#endif
+ return PAGE_SIZE << compound_order(page);
+ }
+
+@@ -4744,7 +4856,7 @@
+ #define SO_TOTAL (1 << SL_TOTAL)
+
+ #ifdef CONFIG_MEMCG
+-static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
++static bool memcg_sysfs_enabled __ro_after_init = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
+
+ static int __init setup_slub_memcg_sysfs(char *str)
+ {
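Taken together, the slub.c hunks above give SLUB eager sanitization: on every free the object is zeroed immediately (minus the word reused as the inline freelist pointer when s->offset is 0), the constructor is re-applied where needed, and the canary is rotated from the active to the inactive secret. The zeroing step in isolation, as a self-contained sketch (buffer size and names are illustrative):

#include <assert.h>
#include <string.h>

#define OBJ_SIZE 64
#define FREEPTR_SIZE sizeof(void *)

/* When s->offset == 0, SLUB keeps the freelist pointer in the first
 * word of the object itself, so the wipe starts just past it; this
 * mirrors memset(x + offset, 0, s->object_size - offset) above. */
static void sanitize_on_free(unsigned char *obj)
{
	memset(obj + FREEPTR_SIZE, 0, OBJ_SIZE - FREEPTR_SIZE);
}

int main(void)
{
	unsigned char obj[OBJ_SIZE];

	memset(obj, 0xA5, OBJ_SIZE); /* "sensitive" contents */
	sanitize_on_free(obj);
	assert(!memchr(obj + FREEPTR_SIZE, 0xA5, OBJ_SIZE - FREEPTR_SIZE));
	return 0;
}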
+diff -Naur linux-4.16/mm/swap.c linux-4.16-p/mm/swap.c
+--- linux-4.16/mm/swap.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/mm/swap.c 2018-04-12 15:57:20.836694347 +0200
+@@ -92,6 +92,13 @@
+ if (!PageHuge(page))
+ __page_cache_release(page);
+ dtor = get_compound_page_dtor(page);
++ if (!PageHuge(page))
++ BUG_ON(dtor != free_compound_page
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ && dtor != free_transhuge_page
++#endif
++ );
++
+ (*dtor)(page);
+ }
+
+diff -Naur linux-4.16/net/core/dev.c linux-4.16-p/net/core/dev.c
+--- linux-4.16/net/core/dev.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/net/core/dev.c 2018-04-12 15:57:20.837694346 +0200
+@@ -4196,7 +4196,7 @@
+ }
+ EXPORT_SYMBOL(netif_rx_ni);
+
+-static __latent_entropy void net_tx_action(struct softirq_action *h)
++static __latent_entropy void net_tx_action(void)
+ {
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+@@ -5745,7 +5745,7 @@
+ return work;
+ }
+
+-static __latent_entropy void net_rx_action(struct softirq_action *h)
++static __latent_entropy void net_rx_action(void)
+ {
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+ unsigned long time_limit = jiffies +
+diff -Naur linux-4.16/net/ipv4/Kconfig linux-4.16-p/net/ipv4/Kconfig
+--- linux-4.16/net/ipv4/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/net/ipv4/Kconfig 2018-04-12 15:57:20.837694346 +0200
+@@ -261,6 +261,7 @@
+
+ config SYN_COOKIES
+ bool "IP: TCP syncookie support"
++ default y
+ ---help---
+ Normal TCP/IP networking is open to an attack known as "SYN
+ flooding". This denial-of-service attack prevents legitimate remote
+diff -Naur linux-4.16/scripts/mod/modpost.c linux-4.16-p/scripts/mod/modpost.c
+--- linux-4.16/scripts/mod/modpost.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/scripts/mod/modpost.c 2018-04-12 15:57:20.837694346 +0200
+@@ -37,6 +37,7 @@
+ static int warn_unresolved = 0;
+ /* How a symbol is exported */
+ static int sec_mismatch_count = 0;
++static int writable_fptr_count = 0;
+ static int sec_mismatch_verbose = 1;
+ static int sec_mismatch_fatal = 0;
+ /* ignore missing files */
+@@ -965,6 +966,7 @@
+ ANY_EXIT_TO_ANY_INIT,
+ EXPORT_TO_INIT_EXIT,
+ EXTABLE_TO_NON_TEXT,
++ DATA_TO_TEXT
+ };
+
+ /**
+@@ -1091,6 +1093,12 @@
+ .good_tosec = {ALL_TEXT_SECTIONS , NULL},
+ .mismatch = EXTABLE_TO_NON_TEXT,
+ .handler = extable_mismatch_handler,
++},
++/* Do not reference code from writable data */
++{
++ .fromsec = { DATA_SECTIONS, NULL },
++ .bad_tosec = { ALL_TEXT_SECTIONS, NULL },
++ .mismatch = DATA_TO_TEXT
+ }
+ };
+
+@@ -1240,10 +1248,10 @@
+ continue;
+ if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
+ continue;
+- if (sym->st_value == addr)
+- return sym;
+ /* Find a symbol nearby - addr are maybe negative */
+ d = sym->st_value - addr;
++ if (d == 0)
++ return sym;
+ if (d < 0)
+ d = addr - sym->st_value;
+ if (d < distance) {
+@@ -1402,7 +1410,11 @@
+ char *prl_from;
+ char *prl_to;
+
+- sec_mismatch_count++;
++ if (mismatch->mismatch == DATA_TO_TEXT)
++ writable_fptr_count++;
++ else
++ sec_mismatch_count++;
++
+ if (!sec_mismatch_verbose)
+ return;
+
+@@ -1526,6 +1538,14 @@
+ fatal("There's a special handler for this mismatch type, "
+ "we should never get here.");
+ break;
++ case DATA_TO_TEXT:
++#if 0
++ fprintf(stderr,
++ "The %s %s:%s references\n"
++ "the %s %s:%s%s\n",
++ from, fromsec, fromsym, to, tosec, tosym, to_p);
++#endif
++ break;
+ }
+ fprintf(stderr, "\n");
+ }
+@@ -2539,6 +2559,14 @@
+ }
+ }
+ free(buf.p);
++ if (writable_fptr_count) {
++ if (!sec_mismatch_verbose) {
++ warn("modpost: Found %d writable function pointer(s).\n"
++ "To see full details build your kernel with:\n"
++ "'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n",
++ writable_fptr_count);
++ }
++ }
+
+ return err;
+ }
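The new DATA_TO_TEXT mismatch class makes modpost count references from writable data sections into text, i.e. function pointers that an attacker with a kernel write primitive could redirect; per-site reports stay compiled out (the #if 0 above) and only the summary warning is printed. The pattern being flagged, shown in plain C (inspect placement with objdump -t; under PIE the const pointer may land in .data.rel.ro rather than .rodata):

#include <stdio.h>

static void greet(void) { puts("hi"); }

/* Ends up in .data: writable at runtime, so a single arbitrary write
 * can redirect it. This is what the new check counts. */
void (*hook_writable)(void) = greet;

/* Ends up read-only after relocation: the shape the warning nudges
 * code toward, akin to __ro_after_init in-kernel. */
void (*const hook_const)(void) = greet;

int main(void)
{
	hook_writable();
	hook_const();
	return 0;
}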
+diff -Naur linux-4.16/security/Kconfig linux-4.16-p/security/Kconfig
+--- linux-4.16/security/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/security/Kconfig 2018-04-12 15:57:20.837694346 +0200
+@@ -8,7 +8,7 @@
+
+ config SECURITY_DMESG_RESTRICT
+ bool "Restrict unprivileged access to the kernel syslog"
+- default n
++ default y
+ help
+ This enforces restrictions on unprivileged users reading the kernel
+ syslog via dmesg(8).
+@@ -18,10 +18,34 @@
+
+ If you are unsure how to answer this question, answer N.
+
++config SECURITY_PERF_EVENTS_RESTRICT
++ bool "Restrict unprivileged use of performance events"
++ depends on PERF_EVENTS
++ default y
++ help
++ If you say Y here, the kernel.perf_event_paranoid sysctl
++ will be set to 3 by default, and no unprivileged use of the
++ perf_event_open syscall will be permitted unless it is
++ changed.
++
++config SECURITY_TIOCSTI_RESTRICT
++ bool "Restrict unprivileged use of tiocsti command injection"
++ default y
++ help
++ This enforces restrictions on unprivileged users injecting commands
++ into other processes which share a tty session using the TIOCSTI
++	  ioctl. With this option, using TIOCSTI requires CAP_SYS_ADMIN.
++
++ If this option is not selected, no restrictions will be enforced
++ unless the tiocsti_restrict sysctl is explicitly set to (1).
++
++ If you are unsure how to answer this question, answer N.
++
+ config SECURITY
+ bool "Enable different security models"
+ depends on SYSFS
+ depends on MULTIUSER
++ default y
+ help
+ This allows you to choose different security modules to be
+ configured into your kernel.
+@@ -48,6 +72,7 @@
+ config SECURITY_NETWORK
+ bool "Socket and Networking Security Hooks"
+ depends on SECURITY
++ default y
+ help
+ This enables the socket and networking security hooks.
+ If enabled, a security module can use these hooks to
+@@ -155,6 +180,7 @@
+ depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
+ select BUG
+ imply STRICT_DEVMEM
++ default y
+ help
+ This option checks for obviously wrong memory regions when
+ copying memory to/from the kernel (via copy_to_user() and
+@@ -192,10 +218,36 @@
+ config FORTIFY_SOURCE
+ bool "Harden common str/mem functions against buffer overflows"
+ depends on ARCH_HAS_FORTIFY_SOURCE
++ default y
+ help
+ Detect overflows of buffers in common string and memory functions
+ where the compiler can determine and validate the buffer sizes.
+
++config FORTIFY_SOURCE_STRICT_STRING
++ bool "Harden common functions against buffer overflows"
++ depends on FORTIFY_SOURCE
++ depends on EXPERT
++ help
++	  Perform stricter overflow checks for common C string functions,
++	  catching overflows within objects rather than only between objects.
++
++ This is not yet intended for production use, only bug finding.
++
++config PAGE_SANITIZE
++ bool "Sanitize pages"
++ default y
++ help
++	  Zero-fill page allocations on free, reducing the lifetime of
++ sensitive data and helping to mitigate use-after-free bugs.
++
++config PAGE_SANITIZE_VERIFY
++ bool "Verify sanitized pages"
++ depends on PAGE_SANITIZE
++ default y
++ help
++ Verify that newly allocated pages are zeroed to detect
++ write-after-free bugs.
++
+ config STATIC_USERMODEHELPER
+ bool "Force all usermode helper calls through a single binary"
+ help
+diff -Naur linux-4.16/security/selinux/include/objsec.h linux-4.16-p/security/selinux/include/objsec.h
+--- linux-4.16/security/selinux/include/objsec.h 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/security/selinux/include/objsec.h 2018-04-12 15:57:20.837694346 +0200
+@@ -154,6 +154,6 @@
+ 	u32 sid;	/*SID of bpf obj creator*/
+ };
+
+-extern unsigned int selinux_checkreqprot;
++extern const unsigned int selinux_checkreqprot;
+
+ #endif /* _SELINUX_OBJSEC_H_ */
+diff -Naur linux-4.16/security/selinux/Kconfig linux-4.16-p/security/selinux/Kconfig
+--- linux-4.16/security/selinux/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/security/selinux/Kconfig 2018-04-12 15:57:20.838694346 +0200
+@@ -2,7 +2,7 @@
+ bool "NSA SELinux Support"
+ depends on SECURITY_NETWORK && AUDIT && NET && INET
+ select NETWORK_SECMARK
+- default n
++ default y
+ help
+ This selects NSA Security-Enhanced Linux (SELinux).
+ You will also need a policy configuration and a labeled filesystem.
+@@ -79,23 +79,3 @@
+ This option collects access vector cache statistics to
+ /selinux/avc/cache_stats, which may be monitored via
+ tools such as avcstat.
+-
+-config SECURITY_SELINUX_CHECKREQPROT_VALUE
+- int "NSA SELinux checkreqprot default value"
+- depends on SECURITY_SELINUX
+- range 0 1
+- default 0
+- help
+- This option sets the default value for the 'checkreqprot' flag
+- that determines whether SELinux checks the protection requested
+- by the application or the protection that will be applied by the
+- kernel (including any implied execute for read-implies-exec) for
+- mmap and mprotect calls. If this option is set to 0 (zero),
+- SELinux will default to checking the protection that will be applied
+- by the kernel. If this option is set to 1 (one), SELinux will
+- default to checking the protection requested by the application.
+- The checkreqprot flag may be changed from the default via the
+- 'checkreqprot=' boot parameter. It may also be changed at runtime
+- via /selinux/checkreqprot if authorized by policy.
+-
+- If you are unsure how to answer this question, answer 0.
+diff -Naur linux-4.16/security/selinux/selinuxfs.c linux-4.16-p/security/selinux/selinuxfs.c
+--- linux-4.16/security/selinux/selinuxfs.c 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/security/selinux/selinuxfs.c 2018-04-12 15:57:20.838694346 +0200
+@@ -41,16 +41,7 @@
+ #include "objsec.h"
+ #include "conditional.h"
+
+-unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
+-
+-static int __init checkreqprot_setup(char *str)
+-{
+- unsigned long checkreqprot;
+- if (!kstrtoul(str, 0, &checkreqprot))
+- selinux_checkreqprot = checkreqprot ? 1 : 0;
+- return 1;
+-}
+-__setup("checkreqprot=", checkreqprot_setup);
++const unsigned int selinux_checkreqprot;
+
+ static DEFINE_MUTEX(sel_mutex);
+
+@@ -610,10 +601,9 @@
+ return PTR_ERR(page);
+
+ length = -EINVAL;
+- if (sscanf(page, "%u", &new_value) != 1)
++ if (sscanf(page, "%u", &new_value) != 1 || new_value)
+ goto out;
+
+- selinux_checkreqprot = new_value ? 1 : 0;
+ length = count;
+ out:
+ kfree(page);
+diff -Naur linux-4.16/security/yama/Kconfig linux-4.16-p/security/yama/Kconfig
+--- linux-4.16/security/yama/Kconfig 2018-04-01 23:20:27.000000000 +0200
++++ linux-4.16-p/security/yama/Kconfig 2018-04-12 15:57:20.838694346 +0200
+@@ -1,7 +1,7 @@
+ config SECURITY_YAMA
+ bool "Yama support"
+ depends on SECURITY
+- default n
++ default y
+ help
+ This selects Yama, which extends DAC support with additional
+ system-wide security settings beyond regular Linux discretionary