author:    V3n3RiX <venerix@redcorelinux.org>  2020-02-11 09:18:44 +0000
committer: V3n3RiX <venerix@redcorelinux.org>  2020-02-11 09:18:44 +0000
commit:    721cd01be1f66083834cac129a4ae7e8cd684d3f (patch)
tree:      5c0681d3ccb6495d805987216a1f5f9b1ac21c8b /sys-kernel
parent:    1f371d486086841a999a54b85603b12a8bd96c14 (diff)
sys-kernel/linux-{image,sources}-redcore-lts : version bump (v5.4.18)
Diffstat (limited to 'sys-kernel')
-rw-r--r--  sys-kernel/linux-image-redcore-lts/Manifest                                                                                                           |   2
-rw-r--r--  sys-kernel/linux-image-redcore-lts/files/5.4-linux-hardened.patch                                                                                     | 108
-rw-r--r--  sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-5.4.18.ebuild (renamed from sys-kernel/linux-image-redcore-lts/linux-image-redcore-5.4.15.ebuild)     |   2
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/Manifest                                                                                                         |   2
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/files/5.4-linux-hardened.patch                                                                                   | 108
-rw-r--r--  sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-5.4.18.ebuild (renamed from sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-5.4.15.ebuild) |   2
6 files changed, 100 insertions, 124 deletions
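
For context, a version bump of this kind usually amounts to renaming the ebuilds to the new package name and version, regenerating the distfile digests in each Manifest, and rebasing the bundled 5.4-linux-hardened.patch against the new upstream sources. The lines below are only a hedged sketch of that workflow under a standard Portage overlay checkout; the exact commands the maintainer ran are not recorded in this commit, and the ebuild/git invocations are assumptions, not part of the diff.

# assumed workflow sketch, not taken from this commit
cd sys-kernel/linux-image-redcore-lts
git mv linux-image-redcore-5.4.15.ebuild linux-image-redcore-lts-5.4.18.ebuild
# refresh the DIST entry (size, BLAKE2B, SHA512) for linux-5.4.18.tar.xz
ebuild linux-image-redcore-lts-5.4.18.ebuild manifest
# repeat for sys-kernel/linux-sources-redcore-lts, then commit both packages together
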
diff --git a/sys-kernel/linux-image-redcore-lts/Manifest b/sys-kernel/linux-image-redcore-lts/Manifest
index eb43633f..e5809ef2 100644
--- a/sys-kernel/linux-image-redcore-lts/Manifest
+++ b/sys-kernel/linux-image-redcore-lts/Manifest
@@ -1 +1 @@
-DIST linux-5.4.15.tar.xz 109461584 BLAKE2B 0384ac416aaca05ac73e77fae2cf85b2d773f343aa9a95191818a237354904b2c36404cbe08e373be832fa9f17c7961362dfe3be50f8cfe040c2eaddbab37da1 SHA512 be890d2f893e4470bf51ea84e60088e33420083ffd39e50d204a063e8405176035f2364333657ebabdd68bd4635ae3ea535d0c939de6c88e3e118c3619be1866
+DIST linux-5.4.18.tar.xz 109470980 BLAKE2B 4345598577f39644b00693c95a8402d70c7aba94614bd9a61f5e212768c6e58ffbaa0d8392c129010e77d6c9770182d4967635c797a2d86e11ecad417df87a27 SHA512 ec4568bfb816b3645bbe6c709343b4cc058068500948c10e9da191d2556ab1bd66a90674880e5be2dbf169afe416fe88c48ed5bd9fc1a55739ea94f8f0cb62b2
diff --git a/sys-kernel/linux-image-redcore-lts/files/5.4-linux-hardened.patch b/sys-kernel/linux-image-redcore-lts/files/5.4-linux-hardened.patch
index 699f56b2..590651ed 100644
--- a/sys-kernel/linux-image-redcore-lts/files/5.4-linux-hardened.patch
+++ b/sys-kernel/linux-image-redcore-lts/files/5.4-linux-hardened.patch
@@ -1,5 +1,5 @@
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index f5a551e4332d..a0d202674a43 100644
+index 5594c8bf1dcd..ac80978f4629 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -505,16 +505,6 @@
@@ -100,10 +100,10 @@ index 5f8a5d84dbbe..60103a76d33e 100644
Enabling this switches the refcounting infrastructure from a fast
unchecked atomic_t implementation to a fully state checked
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 3f047afb982c..869d4b0ee141 100644
+index 6ccd2ed30963..56d39ec3c2c3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -1138,6 +1138,7 @@ config RODATA_FULL_DEFAULT_ENABLED
+@@ -1139,6 +1139,7 @@ config RODATA_FULL_DEFAULT_ENABLED
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
@@ -111,7 +111,7 @@ index 3f047afb982c..869d4b0ee141 100644
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
-@@ -1537,6 +1538,7 @@ config RANDOMIZE_BASE
+@@ -1538,6 +1539,7 @@ config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
@@ -452,7 +452,7 @@ index f7476ce23b6e..652169a2b23a 100644
/*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 930edeb41ec3..d80d2577af6a 100644
+index 0a74407ef92e..5ceff405c81c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -560,9 +560,9 @@ static void __init pagetable_init(void)
@@ -468,7 +468,7 @@ index 930edeb41ec3..d80d2577af6a 100644
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index a6b5c653727b..24981a11b92a 100644
+index b8541d77452c..a231504e0348 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -97,9 +97,9 @@ DEFINE_ENTRY(pte, pte, init)
@@ -497,7 +497,7 @@ index 457d9ba3eb20..5f987fc1c0a0 100644
struct list_head *cpu_list, local_list;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index 28c492be0a57..6cf8c9ffda79 100644
+index 84b183a6424e..b83bff5e9ab5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5143,7 +5143,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
@@ -591,7 +591,7 @@ index 802c1210558f..0cc320f33cdc 100644
return tty;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index dfe9ac8d2375..add80b1e4c91 100644
+index 4ac74b354801..7c2cb5b3a449 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -42,6 +42,8 @@
@@ -603,7 +603,7 @@ index dfe9ac8d2375..add80b1e4c91 100644
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
-@@ -4990,6 +4992,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+@@ -4991,6 +4993,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto done;
return;
}
@@ -638,7 +638,7 @@ index c27231234764..4038334db213 100644
err:
up_write(&mm->mmap_sem);
diff --git a/fs/namei.c b/fs/namei.c
-index 671c3c1a3425..618ef0b5d000 100644
+index bd1c0ca4151c..8f67ca391509 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -877,10 +877,10 @@ static inline void put_link(struct nameidata *nd)
@@ -716,7 +716,7 @@ index c38e4c2e1221..6135fbaf7298 100644
generic_fillattr(inode, stat);
return 0;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
-index f9fd18670e22..d16e48bed451 100644
+index d99d166fd892..7a4f2854feb8 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -28,7 +28,11 @@
@@ -881,7 +881,7 @@ index 069aa2ebef90..cb9e3637a620 100644
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index a2adf95b3f9c..6f6c068e645d 100644
+index b249d2e033aa..a4855777d1fa 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -664,7 +664,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
@@ -1146,10 +1146,10 @@ index 4e7809408073..0b58a5176a25 100644
extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
diff --git a/init/Kconfig b/init/Kconfig
-index b4daad2bac23..c1016fd960f0 100644
+index 0328b53d09ad..fde78a967939 100644
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -381,6 +381,7 @@ config USELIB
+@@ -382,6 +382,7 @@ config USELIB
config AUDIT
bool "Auditing support"
depends on NET
@@ -1157,7 +1157,7 @@ index b4daad2bac23..c1016fd960f0 100644
help
Enable auditing infrastructure that can be used with another
kernel subsystem, such as SELinux (which requires this for
-@@ -1118,6 +1119,22 @@ config USER_NS
+@@ -1119,6 +1120,22 @@ config USER_NS
If unsure, say N.
@@ -1180,7 +1180,7 @@ index b4daad2bac23..c1016fd960f0 100644
config PID_NS
bool "PID Namespaces"
default y
-@@ -1538,8 +1555,7 @@ config SHMEM
+@@ -1539,8 +1556,7 @@ config SHMEM
which may be appropriate on small systems without swap.
config AIO
@@ -1190,7 +1190,7 @@ index b4daad2bac23..c1016fd960f0 100644
help
This option enables POSIX asynchronous I/O which may by used
by some high performance threaded applications. Disabling
-@@ -1650,6 +1666,23 @@ config USERFAULTFD
+@@ -1651,6 +1667,23 @@ config USERFAULTFD
Enable the userfaultfd() system call that allows to intercept and
handle page faults in userland.
@@ -1214,7 +1214,7 @@ index b4daad2bac23..c1016fd960f0 100644
config ARCH_HAS_MEMBARRIER_CALLBACKS
bool
-@@ -1762,7 +1795,7 @@ config VM_EVENT_COUNTERS
+@@ -1763,7 +1796,7 @@ config VM_EVENT_COUNTERS
config SLUB_DEBUG
default y
@@ -1223,7 +1223,7 @@ index b4daad2bac23..c1016fd960f0 100644
depends on SLUB && SYSFS
help
SLUB has extensive debug support features. Disabling these can
-@@ -1786,7 +1819,6 @@ config SLUB_MEMCG_SYSFS_ON
+@@ -1787,7 +1820,6 @@ config SLUB_MEMCG_SYSFS_ON
config COMPAT_BRK
bool "Disable heap randomization"
@@ -1231,7 +1231,7 @@ index b4daad2bac23..c1016fd960f0 100644
help
Randomizing heap placement makes heap exploits harder, but it
also breaks ancient binaries (including anything libc5 based).
-@@ -1833,7 +1865,6 @@ endchoice
+@@ -1834,7 +1866,6 @@ endchoice
config SLAB_MERGE_DEFAULT
bool "Allow slab caches to be merged"
@@ -1239,7 +1239,7 @@ index b4daad2bac23..c1016fd960f0 100644
help
For reduced kernel memory fragmentation, slab caches can be
merged when they share the same size and other characteristics.
-@@ -1846,9 +1877,9 @@ config SLAB_MERGE_DEFAULT
+@@ -1847,9 +1878,9 @@ config SLAB_MERGE_DEFAULT
command line.
config SLAB_FREELIST_RANDOM
@@ -1250,7 +1250,7 @@ index b4daad2bac23..c1016fd960f0 100644
help
Randomizes the freelist order used on creating new pages. This
security feature reduces the predictability of the kernel slab
-@@ -1857,12 +1888,30 @@ config SLAB_FREELIST_RANDOM
+@@ -1858,12 +1889,30 @@ config SLAB_FREELIST_RANDOM
config SLAB_FREELIST_HARDENED
bool "Harden slab freelist metadata"
depends on SLUB
@@ -1339,7 +1339,7 @@ index 1444f3954d75..8cc9dd7992f2 100644
/**
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 00a014670ed0..2f177466f34b 100644
+index 6c829e22bad3..3063a7239a94 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -398,8 +398,13 @@ static cpumask_var_t perf_online_mask;
@@ -1356,7 +1356,7 @@ index 00a014670ed0..2f177466f34b 100644
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
-@@ -10897,6 +10902,9 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -10895,6 +10900,9 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
@@ -1367,7 +1367,7 @@ index 00a014670ed0..2f177466f34b 100644
if (err)
return err;
diff --git a/kernel/fork.c b/kernel/fork.c
-index 6cabc124378c..fda4986da9eb 100644
+index 755d8160e001..ed909f8050b2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -106,6 +106,11 @@
@@ -1393,7 +1393,7 @@ index 6cabc124378c..fda4986da9eb 100644
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
-@@ -2826,6 +2835,12 @@ int ksys_unshare(unsigned long unshare_flags)
+@@ -2836,6 +2845,12 @@ int ksys_unshare(unsigned long unshare_flags)
if (unshare_flags & CLONE_NEWNS)
unshare_flags |= CLONE_FS;
@@ -1407,14 +1407,10 @@ index 6cabc124378c..fda4986da9eb 100644
if (err)
goto bad_unshare_out;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
-index 83105874f255..0951ee9d0b9a 100644
+index d65f2d5ab694..145e3c62c380 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
-@@ -1140,10 +1140,12 @@ void free_basic_memory_bitmaps(void)
-
- void clear_free_pages(void)
- {
--#ifdef CONFIG_PAGE_POISONING_ZERO
+@@ -1150,6 +1150,9 @@ void clear_free_pages(void)
struct memory_bitmap *bm = free_pages_map;
unsigned long pfn;
@@ -1424,14 +1420,6 @@ index 83105874f255..0951ee9d0b9a 100644
if (WARN_ON(!(free_pages_map)))
return;
-@@ -1157,7 +1159,6 @@ void clear_free_pages(void)
- }
- memory_bm_position_reset(bm);
- pr_info("free pages cleared after restore\n");
--#endif /* PAGE_POISONING_ZERO */
- }
-
- /**
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 477b4eb44af5..db28cc3fd301 100644
--- a/kernel/rcu/tiny.c
@@ -1459,10 +1447,10 @@ index 81105141b6a8..38f04f653d29 100644
rcu_core();
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 69a81a5709ff..915bc17a97bc 100644
+index c87a798d1456..341c384cc597 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -9876,7 +9876,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
+@@ -9889,7 +9889,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
*/
@@ -1532,7 +1520,7 @@ index 0427a86743a4..5e6a9b4ccb41 100644
void tasklet_init(struct tasklet_struct *t,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index b6f2f35d0bcf..8d13b2fc5ec4 100644
+index 70665934d53e..8ea67d08b926 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -68,6 +68,7 @@
@@ -1708,10 +1696,10 @@ index b6f2f35d0bcf..8d13b2fc5ec4 100644
{
.procname = "ngroups_max",
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 65605530ee34..1553604b6a78 100644
+index 7f31932216a1..9ede224fc81f 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1580,7 +1580,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
+@@ -1583,7 +1583,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
}
}
@@ -1876,10 +1864,10 @@ index a5dae9a7eb51..0a3070c5a125 100644
This is the portion of low virtual memory which should be protected
from userspace allocation. Keeping a user from writing to low pages
diff --git a/mm/mmap.c b/mm/mmap.c
-index a7d8c84d19b7..4b8d4c645cde 100644
+index 4390dbea4aa5..076fd46af68c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
-@@ -236,6 +236,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+@@ -230,6 +230,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
@@ -1894,7 +1882,7 @@ index a7d8c84d19b7..4b8d4c645cde 100644
mm->brk = brk;
goto success;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index f391c0c4ed1d..64b66144f5ee 100644
+index 45e39131a716..78b4865f8a1c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -68,6 +68,7 @@
@@ -1921,7 +1909,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
-@@ -1434,6 +1444,25 @@ static void __free_pages_ok(struct page *page, unsigned int order)
+@@ -1427,6 +1437,25 @@ static void __free_pages_ok(struct page *page, unsigned int order)
local_irq_restore(flags);
}
@@ -1947,7 +1935,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
void __free_pages_core(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
-@@ -1448,7 +1477,6 @@ void __free_pages_core(struct page *page, unsigned int order)
+@@ -1441,7 +1470,6 @@ void __free_pages_core(struct page *page, unsigned int order)
}
__ClearPageReserved(p);
set_page_count(p, 0);
@@ -1955,7 +1943,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
set_page_refcounted(page);
__free_pages(page, order);
-@@ -1499,6 +1527,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
+@@ -1492,6 +1520,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
{
if (early_page_uninitialised(pfn))
return;
@@ -1963,7 +1951,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
__free_pages_core(page, order);
}
-@@ -1589,6 +1618,7 @@ static void __init deferred_free_range(unsigned long pfn,
+@@ -1582,6 +1611,7 @@ static void __init deferred_free_range(unsigned long pfn,
if (nr_pages == pageblock_nr_pages &&
(pfn & (pageblock_nr_pages - 1)) == 0) {
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
@@ -1971,7 +1959,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
__free_pages_core(page, pageblock_order);
return;
}
-@@ -1596,6 +1626,7 @@ static void __init deferred_free_range(unsigned long pfn,
+@@ -1589,6 +1619,7 @@ static void __init deferred_free_range(unsigned long pfn,
for (i = 0; i < nr_pages; i++, page++, pfn++) {
if ((pfn & (pageblock_nr_pages - 1)) == 0)
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
@@ -1979,7 +1967,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
__free_pages_core(page, 0);
}
}
-@@ -2167,6 +2198,12 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
+@@ -2156,6 +2187,12 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
{
post_alloc_hook(page, order, gfp_flags);
@@ -2068,7 +2056,7 @@ index b2b01694dc43..b531661095a2 100644
}
diff --git a/mm/slab_common.c b/mm/slab_common.c
-index 78402b362df9..6f13ba0e81e8 100644
+index ade6c257d4b4..f8f9ebd51296 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -28,10 +28,10 @@
@@ -2094,7 +2082,7 @@ index 78402b362df9..6f13ba0e81e8 100644
static int __init setup_slab_nomerge(char *str)
{
diff --git a/mm/slub.c b/mm/slub.c
-index e72e802fc569..23a714ea4343 100644
+index 20d72cb20515..6690bce322a4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -125,6 +125,12 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
@@ -2430,7 +2418,7 @@ index 3ad6db9a722e..80209685f67c 100644
unsigned long arch_mmap_rnd(void)
diff --git a/net/core/dev.c b/net/core/dev.c
-index 046307445ece..5de4da30565c 100644
+index 82325d3d1371..240e3ae8e298 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4492,7 +4492,7 @@ int netif_rx_ni(struct sk_buff *skb)
@@ -2442,7 +2430,7 @@ index 046307445ece..5de4da30565c 100644
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -6353,7 +6353,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+@@ -6355,7 +6355,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
return work;
}
@@ -2476,10 +2464,10 @@ index 952fff485546..59ffccdb1be4 100644
$(if $(filter nsdeps,$(MAKECMDGOALS)),-d)
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
-index d33de0b9f4f5..b7071438b0ab 100644
+index e3569543bdac..55cc439b3bc6 100644
--- a/scripts/gcc-plugins/Kconfig
+++ b/scripts/gcc-plugins/Kconfig
-@@ -62,6 +62,11 @@ config GCC_PLUGIN_LATENT_ENTROPY
+@@ -61,6 +61,11 @@ config GCC_PLUGIN_LATENT_ENTROPY
is some slowdown of the boot process (about 0.5%) and fork and
irq processing.
diff --git a/sys-kernel/linux-image-redcore-lts/linux-image-redcore-5.4.15.ebuild b/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-5.4.18.ebuild
index 136967be..bdd5a76c 100644
--- a/sys-kernel/linux-image-redcore-lts/linux-image-redcore-5.4.15.ebuild
+++ b/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-5.4.18.ebuild
@@ -5,7 +5,7 @@ EAPI=6
inherit eutils
-EXTRAVERSION="redcore"
+EXTRAVERSION="redcore-lts"
KV_FULL="${PV}-${EXTRAVERSION}"
KV_MAJOR="5.4"
diff --git a/sys-kernel/linux-sources-redcore-lts/Manifest b/sys-kernel/linux-sources-redcore-lts/Manifest
index eb43633f..e5809ef2 100644
--- a/sys-kernel/linux-sources-redcore-lts/Manifest
+++ b/sys-kernel/linux-sources-redcore-lts/Manifest
@@ -1 +1 @@
-DIST linux-5.4.15.tar.xz 109461584 BLAKE2B 0384ac416aaca05ac73e77fae2cf85b2d773f343aa9a95191818a237354904b2c36404cbe08e373be832fa9f17c7961362dfe3be50f8cfe040c2eaddbab37da1 SHA512 be890d2f893e4470bf51ea84e60088e33420083ffd39e50d204a063e8405176035f2364333657ebabdd68bd4635ae3ea535d0c939de6c88e3e118c3619be1866
+DIST linux-5.4.18.tar.xz 109470980 BLAKE2B 4345598577f39644b00693c95a8402d70c7aba94614bd9a61f5e212768c6e58ffbaa0d8392c129010e77d6c9770182d4967635c797a2d86e11ecad417df87a27 SHA512 ec4568bfb816b3645bbe6c709343b4cc058068500948c10e9da191d2556ab1bd66a90674880e5be2dbf169afe416fe88c48ed5bd9fc1a55739ea94f8f0cb62b2
diff --git a/sys-kernel/linux-sources-redcore-lts/files/5.4-linux-hardened.patch b/sys-kernel/linux-sources-redcore-lts/files/5.4-linux-hardened.patch
index 699f56b2..590651ed 100644
--- a/sys-kernel/linux-sources-redcore-lts/files/5.4-linux-hardened.patch
+++ b/sys-kernel/linux-sources-redcore-lts/files/5.4-linux-hardened.patch
@@ -1,5 +1,5 @@
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index f5a551e4332d..a0d202674a43 100644
+index 5594c8bf1dcd..ac80978f4629 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -505,16 +505,6 @@
@@ -100,10 +100,10 @@ index 5f8a5d84dbbe..60103a76d33e 100644
Enabling this switches the refcounting infrastructure from a fast
unchecked atomic_t implementation to a fully state checked
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 3f047afb982c..869d4b0ee141 100644
+index 6ccd2ed30963..56d39ec3c2c3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -1138,6 +1138,7 @@ config RODATA_FULL_DEFAULT_ENABLED
+@@ -1139,6 +1139,7 @@ config RODATA_FULL_DEFAULT_ENABLED
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
@@ -111,7 +111,7 @@ index 3f047afb982c..869d4b0ee141 100644
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
-@@ -1537,6 +1538,7 @@ config RANDOMIZE_BASE
+@@ -1538,6 +1539,7 @@ config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
@@ -452,7 +452,7 @@ index f7476ce23b6e..652169a2b23a 100644
/*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 930edeb41ec3..d80d2577af6a 100644
+index 0a74407ef92e..5ceff405c81c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -560,9 +560,9 @@ static void __init pagetable_init(void)
@@ -468,7 +468,7 @@ index 930edeb41ec3..d80d2577af6a 100644
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index a6b5c653727b..24981a11b92a 100644
+index b8541d77452c..a231504e0348 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -97,9 +97,9 @@ DEFINE_ENTRY(pte, pte, init)
@@ -497,7 +497,7 @@ index 457d9ba3eb20..5f987fc1c0a0 100644
struct list_head *cpu_list, local_list;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index 28c492be0a57..6cf8c9ffda79 100644
+index 84b183a6424e..b83bff5e9ab5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5143,7 +5143,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
@@ -591,7 +591,7 @@ index 802c1210558f..0cc320f33cdc 100644
return tty;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index dfe9ac8d2375..add80b1e4c91 100644
+index 4ac74b354801..7c2cb5b3a449 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -42,6 +42,8 @@
@@ -603,7 +603,7 @@ index dfe9ac8d2375..add80b1e4c91 100644
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
-@@ -4990,6 +4992,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+@@ -4991,6 +4993,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto done;
return;
}
@@ -638,7 +638,7 @@ index c27231234764..4038334db213 100644
err:
up_write(&mm->mmap_sem);
diff --git a/fs/namei.c b/fs/namei.c
-index 671c3c1a3425..618ef0b5d000 100644
+index bd1c0ca4151c..8f67ca391509 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -877,10 +877,10 @@ static inline void put_link(struct nameidata *nd)
@@ -716,7 +716,7 @@ index c38e4c2e1221..6135fbaf7298 100644
generic_fillattr(inode, stat);
return 0;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
-index f9fd18670e22..d16e48bed451 100644
+index d99d166fd892..7a4f2854feb8 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -28,7 +28,11 @@
@@ -881,7 +881,7 @@ index 069aa2ebef90..cb9e3637a620 100644
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index a2adf95b3f9c..6f6c068e645d 100644
+index b249d2e033aa..a4855777d1fa 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -664,7 +664,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
@@ -1146,10 +1146,10 @@ index 4e7809408073..0b58a5176a25 100644
extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
diff --git a/init/Kconfig b/init/Kconfig
-index b4daad2bac23..c1016fd960f0 100644
+index 0328b53d09ad..fde78a967939 100644
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -381,6 +381,7 @@ config USELIB
+@@ -382,6 +382,7 @@ config USELIB
config AUDIT
bool "Auditing support"
depends on NET
@@ -1157,7 +1157,7 @@ index b4daad2bac23..c1016fd960f0 100644
help
Enable auditing infrastructure that can be used with another
kernel subsystem, such as SELinux (which requires this for
-@@ -1118,6 +1119,22 @@ config USER_NS
+@@ -1119,6 +1120,22 @@ config USER_NS
If unsure, say N.
@@ -1180,7 +1180,7 @@ index b4daad2bac23..c1016fd960f0 100644
config PID_NS
bool "PID Namespaces"
default y
-@@ -1538,8 +1555,7 @@ config SHMEM
+@@ -1539,8 +1556,7 @@ config SHMEM
which may be appropriate on small systems without swap.
config AIO
@@ -1190,7 +1190,7 @@ index b4daad2bac23..c1016fd960f0 100644
help
This option enables POSIX asynchronous I/O which may by used
by some high performance threaded applications. Disabling
-@@ -1650,6 +1666,23 @@ config USERFAULTFD
+@@ -1651,6 +1667,23 @@ config USERFAULTFD
Enable the userfaultfd() system call that allows to intercept and
handle page faults in userland.
@@ -1214,7 +1214,7 @@ index b4daad2bac23..c1016fd960f0 100644
config ARCH_HAS_MEMBARRIER_CALLBACKS
bool
-@@ -1762,7 +1795,7 @@ config VM_EVENT_COUNTERS
+@@ -1763,7 +1796,7 @@ config VM_EVENT_COUNTERS
config SLUB_DEBUG
default y
@@ -1223,7 +1223,7 @@ index b4daad2bac23..c1016fd960f0 100644
depends on SLUB && SYSFS
help
SLUB has extensive debug support features. Disabling these can
-@@ -1786,7 +1819,6 @@ config SLUB_MEMCG_SYSFS_ON
+@@ -1787,7 +1820,6 @@ config SLUB_MEMCG_SYSFS_ON
config COMPAT_BRK
bool "Disable heap randomization"
@@ -1231,7 +1231,7 @@ index b4daad2bac23..c1016fd960f0 100644
help
Randomizing heap placement makes heap exploits harder, but it
also breaks ancient binaries (including anything libc5 based).
-@@ -1833,7 +1865,6 @@ endchoice
+@@ -1834,7 +1866,6 @@ endchoice
config SLAB_MERGE_DEFAULT
bool "Allow slab caches to be merged"
@@ -1239,7 +1239,7 @@ index b4daad2bac23..c1016fd960f0 100644
help
For reduced kernel memory fragmentation, slab caches can be
merged when they share the same size and other characteristics.
-@@ -1846,9 +1877,9 @@ config SLAB_MERGE_DEFAULT
+@@ -1847,9 +1878,9 @@ config SLAB_MERGE_DEFAULT
command line.
config SLAB_FREELIST_RANDOM
@@ -1250,7 +1250,7 @@ index b4daad2bac23..c1016fd960f0 100644
help
Randomizes the freelist order used on creating new pages. This
security feature reduces the predictability of the kernel slab
-@@ -1857,12 +1888,30 @@ config SLAB_FREELIST_RANDOM
+@@ -1858,12 +1889,30 @@ config SLAB_FREELIST_RANDOM
config SLAB_FREELIST_HARDENED
bool "Harden slab freelist metadata"
depends on SLUB
@@ -1339,7 +1339,7 @@ index 1444f3954d75..8cc9dd7992f2 100644
/**
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 00a014670ed0..2f177466f34b 100644
+index 6c829e22bad3..3063a7239a94 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -398,8 +398,13 @@ static cpumask_var_t perf_online_mask;
@@ -1356,7 +1356,7 @@ index 00a014670ed0..2f177466f34b 100644
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
-@@ -10897,6 +10902,9 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -10895,6 +10900,9 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
@@ -1367,7 +1367,7 @@ index 00a014670ed0..2f177466f34b 100644
if (err)
return err;
diff --git a/kernel/fork.c b/kernel/fork.c
-index 6cabc124378c..fda4986da9eb 100644
+index 755d8160e001..ed909f8050b2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -106,6 +106,11 @@
@@ -1393,7 +1393,7 @@ index 6cabc124378c..fda4986da9eb 100644
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
-@@ -2826,6 +2835,12 @@ int ksys_unshare(unsigned long unshare_flags)
+@@ -2836,6 +2845,12 @@ int ksys_unshare(unsigned long unshare_flags)
if (unshare_flags & CLONE_NEWNS)
unshare_flags |= CLONE_FS;
@@ -1407,14 +1407,10 @@ index 6cabc124378c..fda4986da9eb 100644
if (err)
goto bad_unshare_out;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
-index 83105874f255..0951ee9d0b9a 100644
+index d65f2d5ab694..145e3c62c380 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
-@@ -1140,10 +1140,12 @@ void free_basic_memory_bitmaps(void)
-
- void clear_free_pages(void)
- {
--#ifdef CONFIG_PAGE_POISONING_ZERO
+@@ -1150,6 +1150,9 @@ void clear_free_pages(void)
struct memory_bitmap *bm = free_pages_map;
unsigned long pfn;
@@ -1424,14 +1420,6 @@ index 83105874f255..0951ee9d0b9a 100644
if (WARN_ON(!(free_pages_map)))
return;
-@@ -1157,7 +1159,6 @@ void clear_free_pages(void)
- }
- memory_bm_position_reset(bm);
- pr_info("free pages cleared after restore\n");
--#endif /* PAGE_POISONING_ZERO */
- }
-
- /**
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 477b4eb44af5..db28cc3fd301 100644
--- a/kernel/rcu/tiny.c
@@ -1459,10 +1447,10 @@ index 81105141b6a8..38f04f653d29 100644
rcu_core();
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 69a81a5709ff..915bc17a97bc 100644
+index c87a798d1456..341c384cc597 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -9876,7 +9876,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
+@@ -9889,7 +9889,7 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
*/
@@ -1532,7 +1520,7 @@ index 0427a86743a4..5e6a9b4ccb41 100644
void tasklet_init(struct tasklet_struct *t,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index b6f2f35d0bcf..8d13b2fc5ec4 100644
+index 70665934d53e..8ea67d08b926 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -68,6 +68,7 @@
@@ -1708,10 +1696,10 @@ index b6f2f35d0bcf..8d13b2fc5ec4 100644
{
.procname = "ngroups_max",
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 65605530ee34..1553604b6a78 100644
+index 7f31932216a1..9ede224fc81f 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1580,7 +1580,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
+@@ -1583,7 +1583,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
}
}
@@ -1876,10 +1864,10 @@ index a5dae9a7eb51..0a3070c5a125 100644
This is the portion of low virtual memory which should be protected
from userspace allocation. Keeping a user from writing to low pages
diff --git a/mm/mmap.c b/mm/mmap.c
-index a7d8c84d19b7..4b8d4c645cde 100644
+index 4390dbea4aa5..076fd46af68c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
-@@ -236,6 +236,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+@@ -230,6 +230,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
@@ -1894,7 +1882,7 @@ index a7d8c84d19b7..4b8d4c645cde 100644
mm->brk = brk;
goto success;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index f391c0c4ed1d..64b66144f5ee 100644
+index 45e39131a716..78b4865f8a1c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -68,6 +68,7 @@
@@ -1921,7 +1909,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
-@@ -1434,6 +1444,25 @@ static void __free_pages_ok(struct page *page, unsigned int order)
+@@ -1427,6 +1437,25 @@ static void __free_pages_ok(struct page *page, unsigned int order)
local_irq_restore(flags);
}
@@ -1947,7 +1935,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
void __free_pages_core(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
-@@ -1448,7 +1477,6 @@ void __free_pages_core(struct page *page, unsigned int order)
+@@ -1441,7 +1470,6 @@ void __free_pages_core(struct page *page, unsigned int order)
}
__ClearPageReserved(p);
set_page_count(p, 0);
@@ -1955,7 +1943,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
set_page_refcounted(page);
__free_pages(page, order);
-@@ -1499,6 +1527,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
+@@ -1492,6 +1520,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
{
if (early_page_uninitialised(pfn))
return;
@@ -1963,7 +1951,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
__free_pages_core(page, order);
}
-@@ -1589,6 +1618,7 @@ static void __init deferred_free_range(unsigned long pfn,
+@@ -1582,6 +1611,7 @@ static void __init deferred_free_range(unsigned long pfn,
if (nr_pages == pageblock_nr_pages &&
(pfn & (pageblock_nr_pages - 1)) == 0) {
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
@@ -1971,7 +1959,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
__free_pages_core(page, pageblock_order);
return;
}
-@@ -1596,6 +1626,7 @@ static void __init deferred_free_range(unsigned long pfn,
+@@ -1589,6 +1619,7 @@ static void __init deferred_free_range(unsigned long pfn,
for (i = 0; i < nr_pages; i++, page++, pfn++) {
if ((pfn & (pageblock_nr_pages - 1)) == 0)
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
@@ -1979,7 +1967,7 @@ index f391c0c4ed1d..64b66144f5ee 100644
__free_pages_core(page, 0);
}
}
-@@ -2167,6 +2198,12 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
+@@ -2156,6 +2187,12 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
{
post_alloc_hook(page, order, gfp_flags);
@@ -2068,7 +2056,7 @@ index b2b01694dc43..b531661095a2 100644
}
diff --git a/mm/slab_common.c b/mm/slab_common.c
-index 78402b362df9..6f13ba0e81e8 100644
+index ade6c257d4b4..f8f9ebd51296 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -28,10 +28,10 @@
@@ -2094,7 +2082,7 @@ index 78402b362df9..6f13ba0e81e8 100644
static int __init setup_slab_nomerge(char *str)
{
diff --git a/mm/slub.c b/mm/slub.c
-index e72e802fc569..23a714ea4343 100644
+index 20d72cb20515..6690bce322a4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -125,6 +125,12 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
@@ -2430,7 +2418,7 @@ index 3ad6db9a722e..80209685f67c 100644
unsigned long arch_mmap_rnd(void)
diff --git a/net/core/dev.c b/net/core/dev.c
-index 046307445ece..5de4da30565c 100644
+index 82325d3d1371..240e3ae8e298 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4492,7 +4492,7 @@ int netif_rx_ni(struct sk_buff *skb)
@@ -2442,7 +2430,7 @@ index 046307445ece..5de4da30565c 100644
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -6353,7 +6353,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+@@ -6355,7 +6355,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
return work;
}
@@ -2476,10 +2464,10 @@ index 952fff485546..59ffccdb1be4 100644
$(if $(filter nsdeps,$(MAKECMDGOALS)),-d)
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
-index d33de0b9f4f5..b7071438b0ab 100644
+index e3569543bdac..55cc439b3bc6 100644
--- a/scripts/gcc-plugins/Kconfig
+++ b/scripts/gcc-plugins/Kconfig
-@@ -62,6 +62,11 @@ config GCC_PLUGIN_LATENT_ENTROPY
+@@ -61,6 +61,11 @@ config GCC_PLUGIN_LATENT_ENTROPY
is some slowdown of the boot process (about 0.5%) and fork and
irq processing.
diff --git a/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-5.4.15.ebuild b/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-5.4.18.ebuild
index 9c8ae378..570f9335 100644
--- a/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-5.4.15.ebuild
+++ b/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-5.4.18.ebuild
@@ -5,7 +5,7 @@ EAPI=6
inherit eutils
-EXTRAVERSION="redcore"
+EXTRAVERSION="redcore-lts"
KV_FULL="${PV}-${EXTRAVERSION}"
KV_MAJOR="5.4"