author     Linus Torvalds <torvalds@linux-foundation.org>  2023-10-30 19:09:55 -1000
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-10-30 19:09:55 -1000
commit     befaa609f4c784f505c02ea3ff036adf4f4aa814 (patch)
tree       310a29484cda62f7cb19fb07f4c3cd7c1b87c5f2 /scripts
parent     fdce8bd38037a4a2b2961ca4abffaab195690b30 (diff)
parent     9cca73d7b4bfec75b2fcef751015f31691afa792 (diff)
Merge tag 'hardening-v6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull hardening updates from Kees Cook:
 "One of the more voluminous set of changes is for adding the new
  __counted_by annotation[1] to gain run-time bounds checking of
  dynamically sized arrays with UBSan.

  - Add LKDTM test for stuck CPUs (Mark Rutland)

  - Improve LKDTM selftest behavior under UBSan (Ricardo CaƱuelo)

  - Refactor more 1-element arrays into flexible arrays (Gustavo A. R. Silva)

  - Analyze and replace strlcpy and strncpy uses (Justin Stitt, Azeem Shaikh)

  - Convert group_info.usage to refcount_t (Elena Reshetova)

  - Add __counted_by annotations (Kees Cook, Gustavo A. R. Silva)

  - Add Kconfig fragment for basic hardening options (Kees Cook, Lukas Bulwahn)

  - Fix randstruct GCC plugin performance mode to stay in groups (Kees Cook)

  - Fix strtomem() compile-time check for small sources (Kees Cook)"

* tag 'hardening-v6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (56 commits)
  hwmon: (acpi_power_meter) replace open-coded kmemdup_nul
  reset: Annotate struct reset_control_array with __counted_by
  kexec: Annotate struct crash_mem with __counted_by
  virtio_console: Annotate struct port_buffer with __counted_by
  ima: Add __counted_by for struct modsig and use struct_size()
  MAINTAINERS: Include stackleak paths in hardening entry
  string: Adjust strtomem() logic to allow for smaller sources
  hardening: x86: drop reference to removed config AMD_IOMMU_V2
  randstruct: Fix gcc-plugin performance mode to stay in group
  mailbox: zynqmp: Annotate struct zynqmp_ipi_pdata with __counted_by
  drivers: thermal: tsens: Annotate struct tsens_priv with __counted_by
  irqchip/imx-intmux: Annotate struct intmux_data with __counted_by
  KVM: Annotate struct kvm_irq_routing_table with __counted_by
  virt: acrn: Annotate struct vm_memory_region_batch with __counted_by
  hwmon: Annotate struct gsc_hwmon_platform_data with __counted_by
  sparc: Annotate struct cpuinfo_tree with __counted_by
  isdn: kcapi: replace deprecated strncpy with strscpy_pad
  isdn: replace deprecated strncpy with strscpy
  NFS/flexfiles: Annotate struct nfs4_ff_layout_segment with __counted_by
  nfs41: Annotate struct nfs4_file_layout_dsaddr with __counted_by
  ...
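[Editor's note: a minimal sketch of the __counted_by pattern these commits apply, not taken from any patch in this pull. The struct and field names (example_buf, count, data, n) are invented for illustration; __counted_by(), struct_size(), and kzalloc() are existing kernel helpers. With CONFIG_UBSAN_BOUNDS or FORTIFY_SOURCE enabled, accesses to the annotated flexible array can be checked against the recorded element count at run time.]

	struct example_buf {
		size_t count;				/* number of elements in data[] */
		u32 data[] __counted_by(count);		/* bounds-checked against ->count */
	};

	/* Typical allocation: size the trailing array with struct_size() and
	 * record the element count before data[] is used. */
	struct example_buf *buf = kzalloc(struct_size(buf, data, n), GFP_KERNEL);
	if (buf)
		buf->count = n;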
Diffstat (limited to 'scripts')
-rw-r--r--  scripts/gcc-plugins/randomize_layout_plugin.c  11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
index 951b74ba1b24..366395cab490 100644
--- a/scripts/gcc-plugins/randomize_layout_plugin.c
+++ b/scripts/gcc-plugins/randomize_layout_plugin.c
@@ -191,12 +191,14 @@ static void partition_struct(tree *fields, unsigned long length, struct partitio
 
 static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
 {
-	unsigned long i, x;
+	unsigned long i, x, index;
 	struct partition_group size_group[length];
 	unsigned long num_groups = 0;
 	unsigned long randnum;
 
 	partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
+
+	/* FIXME: this group shuffle is currently a no-op. */
 	for (i = num_groups - 1; i > 0; i--) {
 		struct partition_group tmp;
 		randnum = ranval(prng_state) % (i + 1);
@@ -206,11 +208,14 @@ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prn
 	}
 
 	for (x = 0; x < num_groups; x++) {
-		for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
+		for (index = size_group[x].length - 1; index > 0; index--) {
 			tree tmp;
+
+			i = size_group[x].start + index;
 			if (DECL_BIT_FIELD_TYPE(newtree[i]))
 				continue;
-			randnum = ranval(prng_state) % (index + 1);
+			randnum = ranval(prng_state) % (index + 1);
+			randnum += size_group[x].start;
 			// we could handle this case differently if desired
 			if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
 				continue;
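
[Editor's note: the hunk above fixes performance mode so that swap partners are drawn only from within the current group; the old code drew randnum from the whole prefix [0, i], which could move a field into a lower group. Below is a standalone, hedged sketch of that corrected per-group Fisher-Yates walk, using plain C with rand() instead of the plugin's ranctx PRNG and GCC tree nodes; the struct group layout and element values are invented for the example, and groups are assumed non-empty.]

	#include <stdio.h>
	#include <stdlib.h>

	struct group {
		unsigned long start;	/* first index of the group */
		unsigned long length;	/* number of elements in the group */
	};

	static void shuffle_within_groups(int *elems, const struct group *groups,
					  unsigned long num_groups)
	{
		unsigned long x, index, i, randnum;

		for (x = 0; x < num_groups; x++) {
			/* Walk the group from its last element down to its second one. */
			for (index = groups[x].length - 1; index > 0; index--) {
				int tmp;

				i = groups[x].start + index;
				/* Pick a partner from the same group only: an offset in
				 * [0, index] rebased onto the group's start, which is what
				 * keeps the shuffle from escaping the group. */
				randnum = (unsigned long)rand() % (index + 1);
				randnum += groups[x].start;

				tmp = elems[i];
				elems[i] = elems[randnum];
				elems[randnum] = tmp;
			}
		}
	}

	int main(void)
	{
		int elems[] = { 0, 1, 2, 3, 4, 5, 6 };
		const struct group groups[] = {
			{ .start = 0, .length = 3 },	/* elements 0..2 */
			{ .start = 3, .length = 4 },	/* elements 3..6 */
		};
		unsigned long i;

		srand(42);
		shuffle_within_groups(elems, groups, 2);

		/* Every value still lands inside its original group. */
		for (i = 0; i < 7; i++)
			printf("%d ", elems[i]);
		printf("\n");
		return 0;
	}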